aboutsummaryrefslogtreecommitdiffstats
diff options
context:
space:
mode:
-rw-r--r--Documentation/DocBook/kernel-api.tmpl4
-rw-r--r--Documentation/filesystems/ecryptfs.txt (renamed from Documentation/ecryptfs.txt)0
-rw-r--r--Documentation/filesystems/proc.txt15
-rw-r--r--Documentation/kernel-parameters.txt26
-rw-r--r--Documentation/oops-tracing.txt2
-rw-r--r--Documentation/power/freezing-of-tasks.txt160
-rw-r--r--Documentation/power/kernel_threads.txt40
-rw-r--r--Documentation/power/swsusp.txt18
-rw-r--r--Documentation/rtc.txt2
-rw-r--r--Documentation/spi/spi-lm70llp69
-rw-r--r--Documentation/sysctl/vm.txt3
-rw-r--r--Documentation/vm/slub.txt137
-rw-r--r--arch/alpha/kernel/ptrace.c4
-rw-r--r--arch/alpha/kernel/smp.c6
-rw-r--r--arch/alpha/kernel/traps.c1
-rw-r--r--arch/alpha/lib/checksum.c1
-rw-r--r--arch/arm/kernel/ptrace.c15
-rw-r--r--arch/arm/kernel/traps.c1
-rw-r--r--arch/arm/mach-at91/board-csb337.c10
-rw-r--r--arch/arm/mach-iop32x/n2100.c10
-rw-r--r--arch/arm26/kernel/ptrace.c15
-rw-r--r--arch/arm26/kernel/traps.c1
-rw-r--r--arch/avr32/kernel/ptrace.c13
-rw-r--r--arch/avr32/kernel/traps.c1
-rw-r--r--arch/cris/arch-v10/kernel/ptrace.c21
-rw-r--r--arch/cris/arch-v32/kernel/ptrace.c7
-rw-r--r--arch/frv/kernel/ptrace.c16
-rw-r--r--arch/h8300/kernel/ptrace.c5
-rw-r--r--arch/i386/Kconfig4
-rw-r--r--arch/i386/Makefile1
-rw-r--r--arch/i386/kernel/apm.c2
-rw-r--r--arch/i386/kernel/cpu/mcheck/therm_throt.c6
-rw-r--r--arch/i386/kernel/efi.c2
-rw-r--r--arch/i386/kernel/io_apic.c1
-rw-r--r--arch/i386/kernel/nmi.c8
-rw-r--r--arch/i386/kernel/ptrace.c17
-rw-r--r--arch/i386/kernel/smpcommon.c8
-rw-r--r--arch/i386/kernel/traps.c1
-rw-r--r--arch/i386/video/Makefile1
-rw-r--r--arch/i386/video/fbdev.c32
-rw-r--r--arch/ia64/Kconfig4
-rw-r--r--arch/ia64/hp/common/sba_iommu.c20
-rw-r--r--arch/ia64/hp/sim/boot/fw-emu.c5
-rw-r--r--arch/ia64/hp/sim/simserial.c4
-rw-r--r--arch/ia64/kernel/efi.c1
-rw-r--r--arch/ia64/kernel/fsys.S4
-rw-r--r--arch/ia64/kernel/traps.c1
-rw-r--r--arch/ia64/lib/checksum.c1
-rw-r--r--arch/ia64/sn/kernel/sn2/sn_hwperf.c3
-rw-r--r--arch/m32r/kernel/ptrace.c19
-rw-r--r--arch/m68k/kernel/ptrace.c8
-rw-r--r--arch/m68k/kernel/traps.c1
-rw-r--r--arch/m68k/lib/checksum.c1
-rw-r--r--arch/m68knommu/kernel/ptrace.c17
-rw-r--r--arch/m68knommu/kernel/traps.c1
-rw-r--r--arch/mips/kernel/ptrace.c18
-rw-r--r--arch/mips/kernel/traps.c1
-rw-r--r--arch/mips/sibyte/bcm1480/setup.c1
-rw-r--r--arch/mips/sibyte/sb1250/setup.c1
-rw-r--r--arch/parisc/kernel/ptrace.c13
-rw-r--r--arch/parisc/kernel/traps.c1
-rw-r--r--arch/parisc/kernel/unwind.c2
-rw-r--r--arch/powerpc/Kconfig4
-rw-r--r--arch/powerpc/kernel/ptrace.c18
-rw-r--r--arch/powerpc/kernel/traps.c1
-rw-r--r--arch/powerpc/platforms/8xx/mpc885ads_setup.c125
-rw-r--r--arch/powerpc/platforms/cell/spu_base.c15
-rw-r--r--arch/powerpc/platforms/cell/spu_manage.c4
-rw-r--r--arch/powerpc/platforms/ps3/spu.c6
-rw-r--r--arch/ppc/kernel/traps.c1
-rw-r--r--arch/ppc/syslib/virtex_devices.h7
-rw-r--r--arch/s390/kernel/ptrace.c11
-rw-r--r--arch/s390/kernel/traps.c1
-rw-r--r--arch/sh/kernel/ptrace.c18
-rw-r--r--arch/sh/kernel/traps.c1
-rw-r--r--arch/sh64/kernel/ptrace.c17
-rw-r--r--arch/sh64/lib/c-checksum.c1
-rw-r--r--arch/sparc/Kconfig3
-rw-r--r--arch/sparc/kernel/traps.c1
-rw-r--r--arch/sparc64/defconfig155
-rw-r--r--arch/sparc64/kernel/hvtramp.S3
-rw-r--r--arch/sparc64/kernel/signal.c15
-rw-r--r--arch/sparc64/kernel/traps.c1
-rw-r--r--arch/um/drivers/pcap_user.c2
-rw-r--r--arch/um/kernel/ptrace.c18
-rw-r--r--arch/v850/kernel/ptrace.c14
-rw-r--r--arch/x86_64/Kconfig4
-rw-r--r--arch/x86_64/kernel/nmi.c8
-rw-r--r--arch/x86_64/kernel/ptrace.c17
-rw-r--r--arch/x86_64/kernel/smp.c12
-rw-r--r--arch/x86_64/kernel/traps.c2
-rw-r--r--arch/xtensa/kernel/ptrace.c17
-rw-r--r--arch/xtensa/kernel/traps.c1
-rw-r--r--block/as-iosched.c3
-rw-r--r--block/cfq-iosched.c18
-rw-r--r--block/deadline-iosched.c3
-rw-r--r--block/elevator.c3
-rw-r--r--block/genhd.c20
-rw-r--r--block/ll_rw_blk.c4
-rw-r--r--drivers/ata/sata_mv.c2
-rw-r--r--drivers/atm/ambassador.c4
-rw-r--r--drivers/atm/zatm.c4
-rw-r--r--drivers/block/Kconfig6
-rw-r--r--drivers/block/Makefile1
-rw-r--r--drivers/block/loop.c11
-rw-r--r--drivers/block/pktcdvd.c1
-rw-r--r--drivers/block/xsysace.c1164
-rw-r--r--drivers/block/z2ram.c4
-rw-r--r--drivers/char/Kconfig10
-rw-r--r--drivers/char/apm-emulation.c12
-rw-r--r--drivers/char/cyclades.c367
-rw-r--r--drivers/char/drm/drm_stub.c2
-rw-r--r--drivers/char/drm/sis_mm.c2
-rw-r--r--drivers/char/hvc_console.c1
-rw-r--r--drivers/char/isicom.c93
-rw-r--r--drivers/char/istallion.c9
-rw-r--r--drivers/char/moxa.c37
-rw-r--r--drivers/char/riscom8.c12
-rw-r--r--drivers/char/specialix.c16
-rw-r--r--drivers/char/stallion.c5
-rw-r--r--drivers/char/vt.c35
-rw-r--r--drivers/edac/edac_mc.c1
-rw-r--r--drivers/hwmon/lm70.c4
-rw-r--r--drivers/i2c/busses/i2c-pmcmsp.c2
-rw-r--r--drivers/i2c/chips/Kconfig10
-rw-r--r--drivers/i2c/chips/Makefile1
-rw-r--r--drivers/i2c/chips/menelaus.c1281
-rw-r--r--drivers/ide/ide-probe.c4
-rw-r--r--drivers/ieee1394/ieee1394_core.c3
-rw-r--r--drivers/ieee1394/nodemgr.c1
-rw-r--r--drivers/infiniband/hw/mthca/mthca_qp.c4
-rw-r--r--drivers/input/gameport/gameport.c1
-rw-r--r--drivers/input/serio/serio.c1
-rw-r--r--drivers/input/touchscreen/ucb1400_ts.c1
-rw-r--r--drivers/isdn/Kconfig15
-rw-r--r--drivers/isdn/capi/Kconfig7
-rw-r--r--drivers/isdn/capi/capi.c2
-rw-r--r--drivers/isdn/capi/kcapi.c6
-rw-r--r--drivers/isdn/capi/kcapi_proc.c28
-rw-r--r--drivers/isdn/hardware/Kconfig1
-rw-r--r--drivers/isdn/hardware/avm/Kconfig23
-rw-r--r--drivers/isdn/hardware/eicon/Kconfig22
-rw-r--r--drivers/isdn/hardware/eicon/idifunc.c1
-rw-r--r--drivers/isdn/hisax/bkm_a4t.c108
-rw-r--r--drivers/isdn/hisax/config.c243
-rw-r--r--drivers/isdn/hisax/enternow_pci.c165
-rw-r--r--drivers/isdn/hisax/hfc_pci.c191
-rw-r--r--drivers/isdn/hisax/nj_s.c194
-rw-r--r--drivers/isdn/hisax/nj_u.c167
-rw-r--r--drivers/isdn/hisax/sedlbauer.c8
-rw-r--r--drivers/isdn/i4l/Kconfig7
-rw-r--r--drivers/kvm/Kconfig9
-rw-r--r--drivers/kvm/kvm.h116
-rw-r--r--drivers/kvm/kvm_main.c456
-rw-r--r--drivers/kvm/mmu.c292
-rw-r--r--drivers/kvm/paging_tmpl.h273
-rw-r--r--drivers/kvm/svm.c59
-rw-r--r--drivers/kvm/svm.h3
-rw-r--r--drivers/kvm/vmx.c652
-rw-r--r--drivers/kvm/x86_emulate.c44
-rw-r--r--drivers/macintosh/therm_adt746x.c1
-rw-r--r--drivers/macintosh/windfarm_core.c1
-rw-r--r--drivers/md/Kconfig15
-rw-r--r--drivers/md/bitmap.c169
-rw-r--r--drivers/md/dm.c4
-rw-r--r--drivers/md/md.c71
-rw-r--r--drivers/md/raid1.c3
-rw-r--r--drivers/md/raid10.c3
-rw-r--r--drivers/media/dvb/dvb-core/dvb_frontend.c1
-rw-r--r--drivers/media/video/cx88/cx88-tvaudio.c1
-rw-r--r--drivers/media/video/msp3400-kthreads.c6
-rw-r--r--drivers/media/video/tvaudio.c2
-rw-r--r--drivers/media/video/video-buf-dvb.c1
-rw-r--r--drivers/media/video/vivi.c1
-rw-r--r--drivers/message/i2o/debug.c134
-rw-r--r--drivers/message/i2o/exec-osm.c6
-rw-r--r--drivers/message/i2o/i2o_block.c2
-rw-r--r--drivers/message/i2o/i2o_config.c62
-rw-r--r--drivers/mfd/ucb1x00-ts.c1
-rw-r--r--drivers/misc/Kconfig2
-rw-r--r--drivers/misc/ibmasm/command.c14
-rw-r--r--drivers/misc/ibmasm/dot_command.c10
-rw-r--r--drivers/misc/ibmasm/dot_command.h2
-rw-r--r--drivers/misc/ibmasm/event.c8
-rw-r--r--drivers/misc/ibmasm/heartbeat.c2
-rw-r--r--drivers/misc/ibmasm/i2o.h10
-rw-r--r--drivers/misc/ibmasm/ibmasm.h70
-rw-r--r--drivers/misc/ibmasm/ibmasmfs.c24
-rw-r--r--drivers/misc/ibmasm/lowlevel.c2
-rw-r--r--drivers/misc/ibmasm/lowlevel.h16
-rw-r--r--drivers/misc/ibmasm/module.c10
-rw-r--r--drivers/misc/ibmasm/r_heartbeat.c10
-rw-r--r--drivers/misc/ibmasm/remote.c37
-rw-r--r--drivers/misc/ibmasm/remote.h8
-rw-r--r--drivers/misc/ibmasm/uart.c2
-rw-r--r--drivers/mmc/card/queue.c7
-rw-r--r--drivers/mtd/mtd_blkdevs.c3
-rw-r--r--drivers/mtd/ubi/eba.c4
-rw-r--r--drivers/mtd/ubi/wl.c1
-rw-r--r--drivers/net/atl1/atl1_main.c1
-rw-r--r--drivers/net/eepro100.c7
-rw-r--r--drivers/net/natsemi.c2
-rw-r--r--drivers/net/ne2k-pci.c7
-rw-r--r--drivers/net/r8169.c2
-rw-r--r--drivers/net/tokenring/smctr.c6
-rw-r--r--drivers/net/wan/pc300_drv.c2
-rw-r--r--drivers/net/wan/sbni.c7
-rw-r--r--drivers/net/wireless/airo.c3
-rw-r--r--drivers/net/wireless/libertas/main.c1
-rw-r--r--drivers/parisc/hppb.c1
-rw-r--r--drivers/pcmcia/cs.c1
-rw-r--r--drivers/pcmcia/m8xx_pcmcia.c548
-rw-r--r--drivers/pnp/pnpbios/core.c1
-rw-r--r--drivers/rtc/Kconfig64
-rw-r--r--drivers/rtc/Makefile4
-rw-r--r--drivers/rtc/rtc-at32ap700x.c317
-rw-r--r--drivers/rtc/rtc-dev.c2
-rw-r--r--drivers/rtc/rtc-ds1216.c226
-rw-r--r--drivers/rtc/rtc-ds1307.c300
-rw-r--r--drivers/rtc/rtc-m41t80.c917
-rw-r--r--drivers/rtc/rtc-m48t59.c491
-rw-r--r--drivers/rtc/rtc-rs5c372.c95
-rw-r--r--drivers/s390/block/dcssblk.c7
-rw-r--r--drivers/sbus/char/jsflash.c3
-rw-r--r--drivers/scsi/libsas/sas_scsi_host.c3
-rw-r--r--drivers/scsi/scsi_error.c3
-rw-r--r--drivers/serial/Kconfig28
-rw-r--r--drivers/serial/Makefile1
-rw-r--r--drivers/serial/sb1250-duart.c972
-rw-r--r--drivers/spi/Kconfig45
-rw-r--r--drivers/spi/Makefile5
-rw-r--r--drivers/spi/atmel_spi.c185
-rw-r--r--drivers/spi/au1550_spi.c9
-rw-r--r--drivers/spi/mpc52xx_psc_spi.c9
-rw-r--r--drivers/spi/omap2_mcspi.c1081
-rw-r--r--drivers/spi/omap_uwire.c9
-rw-r--r--drivers/spi/pxa2xx_spi.c9
-rw-r--r--drivers/spi/spi.c11
-rw-r--r--drivers/spi/spi_bitbang.c8
-rw-r--r--drivers/spi/spi_imx.c24
-rw-r--r--drivers/spi/spi_lm70llp.c361
-rw-r--r--drivers/spi/spi_mpc83xx.c47
-rw-r--r--drivers/spi/spi_s3c24xx.c8
-rw-r--r--drivers/spi/spi_txx9.c474
-rw-r--r--drivers/spi/spidev.c6
-rw-r--r--drivers/spi/tle62x0.c328
-rw-r--r--drivers/spi/xilinx_spi.c434
-rw-r--r--drivers/telephony/Kconfig1
-rw-r--r--drivers/telephony/ixj.c7
-rw-r--r--drivers/usb/Kconfig2
-rw-r--r--drivers/usb/atm/ueagle-atm.c1
-rw-r--r--drivers/usb/core/hub.c1
-rw-r--r--drivers/usb/gadget/file_storage.c3
-rw-r--r--drivers/usb/misc/auerswald.c4
-rw-r--r--drivers/usb/storage/usb.c3
-rw-r--r--drivers/video/68328fb.c2
-rw-r--r--drivers/video/Kconfig6
-rw-r--r--drivers/video/Makefile1
-rw-r--r--drivers/video/aty/ati_ids.h1
-rw-r--r--drivers/video/aty/atyfb_base.c2
-rw-r--r--drivers/video/aty/radeon_base.c2
-rw-r--r--drivers/video/aty/radeonfb.h2
-rw-r--r--drivers/video/console/Kconfig16
-rw-r--r--drivers/video/console/fbcon.c366
-rw-r--r--drivers/video/controlfb.c2
-rw-r--r--drivers/video/cyblafb.c21
-rw-r--r--drivers/video/epson1355fb.c21
-rw-r--r--drivers/video/fbmem.c299
-rw-r--r--drivers/video/fm2fb.c16
-rw-r--r--drivers/video/gbefb.c41
-rw-r--r--drivers/video/i810/i810.h2
-rw-r--r--drivers/video/intelfb/intelfb.h2
-rw-r--r--drivers/video/logo/Kconfig5
-rw-r--r--drivers/video/logo/Makefile2
-rw-r--r--drivers/video/logo/logo_spe_clut224.ppm283
-rw-r--r--drivers/video/macfb.c93
-rw-r--r--drivers/video/macmodes.c5
-rw-r--r--drivers/video/macmodes.h8
-rw-r--r--drivers/video/matrox/matroxfb_accel.c11
-rw-r--r--drivers/video/matrox/matroxfb_base.c4
-rw-r--r--drivers/video/matrox/matroxfb_base.h2
-rw-r--r--drivers/video/matrox/matroxfb_crtc2.c6
-rw-r--r--drivers/video/matrox/matroxfb_crtc2.h2
-rw-r--r--drivers/video/matrox/matroxfb_maven.c9
-rw-r--r--drivers/video/nvidia/nv_hw.c62
-rw-r--r--drivers/video/nvidia/nv_setup.c12
-rw-r--r--drivers/video/nvidia/nv_type.h1
-rw-r--r--drivers/video/nvidia/nvidia.c9
-rw-r--r--drivers/video/offb.c2
-rw-r--r--drivers/video/omap/Kconfig58
-rw-r--r--drivers/video/omap/Makefile29
-rw-r--r--drivers/video/omap/blizzard.c1568
-rw-r--r--drivers/video/omap/dispc.c1502
-rw-r--r--drivers/video/omap/dispc.h43
-rw-r--r--drivers/video/omap/hwa742.c1077
-rw-r--r--drivers/video/omap/lcd_h3.c141
-rw-r--r--drivers/video/omap/lcd_h4.c117
-rw-r--r--drivers/video/omap/lcd_inn1510.c124
-rw-r--r--drivers/video/omap/lcd_inn1610.c150
-rw-r--r--drivers/video/omap/lcd_osk.c144
-rw-r--r--drivers/video/omap/lcd_palmte.c123
-rw-r--r--drivers/video/omap/lcd_palmtt.c127
-rw-r--r--drivers/video/omap/lcd_palmz71.c123
-rw-r--r--drivers/video/omap/lcd_sx1.c334
-rw-r--r--drivers/video/omap/lcdc.c893
-rw-r--r--drivers/video/omap/lcdc.h7
-rw-r--r--drivers/video/omap/omapfb_main.c1941
-rw-r--r--drivers/video/omap/rfbi.c588
-rw-r--r--drivers/video/omap/sossi.c686
-rw-r--r--drivers/video/platinumfb.c2
-rw-r--r--drivers/video/pm2fb.c202
-rw-r--r--drivers/video/pm3fb.c270
-rw-r--r--drivers/video/ps3fb.c1
-rw-r--r--drivers/video/pvr2fb.c7
-rw-r--r--drivers/video/q40fb.c2
-rw-r--r--drivers/video/riva/riva_hw.c7
-rw-r--r--drivers/video/sgivwfb.c2
-rw-r--r--drivers/video/sis/sis.h2
-rw-r--r--drivers/video/sis/sis_main.c6
-rw-r--r--drivers/video/tgafb.c2
-rw-r--r--drivers/video/tridentfb.c30
-rw-r--r--drivers/video/tx3912fb.c2
-rw-r--r--drivers/video/vt8623fb.c42
-rw-r--r--drivers/w1/w1.c1
-rw-r--r--fs/9p/v9fs.c2
-rw-r--r--fs/Kconfig1
-rw-r--r--fs/anon_inodes.c1
-rw-r--r--fs/attr.c4
-rw-r--r--fs/buffer.c58
-rw-r--r--fs/cifs/cifsfs.c1
-rw-r--r--fs/cifs/connect.c1
-rw-r--r--fs/cifs/export.c1
-rw-r--r--fs/dcache.c7
-rw-r--r--fs/dquot.c7
-rw-r--r--fs/ecryptfs/inode.c4
-rw-r--r--fs/efs/namei.c32
-rw-r--r--fs/efs/super.c2
-rw-r--r--fs/exportfs/expfs.c439
-rw-r--r--fs/ext2/acl.c2
-rw-r--r--fs/ext2/ioctl.c4
-rw-r--r--fs/ext2/super.c1
-rw-r--r--fs/ext3/acl.c2
-rw-r--r--fs/ext3/ioctl.c6
-rw-r--r--fs/ext3/super.c1
-rw-r--r--fs/ext4/acl.c2
-rw-r--r--fs/ext4/ioctl.c6
-rw-r--r--fs/ext4/super.c1
-rw-r--r--fs/fat/inode.c1
-rw-r--r--fs/fcntl.c2
-rw-r--r--fs/generic_acl.c2
-rw-r--r--fs/gfs2/acl.c2
-rw-r--r--fs/gfs2/ops_export.c1
-rw-r--r--fs/hfsplus/ioctl.c2
-rw-r--r--fs/inode.c17
-rw-r--r--fs/isofs/isofs.h1
-rw-r--r--fs/jffs2/acl.c2
-rw-r--r--fs/jffs2/background.c1
-rw-r--r--fs/jfs/ioctl.c2
-rw-r--r--fs/jfs/jfs_inode.h1
-rw-r--r--fs/jfs/namei.c32
-rw-r--r--fs/jfs/super.c2
-rw-r--r--fs/jfs/xattr.c2
-rw-r--r--fs/lockd/svc.c29
-rw-r--r--fs/mbcache.c9
-rw-r--r--fs/namei.c2
-rw-r--r--fs/nfs/callback.c2
-rw-r--r--fs/nfs/super.c10
-rw-r--r--fs/nfsd/auth.c18
-rw-r--r--fs/nfsd/export.c289
-rw-r--r--fs/nfsd/lockd.c1
-rw-r--r--fs/nfsd/nfs4acl.c12
-rw-r--r--fs/nfsd/nfs4callback.c2
-rw-r--r--fs/nfsd/nfs4idmap.c13
-rw-r--r--fs/nfsd/nfs4proc.c35
-rw-r--r--fs/nfsd/nfs4state.c46
-rw-r--r--fs/nfsd/nfs4xdr.c101
-rw-r--r--fs/nfsd/nfsctl.c3
-rw-r--r--fs/nfsd/nfsfh.c51
-rw-r--r--fs/nfsd/nfsproc.c3
-rw-r--r--fs/nfsd/nfssvc.c12
-rw-r--r--fs/nfsd/vfs.c110
-rw-r--r--fs/ntfs/namei.c1
-rw-r--r--fs/ocfs2/export.h2
-rw-r--r--fs/ocfs2/file.c3
-rw-r--r--fs/ocfs2/ioctl.c2
-rw-r--r--fs/proc/base.c2
-rw-r--r--fs/ramfs/inode.c1
-rw-r--r--fs/reiserfs/inode.c1
-rw-r--r--fs/reiserfs/ioctl.c5
-rw-r--r--fs/reiserfs/super.c1
-rw-r--r--fs/reiserfs/xattr_acl.c2
-rw-r--r--fs/udf/super.c2
-rw-r--r--fs/utimes.c2
-rw-r--r--fs/xattr.c3
-rw-r--r--fs/xfs/linux-2.6/xfs_buf.c14
-rw-r--r--fs/xfs/linux-2.6/xfs_super.c1
-rw-r--r--fs/xfs/linux-2.6/xfs_super.h2
-rw-r--r--fs/xfs/quota/xfs_qm.c10
-rw-r--r--include/asm-alpha/fb.h13
-rw-r--r--include/asm-alpha/page.h3
-rw-r--r--include/asm-alpha/termios.h4
-rw-r--r--include/asm-arm/fb.h19
-rw-r--r--include/asm-arm/pgtable.h6
-rw-r--r--include/asm-arm26/fb.h12
-rw-r--r--include/asm-avr32/fb.h21
-rw-r--r--include/asm-blackfin/fb.h12
-rw-r--r--include/asm-cris/fb.h12
-rw-r--r--include/asm-cris/page.h3
-rw-r--r--include/asm-frv/fb.h12
-rw-r--r--include/asm-frv/pgtable.h8
-rw-r--r--include/asm-generic/bug.h2
-rw-r--r--include/asm-generic/pgtable.h44
-rw-r--r--include/asm-generic/unaligned.h16
-rw-r--r--include/asm-h8300/fb.h12
-rw-r--r--include/asm-h8300/page.h3
-rw-r--r--include/asm-i386/fb.h17
-rw-r--r--include/asm-i386/page.h3
-rw-r--r--include/asm-i386/pgtable.h32
-rw-r--r--include/asm-ia64/fb.h23
-rw-r--r--include/asm-ia64/ioctls.h4
-rw-r--r--include/asm-ia64/page.h13
-rw-r--r--include/asm-ia64/pgtable.h23
-rw-r--r--include/asm-ia64/termbits.h5
-rw-r--r--include/asm-ia64/termios.h6
-rw-r--r--include/asm-m32r/fb.h19
-rw-r--r--include/asm-m32r/page.h3
-rw-r--r--include/asm-m32r/pgtable.h6
-rw-r--r--include/asm-m68k/fb.h34
-rw-r--r--include/asm-m68knommu/fb.h12
-rw-r--r--include/asm-m68knommu/page.h3
-rw-r--r--include/asm-mips/fb.h19
-rw-r--r--include/asm-mips/sibyte/bcm1480_regs.h30
-rw-r--r--include/asm-mips/sibyte/sb1250_regs.h76
-rw-r--r--include/asm-mips/sibyte/sb1250_uart.h7
-rw-r--r--include/asm-parisc/fb.h19
-rw-r--r--include/asm-parisc/pgtable.h16
-rw-r--r--include/asm-powerpc/fb.h21
-rw-r--r--include/asm-powerpc/kprobes.h4
-rw-r--r--include/asm-powerpc/pgtable-ppc32.h7
-rw-r--r--include/asm-powerpc/pgtable-ppc64.h31
-rw-r--r--include/asm-ppc/pgtable.h7
-rw-r--r--include/asm-s390/fb.h12
-rw-r--r--include/asm-s390/page.h3
-rw-r--r--include/asm-s390/pgtable.h58
-rw-r--r--include/asm-sh/fb.h19
-rw-r--r--include/asm-sh64/fb.h19
-rw-r--r--include/asm-sparc/fb.h12
-rw-r--r--include/asm-sparc64/fb.h18
-rw-r--r--include/asm-v850/fb.h12
-rw-r--r--include/asm-x86_64/fb.h19
-rw-r--r--include/asm-x86_64/page.h3
-rw-r--r--include/asm-x86_64/pgtable.h8
-rw-r--r--include/asm-xtensa/fb.h12
-rw-r--r--include/asm-xtensa/pgtable.h12
-rw-r--r--include/linux/blkdev.h2
-rw-r--r--include/linux/crc7.h14
-rw-r--r--include/linux/efs_fs.h1
-rw-r--r--include/linux/exportfs.h126
-rw-r--r--include/linux/fb.h3
-rw-r--r--include/linux/freezer.h14
-rw-r--r--include/linux/fs.h125
-rw-r--r--include/linux/fsl_devices.h2
-rw-r--r--include/linux/gfp.h19
-rw-r--r--include/linux/highmem.h51
-rw-r--r--include/linux/hugetlb.h2
-rw-r--r--include/linux/kallsyms.h6
-rw-r--r--include/linux/kernel.h1
-rw-r--r--include/linux/kernelcapi.h2
-rw-r--r--include/linux/limits.h2
-rw-r--r--include/linux/linux_logo.h8
-rw-r--r--include/linux/lockd/bind.h9
-rw-r--r--include/linux/magic.h1
-rw-r--r--include/linux/mempolicy.h6
-rw-r--r--include/linux/mm.h39
-rw-r--r--include/linux/mmzone.h28
-rw-r--r--include/linux/nfsd/export.h41
-rw-r--r--include/linux/nfsd/interface.h13
-rw-r--r--include/linux/nfsd/nfsd.h9
-rw-r--r--include/linux/nfsd/state.h3
-rw-r--r--include/linux/nfsd/xdr4.h7
-rw-r--r--include/linux/notifier.h3
-rw-r--r--include/linux/ptrace.h2
-rw-r--r--include/linux/raid/bitmap.h6
-rw-r--r--include/linux/raid/md_k.h2
-rw-r--r--include/linux/rtc/m48t59.h57
-rw-r--r--include/linux/serial_core.h3
-rw-r--r--include/linux/slab.h90
-rw-r--r--include/linux/slab_def.h34
-rw-r--r--include/linux/slub_def.h29
-rw-r--r--include/linux/smp.h13
-rw-r--r--include/linux/spi/spi.h1
-rw-r--r--include/linux/spi/spi_bitbang.h1
-rw-r--r--include/linux/spi/tle62x0.h24
-rw-r--r--include/linux/sunrpc/gss_api.h1
-rw-r--r--include/linux/sunrpc/svc.h2
-rw-r--r--include/linux/sunrpc/svcauth.h1
-rw-r--r--include/linux/sunrpc/svcauth_gss.h1
-rw-r--r--include/linux/swap.h3
-rw-r--r--include/linux/time.h3
-rw-r--r--include/linux/vmstat.h5
-rw-r--r--include/linux/vt_kern.h2
-rw-r--r--include/net/scm.h2
-rw-r--r--include/video/tgafb.h1
-rw-r--r--init/Kconfig2
-rw-r--r--init/do_mounts_initrd.c7
-rw-r--r--ipc/msg.c4
-rw-r--r--ipc/sem.c2
-rw-r--r--kernel/audit.c1
-rw-r--r--kernel/auditfilter.c12
-rw-r--r--kernel/cpu.c16
-rw-r--r--kernel/cpuset.c3
-rw-r--r--kernel/exit.c6
-rw-r--r--kernel/fork.c2
-rw-r--r--kernel/kallsyms.c16
-rw-r--r--kernel/lockdep.c4
-rw-r--r--kernel/module.c10
-rw-r--r--kernel/panic.c5
-rw-r--r--kernel/ptrace.c19
-rw-r--r--kernel/rcutorture.c4
-rw-r--r--kernel/rtmutex-tester.c1
-rw-r--r--kernel/sched.c3
-rw-r--r--kernel/softirq.c3
-rw-r--r--kernel/softlockup.c2
-rw-r--r--kernel/sysctl.c12
-rw-r--r--kernel/time/timer_list.c2
-rw-r--r--kernel/time/timer_stats.c2
-rw-r--r--kernel/timer.c4
-rw-r--r--kernel/workqueue.c15
-rw-r--r--lib/Kconfig8
-rw-r--r--lib/Makefile1
-rw-r--r--lib/crc7.c68
-rw-r--r--lib/genalloc.c3
-rw-r--r--mm/Kconfig4
-rw-r--r--mm/Makefile4
-rw-r--r--mm/allocpercpu.c9
-rw-r--r--mm/filemap.c72
-rw-r--r--mm/highmem.c7
-rw-r--r--mm/hugetlb.c25
-rw-r--r--mm/memory.c9
-rw-r--r--mm/mempolicy.c10
-rw-r--r--mm/mempool.c3
-rw-r--r--mm/migrate.c3
-rw-r--r--mm/page-writeback.c1
-rw-r--r--mm/page_alloc.c292
-rw-r--r--mm/pdflush.c1
-rw-r--r--mm/shmem.c8
-rw-r--r--mm/slab.c86
-rw-r--r--mm/slob.c56
-rw-r--r--mm/slub.c668
-rw-r--r--mm/swap_state.c3
-rw-r--r--mm/truncate.c2
-rw-r--r--mm/util.c48
-rw-r--r--mm/vmalloc.c6
-rw-r--r--mm/vmscan.c212
-rw-r--r--mm/vmstat.c2
-rw-r--r--net/bluetooth/bnep/core.c2
-rw-r--r--net/bluetooth/cmtp/core.c2
-rw-r--r--net/bluetooth/hidp/core.c2
-rw-r--r--net/bluetooth/rfcomm/core.c2
-rw-r--r--net/core/pktgen.c2
-rw-r--r--net/sunrpc/auth.c11
-rw-r--r--net/sunrpc/auth_gss/gss_krb5_mech.c1
-rw-r--r--net/sunrpc/auth_gss/gss_mech_switch.c14
-rw-r--r--net/sunrpc/auth_gss/gss_spkm3_mech.c1
-rw-r--r--net/sunrpc/auth_gss/svcauth_gss.c32
-rw-r--r--net/sunrpc/svcauth_unix.c7
-rw-r--r--scripts/kallsyms.c4
-rw-r--r--security/selinux/hooks.c2
-rw-r--r--sound/oss/Kconfig77
-rw-r--r--sound/oss/trident.c367
-rw-r--r--sound/pci/mixart/mixart_hwdep.c1
571 files changed, 27007 insertions, 5479 deletions
diff --git a/Documentation/DocBook/kernel-api.tmpl b/Documentation/DocBook/kernel-api.tmpl
index 46bcff2849bd..fd2ef4d29b6d 100644
--- a/Documentation/DocBook/kernel-api.tmpl
+++ b/Documentation/DocBook/kernel-api.tmpl
@@ -139,8 +139,10 @@ X!Ilib/string.c
139!Elib/cmdline.c 139!Elib/cmdline.c
140 </sect1> 140 </sect1>
141 141
142 <sect1><title>CRC Functions</title> 142 <sect1 id="crc"><title>CRC Functions</title>
143!Elib/crc7.c
143!Elib/crc16.c 144!Elib/crc16.c
145!Elib/crc-itu-t.c
144!Elib/crc32.c 146!Elib/crc32.c
145!Elib/crc-ccitt.c 147!Elib/crc-ccitt.c
146 </sect1> 148 </sect1>
diff --git a/Documentation/ecryptfs.txt b/Documentation/filesystems/ecryptfs.txt
index 01d8a08351ac..01d8a08351ac 100644
--- a/Documentation/ecryptfs.txt
+++ b/Documentation/filesystems/ecryptfs.txt
diff --git a/Documentation/filesystems/proc.txt b/Documentation/filesystems/proc.txt
index 460b892d089e..ebffdffb3d99 100644
--- a/Documentation/filesystems/proc.txt
+++ b/Documentation/filesystems/proc.txt
@@ -1348,6 +1348,21 @@ nr_hugepages configures number of hugetlb page reserved for the system.
1348hugetlb_shm_group contains group id that is allowed to create SysV shared 1348hugetlb_shm_group contains group id that is allowed to create SysV shared
1349memory segment using hugetlb page. 1349memory segment using hugetlb page.
1350 1350
1351hugepages_treat_as_movable
1352--------------------------
1353
1354This parameter is only useful when kernelcore= is specified at boot time to
1355create ZONE_MOVABLE for pages that may be reclaimed or migrated. Huge pages
1356are not movable so are not normally allocated from ZONE_MOVABLE. A non-zero
1357value written to hugepages_treat_as_movable allows huge pages to be allocated
1358from ZONE_MOVABLE.
1359
1360Once enabled, the ZONE_MOVABLE is treated as an area of memory the huge
1361pages pool can easily grow or shrink within. Assuming that applications are
1362not running that mlock() a lot of memory, it is likely the huge pages pool
1363can grow to the size of ZONE_MOVABLE by repeatedly entering the desired value
1364into nr_hugepages and triggering page reclaim.
1365
1351laptop_mode 1366laptop_mode
1352----------- 1367-----------
1353 1368
diff --git a/Documentation/kernel-parameters.txt b/Documentation/kernel-parameters.txt
index 8363ad3ba018..9a541486fb7e 100644
--- a/Documentation/kernel-parameters.txt
+++ b/Documentation/kernel-parameters.txt
@@ -817,6 +817,32 @@ and is between 256 and 4096 characters. It is defined in the file
817 js= [HW,JOY] Analog joystick 817 js= [HW,JOY] Analog joystick
818 See Documentation/input/joystick.txt. 818 See Documentation/input/joystick.txt.
819 819
820 kernelcore=nn[KMG] [KNL,IA-32,IA-64,PPC,X86-64] This parameter
821 specifies the amount of memory usable by the kernel
822 for non-movable allocations. The requested amount is
823 spread evenly throughout all nodes in the system. The
824 remaining memory in each node is used for Movable
825 pages. In the event, a node is too small to have both
826 kernelcore and Movable pages, kernelcore pages will
827 take priority and other nodes will have a larger number
828 of kernelcore pages. The Movable zone is used for the
829 allocation of pages that may be reclaimed or moved
830 by the page migration subsystem. This means that
831 HugeTLB pages may not be allocated from this zone.
832 Note that allocations like PTEs-from-HighMem still
833 use the HighMem zone if it exists, and the Normal
834 zone if it does not.
835
836 movablecore=nn[KMG] [KNL,IA-32,IA-64,PPC,X86-64] This parameter
837 is similar to kernelcore except it specifies the
838 amount of memory used for migratable allocations.
839 If both kernelcore and movablecore is specified,
840 then kernelcore will be at *least* the specified
841 value but may be more. If movablecore on its own
842 is specified, the administrator must be careful
843 that the amount of memory usable for all allocations
844 is not too small.
845
820 keepinitrd [HW,ARM] 846 keepinitrd [HW,ARM]
821 847
822 kstack=N [IA-32,X86-64] Print N words from the kernel stack 848 kstack=N [IA-32,X86-64] Print N words from the kernel stack
diff --git a/Documentation/oops-tracing.txt b/Documentation/oops-tracing.txt
index 23e6dde7eea6..7f60dfe642ca 100644
--- a/Documentation/oops-tracing.txt
+++ b/Documentation/oops-tracing.txt
@@ -251,6 +251,8 @@ characters, each representing a particular tainted value.
251 7: 'U' if a user or user application specifically requested that the 251 7: 'U' if a user or user application specifically requested that the
252 Tainted flag be set, ' ' otherwise. 252 Tainted flag be set, ' ' otherwise.
253 253
254 8: 'D' if the kernel has died recently, i.e. there was an OOPS or BUG.
255
254The primary reason for the 'Tainted: ' string is to tell kernel 256The primary reason for the 'Tainted: ' string is to tell kernel
255debuggers if this is a clean kernel or if anything unusual has 257debuggers if this is a clean kernel or if anything unusual has
256occurred. Tainting is permanent: even if an offending module is 258occurred. Tainting is permanent: even if an offending module is
diff --git a/Documentation/power/freezing-of-tasks.txt b/Documentation/power/freezing-of-tasks.txt
new file mode 100644
index 000000000000..af1a282c71a3
--- /dev/null
+++ b/Documentation/power/freezing-of-tasks.txt
@@ -0,0 +1,160 @@
1Freezing of tasks
2 (C) 2007 Rafael J. Wysocki <rjw@sisk.pl>, GPL
3
4I. What is the freezing of tasks?
5
6The freezing of tasks is a mechanism by which user space processes and some
7kernel threads are controlled during hibernation or system-wide suspend (on some
8architectures).
9
10II. How does it work?
11
12There are four per-task flags used for that, PF_NOFREEZE, PF_FROZEN, TIF_FREEZE
13and PF_FREEZER_SKIP (the last one is auxiliary). The tasks that have
14PF_NOFREEZE unset (all user space processes and some kernel threads) are
15regarded as 'freezable' and treated in a special way before the system enters a
16suspend state as well as before a hibernation image is created (in what follows
17we only consider hibernation, but the description also applies to suspend).
18
19Namely, as the first step of the hibernation procedure the function
20freeze_processes() (defined in kernel/power/process.c) is called. It executes
21try_to_freeze_tasks() that sets TIF_FREEZE for all of the freezable tasks and
22sends a fake signal to each of them. A task that receives such a signal and has
23TIF_FREEZE set, should react to it by calling the refrigerator() function
24(defined in kernel/power/process.c), which sets the task's PF_FROZEN flag,
25changes its state to TASK_UNINTERRUPTIBLE and makes it loop until PF_FROZEN is
26cleared for it. Then, we say that the task is 'frozen' and therefore the set of
27functions handling this mechanism is called 'the freezer' (these functions are
28defined in kernel/power/process.c and include/linux/freezer.h). User space
29processes are generally frozen before kernel threads.
30
31It is not recommended to call refrigerator() directly. Instead, it is
32recommended to use the try_to_freeze() function (defined in
33include/linux/freezer.h), that checks the task's TIF_FREEZE flag and makes the
34task enter refrigerator() if the flag is set.
35
36For user space processes try_to_freeze() is called automatically from the
37signal-handling code, but the freezable kernel threads need to call it
38explicitly in suitable places. The code to do this may look like the following:
39
40 do {
41 hub_events();
42 wait_event_interruptible(khubd_wait,
43 !list_empty(&hub_event_list));
44 try_to_freeze();
45 } while (!signal_pending(current));
46
47(from drivers/usb/core/hub.c::hub_thread()).
48
49If a freezable kernel thread fails to call try_to_freeze() after the freezer has
50set TIF_FREEZE for it, the freezing of tasks will fail and the entire
51hibernation operation will be cancelled. For this reason, freezable kernel
52threads must call try_to_freeze() somewhere.
53
54After the system memory state has been restored from a hibernation image and
55devices have been reinitialized, the function thaw_processes() is called in
56order to clear the PF_FROZEN flag for each frozen task. Then, the tasks that
57have been frozen leave refrigerator() and continue running.
58
59III. Which kernel threads are freezable?
60
61Kernel threads are not freezable by default. However, a kernel thread may clear
62PF_NOFREEZE for itself by calling set_freezable() (the resetting of PF_NOFREEZE
63directly is strongly discouraged). From this point it is regarded as freezable
64and must call try_to_freeze() in a suitable place.
65
66IV. Why do we do that?
67
68Generally speaking, there is a couple of reasons to use the freezing of tasks:
69
701. The principal reason is to prevent filesystems from being damaged after
71hibernation. At the moment we have no simple means of checkpointing
72filesystems, so if there are any modifications made to filesystem data and/or
73metadata on disks, we cannot bring them back to the state from before the
74modifications. At the same time each hibernation image contains some
75filesystem-related information that must be consistent with the state of the
76on-disk data and metadata after the system memory state has been restored from
77the image (otherwise the filesystems will be damaged in a nasty way, usually
78making them almost impossible to repair). We therefore freeze tasks that might
79cause the on-disk filesystems' data and metadata to be modified after the
80hibernation image has been created and before the system is finally powered off.
81The majority of these are user space processes, but if any of the kernel threads
82may cause something like this to happen, they have to be freezable.
83
842. The second reason is to prevent user space processes and some kernel threads
85from interfering with the suspending and resuming of devices. A user space
86process running on a second CPU while we are suspending devices may, for
87example, be troublesome and without the freezing of tasks we would need some
88safeguards against race conditions that might occur in such a case.
89
90Although Linus Torvalds doesn't like the freezing of tasks, he said this in one
91of the discussions on LKML (http://lkml.org/lkml/2007/4/27/608):
92
93"RJW:> Why we freeze tasks at all or why we freeze kernel threads?
94
95Linus: In many ways, 'at all'.
96
97I _do_ realize the IO request queue issues, and that we cannot actually do
98s2ram with some devices in the middle of a DMA. So we want to be able to
99avoid *that*, there's no question about that. And I suspect that stopping
100user threads and then waiting for a sync is practically one of the easier
101ways to do so.
102
103So in practice, the 'at all' may become a 'why freeze kernel threads?' and
104freezing user threads I don't find really objectionable."
105
106Still, there are kernel threads that may want to be freezable. For example, if
107a kernel that belongs to a device driver accesses the device directly, it in
108principle needs to know when the device is suspended, so that it doesn't try to
109access it at that time. However, if the kernel thread is freezable, it will be
110frozen before the driver's .suspend() callback is executed and it will be
111thawed after the driver's .resume() callback has run, so it won't be accessing
112the device while it's suspended.
113
1143. Another reason for freezing tasks is to prevent user space processes from
115realizing that hibernation (or suspend) operation takes place. Ideally, user
116space processes should not notice that such a system-wide operation has occurred
117and should continue running without any problems after the restore (or resume
118from suspend). Unfortunately, in the most general case this is quite difficult
119to achieve without the freezing of tasks. Consider, for example, a process
120that depends on all CPUs being online while it's running. Since we need to
121disable nonboot CPUs during the hibernation, if this process is not frozen, it
122may notice that the number of CPUs has changed and may start to work incorrectly
123because of that.
124
125V. Are there any problems related to the freezing of tasks?
126
127Yes, there are.
128
129First of all, the freezing of kernel threads may be tricky if they depend one
130on another. For example, if kernel thread A waits for a completion (in the
131TASK_UNINTERRUPTIBLE state) that needs to be done by freezable kernel thread B
132and B is frozen in the meantime, then A will be blocked until B is thawed, which
133may be undesirable. That's why kernel threads are not freezable by default.
134
135Second, there are the following two problems related to the freezing of user
136space processes:
1371. Putting processes into an uninterruptible sleep distorts the load average.
1382. Now that we have FUSE, plus the framework for doing device drivers in
139userspace, it gets even more complicated because some userspace processes are
140now doing the sorts of things that kernel threads do
141(https://lists.linux-foundation.org/pipermail/linux-pm/2007-May/012309.html).
142
143The problem 1. seems to be fixable, although it hasn't been fixed so far. The
144other one is more serious, but it seems that we can work around it by using
145hibernation (and suspend) notifiers (in that case, though, we won't be able to
146avoid the realization by the user space processes that the hibernation is taking
147place).
148
149There are also problems that the freezing of tasks tends to expose, although
150they are not directly related to it. For example, if request_firmware() is
151called from a device driver's .resume() routine, it will timeout and eventually
152fail, because the user land process that should respond to the request is frozen
153at this point. So, seemingly, the failure is due to the freezing of tasks.
154Suppose, however, that the firmware file is located on a filesystem accessible
155only through another device that hasn't been resumed yet. In that case,
156request_firmware() will fail regardless of whether or not the freezing of tasks
157is used. Consequently, the problem is not really related to the freezing of
158tasks, since it generally exists anyway. [The solution to this particular
159problem is to keep the firmware in memory after it's loaded for the first time
160and upload if from memory to the device whenever necessary.]
diff --git a/Documentation/power/kernel_threads.txt b/Documentation/power/kernel_threads.txt
deleted file mode 100644
index fb57784986b1..000000000000
--- a/Documentation/power/kernel_threads.txt
+++ /dev/null
@@ -1,40 +0,0 @@
1KERNEL THREADS
2
3
4Freezer
5
6Upon entering a suspended state the system will freeze all
7tasks. This is done by delivering pseudosignals. This affects
8kernel threads, too. To successfully freeze a kernel thread
9the thread has to check for the pseudosignal and enter the
10refrigerator. Code to do this looks like this:
11
12 do {
13 hub_events();
14 wait_event_interruptible(khubd_wait, !list_empty(&hub_event_list));
15 try_to_freeze();
16 } while (!signal_pending(current));
17
18from drivers/usb/core/hub.c::hub_thread()
19
20
21The Unfreezable
22
23Some kernel threads however, must not be frozen. The kernel must
24be able to finish pending IO operations and later on be able to
25write the memory image to disk. Kernel threads needed to do IO
26must stay awake. Such threads must mark themselves unfreezable
27like this:
28
29 /*
30 * This thread doesn't need any user-level access,
31 * so get rid of all our resources.
32 */
33 daemonize("usb-storage");
34
35 current->flags |= PF_NOFREEZE;
36
37from drivers/usb/storage/usb.c::usb_stor_control_thread()
38
39Such drivers are themselves responsible for staying quiet during
40the actual snapshotting.
diff --git a/Documentation/power/swsusp.txt b/Documentation/power/swsusp.txt
index 152b510d1bbb..aea7e9209667 100644
--- a/Documentation/power/swsusp.txt
+++ b/Documentation/power/swsusp.txt
@@ -140,21 +140,11 @@ should be sent to the mailing list available through the suspend2
140website, and not to the Linux Kernel Mailing List. We are working 140website, and not to the Linux Kernel Mailing List. We are working
141toward merging suspend2 into the mainline kernel. 141toward merging suspend2 into the mainline kernel.
142 142
143Q: A kernel thread must voluntarily freeze itself (call 'refrigerator'). 143Q: What is the freezing of tasks and why are we using it?
144I found some kernel threads that don't do it, and they don't freeze
145so the system can't sleep. Is this a known behavior?
146
147A: All such kernel threads need to be fixed, one by one. Select the
148place where the thread is safe to be frozen (no kernel semaphores
149should be held at that point and it must be safe to sleep there), and
150add:
151
152 try_to_freeze();
153
154If the thread is needed for writing the image to storage, you should
155instead set the PF_NOFREEZE process flag when creating the thread (and
156be very careful).
157 144
145A: The freezing of tasks is a mechanism by which user space processes and some
146kernel threads are controlled during hibernation or system-wide suspend (on some
147architectures). See freezing-of-tasks.txt for details.
158 148
159Q: What is the difference between "platform" and "shutdown"? 149Q: What is the difference between "platform" and "shutdown"?
160 150
diff --git a/Documentation/rtc.txt b/Documentation/rtc.txt
index 7c701b88d6d5..c931d613f641 100644
--- a/Documentation/rtc.txt
+++ b/Documentation/rtc.txt
@@ -385,7 +385,7 @@ test_PIE:
385 /* not all RTCs support periodic IRQs */ 385 /* not all RTCs support periodic IRQs */
386 if (errno == ENOTTY) { 386 if (errno == ENOTTY) {
387 fprintf(stderr, "\nNo periodic IRQ support\n"); 387 fprintf(stderr, "\nNo periodic IRQ support\n");
388 return 0; 388 goto done;
389 } 389 }
390 perror("RTC_IRQP_READ ioctl"); 390 perror("RTC_IRQP_READ ioctl");
391 exit(errno); 391 exit(errno);
diff --git a/Documentation/spi/spi-lm70llp b/Documentation/spi/spi-lm70llp
new file mode 100644
index 000000000000..154bd02220b9
--- /dev/null
+++ b/Documentation/spi/spi-lm70llp
@@ -0,0 +1,69 @@
1spi_lm70llp : LM70-LLP parport-to-SPI adapter
2==============================================
3
4Supported board/chip:
5 * National Semiconductor LM70 LLP evaluation board
6 Datasheet: http://www.national.com/pf/LM/LM70.html
7
8Author:
9 Kaiwan N Billimoria <kaiwan@designergraphix.com>
10
11Description
12-----------
13This driver provides glue code connecting a National Semiconductor LM70 LLP
14temperature sensor evaluation board to the kernel's SPI core subsystem.
15
16In effect, this driver turns the parallel port interface on the eval board
17into a SPI bus with a single device, which will be driven by the generic
18LM70 driver (drivers/hwmon/lm70.c).
19
20The hardware interfacing on the LM70 LLP eval board is as follows:
21
22 Parallel LM70 LLP
23 Port Direction JP2 Header
24 ----------- --------- ----------------
25 D0 2 - -
26 D1 3 --> V+ 5
27 D2 4 --> V+ 5
28 D3 5 --> V+ 5
29 D4 6 --> V+ 5
30 D5 7 --> nCS 8
31 D6 8 --> SCLK 3
32 D7 9 --> SI/O 5
33 GND 25 - GND 7
34 Select 13 <-- SI/O 1
35 ----------- --------- ----------------
36
37Note that since the LM70 uses a "3-wire" variant of SPI, the SI/SO pin
38is connected to both pin D7 (as Master Out) and Select (as Master In)
39using an arrangement that lets either the parport or the LM70 pull the
40pin low. This can't be shared with true SPI devices, but other 3-wire
41devices might share the same SI/SO pin.
42
43The bitbanger routine in this driver (lm70_txrx) is called back from
44the bound "hwmon/lm70" protocol driver through its sysfs hook, using a
45spi_write_then_read() call. It performs Mode 0 (SPI/Microwire) bitbanging.
46The lm70 driver then interprets the resulting digital temperature value
47and exports it through sysfs.
48
49A "gotcha": National Semiconductor's LM70 LLP eval board circuit schematic
50shows that the SI/O line from the LM70 chip is connected to the base of a
51transistor Q1 (and also a pullup, and a zener diode to D7); while the
52collector is tied to VCC.
53
54Interpreting this circuit, when the LM70 SI/O line is High (or tristate
55and not grounded by the host via D7), the transistor conducts and switches
56the collector to zero, which is reflected on pin 13 of the DB25 parport
57connector. When SI/O is Low (driven by the LM70 or the host) on the other
58hand, the transistor is cut off and the voltage tied to its collector is
59reflected on pin 13 as a High level.
60
61So: the getmiso inline routine in this driver takes this fact into account,
62inverting the value read at pin 13.
63
64
65Thanks to
66---------
67o David Brownell for mentoring the SPI-side driver development.
68o Dr. Craig Hollabaugh for the (early) "manual" bitbanging driver version.
69o Nadir Billimoria for help interpreting the circuit schematic.
diff --git a/Documentation/sysctl/vm.txt b/Documentation/sysctl/vm.txt
index df3ff2095f9d..a0ccc5b60260 100644
--- a/Documentation/sysctl/vm.txt
+++ b/Documentation/sysctl/vm.txt
@@ -38,7 +38,8 @@ Currently, these files are in /proc/sys/vm:
38 38
39dirty_ratio, dirty_background_ratio, dirty_expire_centisecs, 39dirty_ratio, dirty_background_ratio, dirty_expire_centisecs,
40dirty_writeback_centisecs, vfs_cache_pressure, laptop_mode, 40dirty_writeback_centisecs, vfs_cache_pressure, laptop_mode,
41block_dump, swap_token_timeout, drop-caches: 41block_dump, swap_token_timeout, drop-caches,
42hugepages_treat_as_movable:
42 43
43See Documentation/filesystems/proc.txt 44See Documentation/filesystems/proc.txt
44 45
diff --git a/Documentation/vm/slub.txt b/Documentation/vm/slub.txt
index df812b03b65d..d17f324db9f5 100644
--- a/Documentation/vm/slub.txt
+++ b/Documentation/vm/slub.txt
@@ -127,13 +127,20 @@ SLUB Debug output
127 127
128Here is a sample of slub debug output: 128Here is a sample of slub debug output:
129 129
130*** SLUB kmalloc-8: Redzone Active@0xc90f6d20 slab 0xc528c530 offset=3360 flags=0x400000c3 inuse=61 freelist=0xc90f6d58 130====================================================================
131 Bytes b4 0xc90f6d10: 00 00 00 00 00 00 00 00 5a 5a 5a 5a 5a 5a 5a 5a ........ZZZZZZZZ 131BUG kmalloc-8: Redzone overwritten
132 Object 0xc90f6d20: 31 30 31 39 2e 30 30 35 1019.005 132--------------------------------------------------------------------
133 Redzone 0xc90f6d28: 00 cc cc cc . 133
134FreePointer 0xc90f6d2c -> 0xc90f6d58 134INFO: 0xc90f6d28-0xc90f6d2b. First byte 0x00 instead of 0xcc
135Last alloc: get_modalias+0x61/0xf5 jiffies_ago=53 cpu=1 pid=554 135INFO: Slab 0xc528c530 flags=0x400000c3 inuse=61 fp=0xc90f6d58
136Filler 0xc90f6d50: 5a 5a 5a 5a 5a 5a 5a 5a ZZZZZZZZ 136INFO: Object 0xc90f6d20 @offset=3360 fp=0xc90f6d58
137INFO: Allocated in get_modalias+0x61/0xf5 age=53 cpu=1 pid=554
138
139Bytes b4 0xc90f6d10: 00 00 00 00 00 00 00 00 5a 5a 5a 5a 5a 5a 5a 5a ........ZZZZZZZZ
140 Object 0xc90f6d20: 31 30 31 39 2e 30 30 35 1019.005
141 Redzone 0xc90f6d28: 00 cc cc cc .
142 Padding 0xc90f6d50: 5a 5a 5a 5a 5a 5a 5a 5a ZZZZZZZZ
143
137 [<c010523d>] dump_trace+0x63/0x1eb 144 [<c010523d>] dump_trace+0x63/0x1eb
138 [<c01053df>] show_trace_log_lvl+0x1a/0x2f 145 [<c01053df>] show_trace_log_lvl+0x1a/0x2f
139 [<c010601d>] show_trace+0x12/0x14 146 [<c010601d>] show_trace+0x12/0x14
@@ -155,74 +162,108 @@ Filler 0xc90f6d50: 5a 5a 5a 5a 5a 5a 5a 5a ZZZZZZZZ
155 [<c0104112>] sysenter_past_esp+0x5f/0x99 162 [<c0104112>] sysenter_past_esp+0x5f/0x99
156 [<b7f7b410>] 0xb7f7b410 163 [<b7f7b410>] 0xb7f7b410
157 ======================= 164 =======================
158@@@ SLUB kmalloc-8: Restoring redzone (0xcc) from 0xc90f6d28-0xc90f6d2b
159 165
166FIX kmalloc-8: Restoring Redzone 0xc90f6d28-0xc90f6d2b=0xcc
160 167
168If SLUB encounters a corrupted object (full detection requires the kernel
169to be booted with slub_debug) then the following output will be dumped
170into the syslog:
161 171
162If SLUB encounters a corrupted object then it will perform the following 1721. Description of the problem encountered
163actions:
164
1651. Isolation and report of the issue
166 173
167This will be a message in the system log starting with 174This will be a message in the system log starting with
168 175
169*** SLUB <slab cache affected>: <What went wrong>@<object address> 176===============================================
170offset=<offset of object into slab> flags=<slabflags> 177BUG <slab cache affected>: <What went wrong>
171inuse=<objects in use in this slab> freelist=<first free object in slab> 178-----------------------------------------------
172 179
1732. Report on how the problem was dealt with in order to ensure the continued 180INFO: <corruption start>-<corruption_end> <more info>
174operation of the system. 181INFO: Slab <address> <slab information>
182INFO: Object <address> <object information>
183INFO: Allocated in <kernel function> age=<jiffies since alloc> cpu=<allocated by
184 cpu> pid=<pid of the process>
185INFO: Freed in <kernel function> age=<jiffies since free> cpu=<freed by cpu>
186 pid=<pid of the process>
175 187
176These are messages in the system log beginning with 188(Object allocation / free information is only available if SLAB_STORE_USER is
177 189set for the slab. slub_debug sets that option)
178@@@ SLUB <slab cache affected>: <corrective action taken>
179 190
1912. The object contents if an object was involved.
180 192
181In the above sample SLUB found that the Redzone of an active object has 193Various types of lines can follow the BUG SLUB line:
182been overwritten. Here a string of 8 characters was written into a slab that
183has the length of 8 characters. However, a 8 character string needs a
184terminating 0. That zero has overwritten the first byte of the Redzone field.
185After reporting the details of the issue encountered the @@@ SLUB message
186tell us that SLUB has restored the redzone to its proper value and then
187system operations continue.
188
189Various types of lines can follow the @@@ SLUB line:
190 194
191Bytes b4 <address> : <bytes> 195Bytes b4 <address> : <bytes>
192 Show a few bytes before the object where the problem was detected. 196 Shows a few bytes before the object where the problem was detected.
193 Can be useful if the corruption does not stop with the start of the 197 Can be useful if the corruption does not stop with the start of the
194 object. 198 object.
195 199
196Object <address> : <bytes> 200Object <address> : <bytes>
197 The bytes of the object. If the object is inactive then the bytes 201 The bytes of the object. If the object is inactive then the bytes
198 typically contain poisoning values. Any non-poison value shows a 202 typically contain poison values. Any non-poison value shows a
199 corruption by a write after free. 203 corruption by a write after free.
200 204
201Redzone <address> : <bytes> 205Redzone <address> : <bytes>
202 The redzone following the object. The redzone is used to detect 206 The Redzone following the object. The Redzone is used to detect
203 writes after the object. All bytes should always have the same 207 writes after the object. All bytes should always have the same
204 value. If there is any deviation then it is due to a write after 208 value. If there is any deviation then it is due to a write after
205 the object boundary. 209 the object boundary.
206 210
207Freepointer 211 (Redzone information is only available if SLAB_RED_ZONE is set.
208 The pointer to the next free object in the slab. May become 212 slub_debug sets that option)
209 corrupted if overwriting continues after the red zone.
210
211Last alloc:
212Last free:
213 Shows the address from which the object was allocated/freed last.
214 We note the pid, the time and the CPU that did so. This is usually
215 the most useful information to figure out where things went wrong.
216 Here get_modalias() did an kmalloc(8) instead of a kmalloc(9).
217 213
218Filler <address> : <bytes> 214Padding <address> : <bytes>
219 Unused data to fill up the space in order to get the next object 215 Unused data to fill up the space in order to get the next object
220 properly aligned. In the debug case we make sure that there are 216 properly aligned. In the debug case we make sure that there are
221 at least 4 bytes of filler. This allow for the detection of writes 217 at least 4 bytes of padding. This allows the detection of writes
222 before the object. 218 before the object.
223 219
224Following the filler will be a stackdump. That stackdump describes the 2203. A stackdump
225location where the error was detected. The cause of the corruption is more 221
226likely to be found by looking at the information about the last alloc / free. 222The stackdump describes the location where the error was detected. The cause
223of the corruption may more likely be found by looking at the function that
224allocated or freed the object.
225
2264. Report on how the problem was dealt with in order to ensure the continued
227operation of the system.
228
229These are messages in the system log beginning with
230
231FIX <slab cache affected>: <corrective action taken>
232
233In the above sample SLUB found that the Redzone of an active object has
234been overwritten. Here a string of 8 characters was written into a slab that
235has the length of 8 characters. However, an 8 character string needs a
236terminating 0. That zero has overwritten the first byte of the Redzone field.
237After reporting the details of the issue encountered the FIX SLUB message
238tells us that SLUB has restored the Redzone to its proper value and then
239system operations continue.
240
241Emergency operations:
242---------------------
243
244Minimal debugging (sanity checks alone) can be enabled by booting with
245
246 slub_debug=F
247
248This will generally be enough to enable the resiliency features of slub
249which will keep the system running even if a bad kernel component
250keeps corrupting objects. This may be important for production systems.
251Performance will be impacted by the sanity checks and there will be a
252continual stream of error messages to the syslog but no additional memory
253will be used (unlike full debugging).
254
255No guarantees. The kernel component still needs to be fixed. Performance
256may be optimized further by locating the slab that experiences corruption
257and enabling debugging only for that cache
258
259I.e.
260
261 slub_debug=F,dentry
262
263If the corruption occurs by writing after the end of the object then it
264may be advisable to enable a Redzone to avoid corrupting the beginning
265of other objects.
266
267 slub_debug=FZ,dentry
227 268
228Christoph Lameter, <clameter@sgi.com>, May 23, 2007 269Christoph Lameter, <clameter@sgi.com>, May 30, 2007
diff --git a/arch/alpha/kernel/ptrace.c b/arch/alpha/kernel/ptrace.c
index 0cd060598f9a..83a781842266 100644
--- a/arch/alpha/kernel/ptrace.c
+++ b/arch/alpha/kernel/ptrace.c
@@ -315,9 +315,7 @@ do_sys_ptrace(long request, long pid, long addr, long data,
315 /* When I and D space are separate, this will have to be fixed. */ 315 /* When I and D space are separate, this will have to be fixed. */
316 case PTRACE_POKETEXT: /* write the word at location addr. */ 316 case PTRACE_POKETEXT: /* write the word at location addr. */
317 case PTRACE_POKEDATA: 317 case PTRACE_POKEDATA:
318 tmp = data; 318 ret = generic_ptrace_pokedata(child, addr, data);
319 copied = access_process_vm(child, addr, &tmp, sizeof(tmp), 1);
320 ret = (copied == sizeof(tmp)) ? 0 : -EIO;
321 break; 319 break;
322 320
323 case PTRACE_POKEUSR: /* write the specified register */ 321 case PTRACE_POKEUSR: /* write the specified register */
diff --git a/arch/alpha/kernel/smp.c b/arch/alpha/kernel/smp.c
index 80cfb758ee2b..b28731437c31 100644
--- a/arch/alpha/kernel/smp.c
+++ b/arch/alpha/kernel/smp.c
@@ -65,7 +65,7 @@ enum ipi_message_type {
65}; 65};
66 66
67/* Set to a secondary's cpuid when it comes online. */ 67/* Set to a secondary's cpuid when it comes online. */
68static int smp_secondary_alive __initdata = 0; 68static int smp_secondary_alive __devinitdata = 0;
69 69
70/* Which cpus ids came online. */ 70/* Which cpus ids came online. */
71cpumask_t cpu_online_map; 71cpumask_t cpu_online_map;
@@ -173,7 +173,7 @@ smp_callin(void)
173} 173}
174 174
175/* Wait until hwrpb->txrdy is clear for cpu. Return -1 on timeout. */ 175/* Wait until hwrpb->txrdy is clear for cpu. Return -1 on timeout. */
176static int __init 176static int __devinit
177wait_for_txrdy (unsigned long cpumask) 177wait_for_txrdy (unsigned long cpumask)
178{ 178{
179 unsigned long timeout; 179 unsigned long timeout;
@@ -358,7 +358,7 @@ secondary_cpu_start(int cpuid, struct task_struct *idle)
358/* 358/*
359 * Bring one cpu online. 359 * Bring one cpu online.
360 */ 360 */
361static int __init 361static int __devinit
362smp_boot_one_cpu(int cpuid) 362smp_boot_one_cpu(int cpuid)
363{ 363{
364 struct task_struct *idle; 364 struct task_struct *idle;
diff --git a/arch/alpha/kernel/traps.c b/arch/alpha/kernel/traps.c
index d6e665d567bd..ec0f05e0d8ff 100644
--- a/arch/alpha/kernel/traps.c
+++ b/arch/alpha/kernel/traps.c
@@ -184,6 +184,7 @@ die_if_kernel(char * str, struct pt_regs *regs, long err, unsigned long *r9_15)
184#endif 184#endif
185 printk("%s(%d): %s %ld\n", current->comm, current->pid, str, err); 185 printk("%s(%d): %s %ld\n", current->comm, current->pid, str, err);
186 dik_show_regs(regs, r9_15); 186 dik_show_regs(regs, r9_15);
187 add_taint(TAINT_DIE);
187 dik_show_trace((unsigned long *)(regs+1)); 188 dik_show_trace((unsigned long *)(regs+1));
188 dik_show_code((unsigned int *)regs->pc); 189 dik_show_code((unsigned int *)regs->pc);
189 190
diff --git a/arch/alpha/lib/checksum.c b/arch/alpha/lib/checksum.c
index ab3761c437a8..8698e0746f9f 100644
--- a/arch/alpha/lib/checksum.c
+++ b/arch/alpha/lib/checksum.c
@@ -69,6 +69,7 @@ __wsum csum_tcpudp_nofold(__be32 saddr, __be32 daddr,
69 result = (result & 0xffffffff) + (result >> 32); 69 result = (result & 0xffffffff) + (result >> 32);
70 return (__force __wsum)result; 70 return (__force __wsum)result;
71} 71}
72EXPORT_SYMBOL(csum_tcpudp_nofold);
72 73
73/* 74/*
74 * Do a 64-bit checksum on an arbitrary memory area.. 75 * Do a 64-bit checksum on an arbitrary memory area..
diff --git a/arch/arm/kernel/ptrace.c b/arch/arm/kernel/ptrace.c
index 6f2f46c2e406..78c9f1a3d41f 100644
--- a/arch/arm/kernel/ptrace.c
+++ b/arch/arm/kernel/ptrace.c
@@ -657,7 +657,6 @@ static int ptrace_setcrunchregs(struct task_struct *tsk, void __user *ufp)
657 657
658long arch_ptrace(struct task_struct *child, long request, long addr, long data) 658long arch_ptrace(struct task_struct *child, long request, long addr, long data)
659{ 659{
660 unsigned long tmp;
661 int ret; 660 int ret;
662 661
663 switch (request) { 662 switch (request) {
@@ -666,12 +665,7 @@ long arch_ptrace(struct task_struct *child, long request, long addr, long data)
666 */ 665 */
667 case PTRACE_PEEKTEXT: 666 case PTRACE_PEEKTEXT:
668 case PTRACE_PEEKDATA: 667 case PTRACE_PEEKDATA:
669 ret = access_process_vm(child, addr, &tmp, 668 ret = generic_ptrace_peekdata(child, addr, data);
670 sizeof(unsigned long), 0);
671 if (ret == sizeof(unsigned long))
672 ret = put_user(tmp, (unsigned long __user *) data);
673 else
674 ret = -EIO;
675 break; 669 break;
676 670
677 case PTRACE_PEEKUSR: 671 case PTRACE_PEEKUSR:
@@ -683,12 +677,7 @@ long arch_ptrace(struct task_struct *child, long request, long addr, long data)
683 */ 677 */
684 case PTRACE_POKETEXT: 678 case PTRACE_POKETEXT:
685 case PTRACE_POKEDATA: 679 case PTRACE_POKEDATA:
686 ret = access_process_vm(child, addr, &data, 680 ret = generic_ptrace_pokedata(child, addr, data);
687 sizeof(unsigned long), 1);
688 if (ret == sizeof(unsigned long))
689 ret = 0;
690 else
691 ret = -EIO;
692 break; 681 break;
693 682
694 case PTRACE_POKEUSR: 683 case PTRACE_POKEUSR:
diff --git a/arch/arm/kernel/traps.c b/arch/arm/kernel/traps.c
index 237f4999b9a1..f2114bcf09d5 100644
--- a/arch/arm/kernel/traps.c
+++ b/arch/arm/kernel/traps.c
@@ -249,6 +249,7 @@ NORET_TYPE void die(const char *str, struct pt_regs *regs, int err)
249 bust_spinlocks(1); 249 bust_spinlocks(1);
250 __die(str, err, thread, regs); 250 __die(str, err, thread, regs);
251 bust_spinlocks(0); 251 bust_spinlocks(0);
252 add_taint(TAINT_DIE);
252 spin_unlock_irq(&die_lock); 253 spin_unlock_irq(&die_lock);
253 254
254 if (in_interrupt()) 255 if (in_interrupt())
diff --git a/arch/arm/mach-at91/board-csb337.c b/arch/arm/mach-at91/board-csb337.c
index e18a41e61f0c..dde089922e3b 100644
--- a/arch/arm/mach-at91/board-csb337.c
+++ b/arch/arm/mach-at91/board-csb337.c
@@ -23,6 +23,7 @@
23#include <linux/mm.h> 23#include <linux/mm.h>
24#include <linux/module.h> 24#include <linux/module.h>
25#include <linux/platform_device.h> 25#include <linux/platform_device.h>
26#include <linux/i2c.h>
26#include <linux/spi/spi.h> 27#include <linux/spi/spi.h>
27#include <linux/mtd/physmap.h> 28#include <linux/mtd/physmap.h>
28 29
@@ -83,6 +84,13 @@ static struct at91_udc_data __initdata csb337_udc_data = {
83 .pullup_pin = AT91_PIN_PA24, 84 .pullup_pin = AT91_PIN_PA24,
84}; 85};
85 86
87static struct i2c_board_info __initdata csb337_i2c_devices[] = {
88 { I2C_BOARD_INFO("rtc-ds1307", 0x68),
89 .type = "ds1307",
90 },
91};
92
93
86static struct at91_cf_data __initdata csb337_cf_data = { 94static struct at91_cf_data __initdata csb337_cf_data = {
87 /* 95 /*
88 * connector P4 on the CSB 337 mates to 96 * connector P4 on the CSB 337 mates to
@@ -161,6 +169,8 @@ static void __init csb337_board_init(void)
161 at91_add_device_udc(&csb337_udc_data); 169 at91_add_device_udc(&csb337_udc_data);
162 /* I2C */ 170 /* I2C */
163 at91_add_device_i2c(); 171 at91_add_device_i2c();
172 i2c_register_board_info(0, csb337_i2c_devices,
173 ARRAY_SIZE(csb337_i2c_devices));
164 /* Compact Flash */ 174 /* Compact Flash */
165 at91_set_gpio_input(AT91_PIN_PB22, 1); /* IOIS16 */ 175 at91_set_gpio_input(AT91_PIN_PB22, 1); /* IOIS16 */
166 at91_add_device_cf(&csb337_cf_data); 176 at91_add_device_cf(&csb337_cf_data);
diff --git a/arch/arm/mach-iop32x/n2100.c b/arch/arm/mach-iop32x/n2100.c
index 390a97d39e5a..1873bd8cd1b2 100644
--- a/arch/arm/mach-iop32x/n2100.c
+++ b/arch/arm/mach-iop32x/n2100.c
@@ -25,6 +25,7 @@
25#include <linux/serial_core.h> 25#include <linux/serial_core.h>
26#include <linux/serial_8250.h> 26#include <linux/serial_8250.h>
27#include <linux/mtd/physmap.h> 27#include <linux/mtd/physmap.h>
28#include <linux/i2c.h>
28#include <linux/platform_device.h> 29#include <linux/platform_device.h>
29#include <linux/reboot.h> 30#include <linux/reboot.h>
30#include <asm/hardware.h> 31#include <asm/hardware.h>
@@ -199,6 +200,12 @@ static struct platform_device n2100_serial_device = {
199 .resource = &n2100_uart_resource, 200 .resource = &n2100_uart_resource,
200}; 201};
201 202
203static struct i2c_board_info __initdata n2100_i2c_devices[] = {
204 {
205 I2C_BOARD_INFO("rtc-rs5c372", 0x32),
206 .type = "rs5c372b",
207 },
208};
202 209
203/* 210/*
204 * Pull PCA9532 GPIO #8 low to power off the machine. 211 * Pull PCA9532 GPIO #8 low to power off the machine.
@@ -248,6 +255,9 @@ static void __init n2100_init_machine(void)
248 platform_device_register(&iop3xx_dma_0_channel); 255 platform_device_register(&iop3xx_dma_0_channel);
249 platform_device_register(&iop3xx_dma_1_channel); 256 platform_device_register(&iop3xx_dma_1_channel);
250 257
258 i2c_register_board_info(0, n2100_i2c_devices,
259 ARRAY_SIZE(n2100_i2c_devices));
260
251 pm_power_off = n2100_power_off; 261 pm_power_off = n2100_power_off;
252 262
253 init_timer(&power_button_poll_timer); 263 init_timer(&power_button_poll_timer);
diff --git a/arch/arm26/kernel/ptrace.c b/arch/arm26/kernel/ptrace.c
index 416927956721..0fefb86970c6 100644
--- a/arch/arm26/kernel/ptrace.c
+++ b/arch/arm26/kernel/ptrace.c
@@ -531,7 +531,6 @@ static int ptrace_setfpregs(struct task_struct *tsk, void *ufp)
531 531
532long arch_ptrace(struct task_struct *child, long request, long addr, long data) 532long arch_ptrace(struct task_struct *child, long request, long addr, long data)
533{ 533{
534 unsigned long tmp;
535 int ret; 534 int ret;
536 535
537 switch (request) { 536 switch (request) {
@@ -540,12 +539,7 @@ long arch_ptrace(struct task_struct *child, long request, long addr, long data)
540 */ 539 */
541 case PTRACE_PEEKTEXT: 540 case PTRACE_PEEKTEXT:
542 case PTRACE_PEEKDATA: 541 case PTRACE_PEEKDATA:
543 ret = access_process_vm(child, addr, &tmp, 542 ret = generic_ptrace_peekdata(child, addr, data);
544 sizeof(unsigned long), 0);
545 if (ret == sizeof(unsigned long))
546 ret = put_user(tmp, (unsigned long *) data);
547 else
548 ret = -EIO;
549 break; 543 break;
550 544
551 case PTRACE_PEEKUSR: 545 case PTRACE_PEEKUSR:
@@ -557,12 +551,7 @@ long arch_ptrace(struct task_struct *child, long request, long addr, long data)
557 */ 551 */
558 case PTRACE_POKETEXT: 552 case PTRACE_POKETEXT:
559 case PTRACE_POKEDATA: 553 case PTRACE_POKEDATA:
560 ret = access_process_vm(child, addr, &data, 554 ret = generic_ptrace_pokedata(child, addr, data);
561 sizeof(unsigned long), 1);
562 if (ret == sizeof(unsigned long))
563 ret = 0;
564 else
565 ret = -EIO;
566 break; 555 break;
567 556
568 case PTRACE_POKEUSR: 557 case PTRACE_POKEUSR:
diff --git a/arch/arm26/kernel/traps.c b/arch/arm26/kernel/traps.c
index d594fb59e945..2911e2eae80e 100644
--- a/arch/arm26/kernel/traps.c
+++ b/arch/arm26/kernel/traps.c
@@ -185,6 +185,7 @@ NORET_TYPE void die(const char *str, struct pt_regs *regs, int err)
185 printk("Internal error: %s: %x\n", str, err); 185 printk("Internal error: %s: %x\n", str, err);
186 printk("CPU: %d\n", smp_processor_id()); 186 printk("CPU: %d\n", smp_processor_id());
187 show_regs(regs); 187 show_regs(regs);
188 add_taint(TAINT_DIE);
188 printk("Process %s (pid: %d, stack limit = 0x%p)\n", 189 printk("Process %s (pid: %d, stack limit = 0x%p)\n",
189 current->comm, current->pid, end_of_stack(tsk)); 190 current->comm, current->pid, end_of_stack(tsk));
190 191
diff --git a/arch/avr32/kernel/ptrace.c b/arch/avr32/kernel/ptrace.c
index 3c36c2d16148..39060cbeb2a3 100644
--- a/arch/avr32/kernel/ptrace.c
+++ b/arch/avr32/kernel/ptrace.c
@@ -153,7 +153,6 @@ static int ptrace_setregs(struct task_struct *tsk, const void __user *uregs)
153 153
154long arch_ptrace(struct task_struct *child, long request, long addr, long data) 154long arch_ptrace(struct task_struct *child, long request, long addr, long data)
155{ 155{
156 unsigned long tmp;
157 int ret; 156 int ret;
158 157
159 pr_debug("arch_ptrace(%ld, %d, %#lx, %#lx)\n", 158 pr_debug("arch_ptrace(%ld, %d, %#lx, %#lx)\n",
@@ -166,11 +165,7 @@ long arch_ptrace(struct task_struct *child, long request, long addr, long data)
166 /* Read the word at location addr in the child process */ 165 /* Read the word at location addr in the child process */
167 case PTRACE_PEEKTEXT: 166 case PTRACE_PEEKTEXT:
168 case PTRACE_PEEKDATA: 167 case PTRACE_PEEKDATA:
169 ret = access_process_vm(child, addr, &tmp, sizeof(tmp), 0); 168 ret = generic_ptrace_peekdata(child, addr, data);
170 if (ret == sizeof(tmp))
171 ret = put_user(tmp, (unsigned long __user *)data);
172 else
173 ret = -EIO;
174 break; 169 break;
175 170
176 case PTRACE_PEEKUSR: 171 case PTRACE_PEEKUSR:
@@ -181,11 +176,7 @@ long arch_ptrace(struct task_struct *child, long request, long addr, long data)
181 /* Write the word in data at location addr */ 176 /* Write the word in data at location addr */
182 case PTRACE_POKETEXT: 177 case PTRACE_POKETEXT:
183 case PTRACE_POKEDATA: 178 case PTRACE_POKEDATA:
184 ret = access_process_vm(child, addr, &data, sizeof(data), 1); 179 ret = generic_ptrace_pokedata(child, addr, data);
185 if (ret == sizeof(data))
186 ret = 0;
187 else
188 ret = -EIO;
189 break; 180 break;
190 181
191 case PTRACE_POKEUSR: 182 case PTRACE_POKEUSR:
diff --git a/arch/avr32/kernel/traps.c b/arch/avr32/kernel/traps.c
index aaa792815cd7..9a73ce7eb50f 100644
--- a/arch/avr32/kernel/traps.c
+++ b/arch/avr32/kernel/traps.c
@@ -56,6 +56,7 @@ void NORET_TYPE die(const char *str, struct pt_regs *regs, long err)
56 show_regs_log_lvl(regs, KERN_EMERG); 56 show_regs_log_lvl(regs, KERN_EMERG);
57 show_stack_log_lvl(current, regs->sp, regs, KERN_EMERG); 57 show_stack_log_lvl(current, regs->sp, regs, KERN_EMERG);
58 bust_spinlocks(0); 58 bust_spinlocks(0);
59 add_taint(TAINT_DIE);
59 spin_unlock_irq(&die_lock); 60 spin_unlock_irq(&die_lock);
60 61
61 if (in_interrupt()) 62 if (in_interrupt())
diff --git a/arch/cris/arch-v10/kernel/ptrace.c b/arch/cris/arch-v10/kernel/ptrace.c
index fd2129a04586..f4f9db698b44 100644
--- a/arch/cris/arch-v10/kernel/ptrace.c
+++ b/arch/cris/arch-v10/kernel/ptrace.c
@@ -83,19 +83,9 @@ long arch_ptrace(struct task_struct *child, long request, long addr, long data)
83 switch (request) { 83 switch (request) {
84 /* Read word at location address. */ 84 /* Read word at location address. */
85 case PTRACE_PEEKTEXT: 85 case PTRACE_PEEKTEXT:
86 case PTRACE_PEEKDATA: { 86 case PTRACE_PEEKDATA:
87 unsigned long tmp; 87 ret = generic_ptrace_peekdata(child, addr, data);
88 int copied;
89
90 copied = access_process_vm(child, addr, &tmp, sizeof(tmp), 0);
91 ret = -EIO;
92
93 if (copied != sizeof(tmp))
94 break;
95
96 ret = put_user(tmp,datap);
97 break; 88 break;
98 }
99 89
100 /* Read the word at location address in the USER area. */ 90 /* Read the word at location address in the USER area. */
101 case PTRACE_PEEKUSR: { 91 case PTRACE_PEEKUSR: {
@@ -113,12 +103,7 @@ long arch_ptrace(struct task_struct *child, long request, long addr, long data)
113 /* Write the word at location address. */ 103 /* Write the word at location address. */
114 case PTRACE_POKETEXT: 104 case PTRACE_POKETEXT:
115 case PTRACE_POKEDATA: 105 case PTRACE_POKEDATA:
116 ret = 0; 106 ret = generic_ptrace_pokedata(child, addr, data);
117
118 if (access_process_vm(child, addr, &data, sizeof(data), 1) == sizeof(data))
119 break;
120
121 ret = -EIO;
122 break; 107 break;
123 108
124 /* Write the word at location address in the USER area. */ 109 /* Write the word at location address in the USER area. */
diff --git a/arch/cris/arch-v32/kernel/ptrace.c b/arch/cris/arch-v32/kernel/ptrace.c
index d4d57b741334..38ece0cd47cb 100644
--- a/arch/cris/arch-v32/kernel/ptrace.c
+++ b/arch/cris/arch-v32/kernel/ptrace.c
@@ -146,12 +146,7 @@ long arch_ptrace(struct task_struct *child, long request, long addr, long data)
146 /* Write the word at location address. */ 146 /* Write the word at location address. */
147 case PTRACE_POKETEXT: 147 case PTRACE_POKETEXT:
148 case PTRACE_POKEDATA: 148 case PTRACE_POKEDATA:
149 ret = 0; 149 ret = generic_ptrace_pokedata(child, addr, data);
150
151 if (access_process_vm(child, addr, &data, sizeof(data), 1) == sizeof(data))
152 break;
153
154 ret = -EIO;
155 break; 150 break;
156 151
157 /* Write the word at location address in the USER area. */ 152 /* Write the word at location address in the USER area. */
diff --git a/arch/frv/kernel/ptrace.c b/arch/frv/kernel/ptrace.c
index ce88fb95ee59..709e9bdc6126 100644
--- a/arch/frv/kernel/ptrace.c
+++ b/arch/frv/kernel/ptrace.c
@@ -112,20 +112,12 @@ long arch_ptrace(struct task_struct *child, long request, long addr, long data)
112 switch (request) { 112 switch (request) {
113 /* when I and D space are separate, these will need to be fixed. */ 113 /* when I and D space are separate, these will need to be fixed. */
114 case PTRACE_PEEKTEXT: /* read word at location addr. */ 114 case PTRACE_PEEKTEXT: /* read word at location addr. */
115 case PTRACE_PEEKDATA: { 115 case PTRACE_PEEKDATA:
116 int copied;
117
118 ret = -EIO; 116 ret = -EIO;
119 if (is_user_addr_valid(child, addr, sizeof(tmp)) < 0) 117 if (is_user_addr_valid(child, addr, sizeof(tmp)) < 0)
120 break; 118 break;
121 119 ret = generic_ptrace_peekdata(child, addr, data);
122 copied = access_process_vm(child, addr, &tmp, sizeof(tmp), 0);
123 if (copied != sizeof(tmp))
124 break;
125
126 ret = put_user(tmp,(unsigned long *) data);
127 break; 120 break;
128 }
129 121
130 /* read the word at location addr in the USER area. */ 122 /* read the word at location addr in the USER area. */
131 case PTRACE_PEEKUSR: { 123 case PTRACE_PEEKUSR: {
@@ -176,9 +168,7 @@ long arch_ptrace(struct task_struct *child, long request, long addr, long data)
176 ret = -EIO; 168 ret = -EIO;
177 if (is_user_addr_valid(child, addr, sizeof(tmp)) < 0) 169 if (is_user_addr_valid(child, addr, sizeof(tmp)) < 0)
178 break; 170 break;
179 if (access_process_vm(child, addr, &data, sizeof(data), 1) != sizeof(data)) 171 ret = generic_ptrace_pokedata(child, addr, data);
180 break;
181 ret = 0;
182 break; 172 break;
183 173
184 case PTRACE_POKEUSR: /* write the word at location addr in the USER area */ 174 case PTRACE_POKEUSR: /* write the word at location addr in the USER area */
diff --git a/arch/h8300/kernel/ptrace.c b/arch/h8300/kernel/ptrace.c
index 8a7a991b8f76..d32bbf02fc48 100644
--- a/arch/h8300/kernel/ptrace.c
+++ b/arch/h8300/kernel/ptrace.c
@@ -111,10 +111,7 @@ long arch_ptrace(struct task_struct *child, long request, long addr, long data)
111 /* when I and D space are separate, this will have to be fixed. */ 111 /* when I and D space are separate, this will have to be fixed. */
112 case PTRACE_POKETEXT: /* write the word at location addr. */ 112 case PTRACE_POKETEXT: /* write the word at location addr. */
113 case PTRACE_POKEDATA: 113 case PTRACE_POKEDATA:
114 ret = 0; 114 ret = generic_ptrace_pokedata(child, addr, data);
115 if (access_process_vm(child, addr, &data, sizeof(data), 1) == sizeof(data))
116 break;
117 ret = -EIO;
118 break; 115 break;
119 116
120 case PTRACE_POKEUSR: /* write the word at location addr in the USER area */ 117 case PTRACE_POKEUSR: /* write the word at location addr in the USER area */
diff --git a/arch/i386/Kconfig b/arch/i386/Kconfig
index 8531a540ca8c..c7c9c2a15fab 100644
--- a/arch/i386/Kconfig
+++ b/arch/i386/Kconfig
@@ -1222,8 +1222,8 @@ if INSTRUMENTATION
1222source "arch/i386/oprofile/Kconfig" 1222source "arch/i386/oprofile/Kconfig"
1223 1223
1224config KPROBES 1224config KPROBES
1225 bool "Kprobes (EXPERIMENTAL)" 1225 bool "Kprobes"
1226 depends on KALLSYMS && EXPERIMENTAL && MODULES 1226 depends on KALLSYMS && MODULES
1227 help 1227 help
1228 Kprobes allows you to trap at almost any kernel address and 1228 Kprobes allows you to trap at almost any kernel address and
1229 execute a callback function. register_kprobe() establishes 1229 execute a callback function. register_kprobe() establishes
diff --git a/arch/i386/Makefile b/arch/i386/Makefile
index bd28f9f9b4b7..181cc29a7c4f 100644
--- a/arch/i386/Makefile
+++ b/arch/i386/Makefile
@@ -108,6 +108,7 @@ drivers-$(CONFIG_PCI) += arch/i386/pci/
108# must be linked after kernel/ 108# must be linked after kernel/
109drivers-$(CONFIG_OPROFILE) += arch/i386/oprofile/ 109drivers-$(CONFIG_OPROFILE) += arch/i386/oprofile/
110drivers-$(CONFIG_PM) += arch/i386/power/ 110drivers-$(CONFIG_PM) += arch/i386/power/
111drivers-$(CONFIG_FB) += arch/i386/video/
111 112
112CFLAGS += $(mflags-y) 113CFLAGS += $(mflags-y)
113AFLAGS += $(mflags-y) 114AFLAGS += $(mflags-y)
diff --git a/arch/i386/kernel/apm.c b/arch/i386/kernel/apm.c
index 4112afe712b9..47001d50a083 100644
--- a/arch/i386/kernel/apm.c
+++ b/arch/i386/kernel/apm.c
@@ -222,6 +222,7 @@
222#include <linux/capability.h> 222#include <linux/capability.h>
223#include <linux/device.h> 223#include <linux/device.h>
224#include <linux/kernel.h> 224#include <linux/kernel.h>
225#include <linux/freezer.h>
225#include <linux/smp.h> 226#include <linux/smp.h>
226#include <linux/dmi.h> 227#include <linux/dmi.h>
227#include <linux/suspend.h> 228#include <linux/suspend.h>
@@ -2311,7 +2312,6 @@ static int __init apm_init(void)
2311 remove_proc_entry("apm", NULL); 2312 remove_proc_entry("apm", NULL);
2312 return err; 2313 return err;
2313 } 2314 }
2314 kapmd_task->flags |= PF_NOFREEZE;
2315 wake_up_process(kapmd_task); 2315 wake_up_process(kapmd_task);
2316 2316
2317 if (num_online_cpus() > 1 && !smp ) { 2317 if (num_online_cpus() > 1 && !smp ) {
diff --git a/arch/i386/kernel/cpu/mcheck/therm_throt.c b/arch/i386/kernel/cpu/mcheck/therm_throt.c
index 7ba7c3abd3a4..1203dc5ab87a 100644
--- a/arch/i386/kernel/cpu/mcheck/therm_throt.c
+++ b/arch/i386/kernel/cpu/mcheck/therm_throt.c
@@ -134,19 +134,21 @@ static __cpuinit int thermal_throttle_cpu_callback(struct notifier_block *nfb,
134 int err; 134 int err;
135 135
136 sys_dev = get_cpu_sysdev(cpu); 136 sys_dev = get_cpu_sysdev(cpu);
137 mutex_lock(&therm_cpu_lock);
138 switch (action) { 137 switch (action) {
139 case CPU_ONLINE: 138 case CPU_ONLINE:
140 case CPU_ONLINE_FROZEN: 139 case CPU_ONLINE_FROZEN:
140 mutex_lock(&therm_cpu_lock);
141 err = thermal_throttle_add_dev(sys_dev); 141 err = thermal_throttle_add_dev(sys_dev);
142 mutex_unlock(&therm_cpu_lock);
142 WARN_ON(err); 143 WARN_ON(err);
143 break; 144 break;
144 case CPU_DEAD: 145 case CPU_DEAD:
145 case CPU_DEAD_FROZEN: 146 case CPU_DEAD_FROZEN:
147 mutex_lock(&therm_cpu_lock);
146 thermal_throttle_remove_dev(sys_dev); 148 thermal_throttle_remove_dev(sys_dev);
149 mutex_unlock(&therm_cpu_lock);
147 break; 150 break;
148 } 151 }
149 mutex_unlock(&therm_cpu_lock);
150 return NOTIFY_OK; 152 return NOTIFY_OK;
151} 153}
152 154
diff --git a/arch/i386/kernel/efi.c b/arch/i386/kernel/efi.c
index a1808022ea19..2452c6fbe992 100644
--- a/arch/i386/kernel/efi.c
+++ b/arch/i386/kernel/efi.c
@@ -278,7 +278,7 @@ void efi_memmap_walk(efi_freemem_callback_t callback, void *arg)
278 struct range { 278 struct range {
279 unsigned long start; 279 unsigned long start;
280 unsigned long end; 280 unsigned long end;
281 } prev, curr; 281 } uninitialized_var(prev), curr;
282 efi_memory_desc_t *md; 282 efi_memory_desc_t *md;
283 unsigned long start, end; 283 unsigned long start, end;
284 void *p; 284 void *p;
diff --git a/arch/i386/kernel/io_apic.c b/arch/i386/kernel/io_apic.c
index 7f8b7af2b95f..21db8f56c9a1 100644
--- a/arch/i386/kernel/io_apic.c
+++ b/arch/i386/kernel/io_apic.c
@@ -667,6 +667,7 @@ static int balanced_irq(void *unused)
667 set_pending_irq(i, cpumask_of_cpu(0)); 667 set_pending_irq(i, cpumask_of_cpu(0));
668 } 668 }
669 669
670 set_freezable();
670 for ( ; ; ) { 671 for ( ; ; ) {
671 time_remaining = schedule_timeout_interruptible(time_remaining); 672 time_remaining = schedule_timeout_interruptible(time_remaining);
672 try_to_freeze(); 673 try_to_freeze();
diff --git a/arch/i386/kernel/nmi.c b/arch/i386/kernel/nmi.c
index fba121f7973f..03b7f5584d71 100644
--- a/arch/i386/kernel/nmi.c
+++ b/arch/i386/kernel/nmi.c
@@ -295,7 +295,7 @@ static unsigned int
295 last_irq_sums [NR_CPUS], 295 last_irq_sums [NR_CPUS],
296 alert_counter [NR_CPUS]; 296 alert_counter [NR_CPUS];
297 297
298void touch_nmi_watchdog (void) 298void touch_nmi_watchdog(void)
299{ 299{
300 if (nmi_watchdog > 0) { 300 if (nmi_watchdog > 0) {
301 unsigned cpu; 301 unsigned cpu;
@@ -304,8 +304,10 @@ void touch_nmi_watchdog (void)
304 * Just reset the alert counters, (other CPUs might be 304 * Just reset the alert counters, (other CPUs might be
305 * spinning on locks we hold): 305 * spinning on locks we hold):
306 */ 306 */
307 for_each_present_cpu (cpu) 307 for_each_present_cpu(cpu) {
308 alert_counter[cpu] = 0; 308 if (alert_counter[cpu])
309 alert_counter[cpu] = 0;
310 }
309 } 311 }
310 312
311 /* 313 /*
diff --git a/arch/i386/kernel/ptrace.c b/arch/i386/kernel/ptrace.c
index 0c0ceec5de00..1c075f58d1f9 100644
--- a/arch/i386/kernel/ptrace.c
+++ b/arch/i386/kernel/ptrace.c
@@ -358,17 +358,9 @@ long arch_ptrace(struct task_struct *child, long request, long addr, long data)
358 switch (request) { 358 switch (request) {
359 /* when I and D space are separate, these will need to be fixed. */ 359 /* when I and D space are separate, these will need to be fixed. */
360 case PTRACE_PEEKTEXT: /* read word at location addr. */ 360 case PTRACE_PEEKTEXT: /* read word at location addr. */
361 case PTRACE_PEEKDATA: { 361 case PTRACE_PEEKDATA:
362 unsigned long tmp; 362 ret = generic_ptrace_peekdata(child, addr, data);
363 int copied;
364
365 copied = access_process_vm(child, addr, &tmp, sizeof(tmp), 0);
366 ret = -EIO;
367 if (copied != sizeof(tmp))
368 break;
369 ret = put_user(tmp, datap);
370 break; 363 break;
371 }
372 364
373 /* read the word at location addr in the USER area. */ 365 /* read the word at location addr in the USER area. */
374 case PTRACE_PEEKUSR: { 366 case PTRACE_PEEKUSR: {
@@ -395,10 +387,7 @@ long arch_ptrace(struct task_struct *child, long request, long addr, long data)
395 /* when I and D space are separate, this will have to be fixed. */ 387 /* when I and D space are separate, this will have to be fixed. */
396 case PTRACE_POKETEXT: /* write the word at location addr. */ 388 case PTRACE_POKETEXT: /* write the word at location addr. */
397 case PTRACE_POKEDATA: 389 case PTRACE_POKEDATA:
398 ret = 0; 390 ret = generic_ptrace_pokedata(child, addr, data);
399 if (access_process_vm(child, addr, &data, sizeof(data), 1) == sizeof(data))
400 break;
401 ret = -EIO;
402 break; 391 break;
403 392
404 case PTRACE_POKEUSR: /* write the word at location addr in the USER area */ 393 case PTRACE_POKEUSR: /* write the word at location addr in the USER area */
diff --git a/arch/i386/kernel/smpcommon.c b/arch/i386/kernel/smpcommon.c
index 1868ae18eb4d..bbfe85a0f699 100644
--- a/arch/i386/kernel/smpcommon.c
+++ b/arch/i386/kernel/smpcommon.c
@@ -47,7 +47,7 @@ int smp_call_function(void (*func) (void *info), void *info, int nonatomic,
47EXPORT_SYMBOL(smp_call_function); 47EXPORT_SYMBOL(smp_call_function);
48 48
49/** 49/**
50 * smp_call_function_single - Run a function on another CPU 50 * smp_call_function_single - Run a function on a specific CPU
51 * @cpu: The target CPU. Cannot be the calling CPU. 51 * @cpu: The target CPU. Cannot be the calling CPU.
52 * @func: The function to run. This must be fast and non-blocking. 52 * @func: The function to run. This must be fast and non-blocking.
53 * @info: An arbitrary pointer to pass to the function. 53 * @info: An arbitrary pointer to pass to the function.
@@ -66,9 +66,11 @@ int smp_call_function_single(int cpu, void (*func) (void *info), void *info,
66 int ret; 66 int ret;
67 int me = get_cpu(); 67 int me = get_cpu();
68 if (cpu == me) { 68 if (cpu == me) {
69 WARN_ON(1); 69 local_irq_disable();
70 func(info);
71 local_irq_enable();
70 put_cpu(); 72 put_cpu();
71 return -EBUSY; 73 return 0;
72 } 74 }
73 75
74 ret = smp_call_function_mask(cpumask_of_cpu(cpu), func, info, wait); 76 ret = smp_call_function_mask(cpumask_of_cpu(cpu), func, info, wait);
diff --git a/arch/i386/kernel/traps.c b/arch/i386/kernel/traps.c
index 28bd1c5163ec..18c1c285836d 100644
--- a/arch/i386/kernel/traps.c
+++ b/arch/i386/kernel/traps.c
@@ -433,6 +433,7 @@ void die(const char * str, struct pt_regs * regs, long err)
433 433
434 bust_spinlocks(0); 434 bust_spinlocks(0);
435 die.lock_owner = -1; 435 die.lock_owner = -1;
436 add_taint(TAINT_DIE);
436 spin_unlock_irqrestore(&die.lock, flags); 437 spin_unlock_irqrestore(&die.lock, flags);
437 438
438 if (!regs) 439 if (!regs)
diff --git a/arch/i386/video/Makefile b/arch/i386/video/Makefile
new file mode 100644
index 000000000000..2c447c94adcc
--- /dev/null
+++ b/arch/i386/video/Makefile
@@ -0,0 +1 @@
obj-$(CONFIG_FB) += fbdev.o
diff --git a/arch/i386/video/fbdev.c b/arch/i386/video/fbdev.c
new file mode 100644
index 000000000000..48fb38d7d2c0
--- /dev/null
+++ b/arch/i386/video/fbdev.c
@@ -0,0 +1,32 @@
1/*
2 * arch/i386/video/fbdev.c - i386 Framebuffer
3 *
4 * Copyright (C) 2007 Antonino Daplas <adaplas@gmail.com>
5 *
6 * This file is subject to the terms and conditions of the GNU General Public
7 * License. See the file COPYING in the main directory of this archive
8 * for more details.
9 *
10 */
11#include <linux/fb.h>
12#include <linux/pci.h>
13
14int fb_is_primary_device(struct fb_info *info)
15{
16 struct device *device = info->device;
17 struct pci_dev *pci_dev = NULL;
18 struct resource *res = NULL;
19 int retval = 0;
20
21 if (device)
22 pci_dev = to_pci_dev(device);
23
24 if (pci_dev)
25 res = &pci_dev->resource[PCI_ROM_RESOURCE];
26
27 if (res && res->flags & IORESOURCE_ROM_SHADOW)
28 retval = 1;
29
30 return retval;
31}
32EXPORT_SYMBOL(fb_is_primary_device);
diff --git a/arch/ia64/Kconfig b/arch/ia64/Kconfig
index db9ddff95841..616c96e73483 100644
--- a/arch/ia64/Kconfig
+++ b/arch/ia64/Kconfig
@@ -582,8 +582,8 @@ menu "Instrumentation Support"
582source "arch/ia64/oprofile/Kconfig" 582source "arch/ia64/oprofile/Kconfig"
583 583
584config KPROBES 584config KPROBES
585 bool "Kprobes (EXPERIMENTAL)" 585 bool "Kprobes"
586 depends on KALLSYMS && EXPERIMENTAL && MODULES 586 depends on KALLSYMS && MODULES
587 help 587 help
588 Kprobes allows you to trap at almost any kernel address and 588 Kprobes allows you to trap at almost any kernel address and
589 execute a callback function. register_kprobe() establishes 589 execute a callback function. register_kprobe() establishes
diff --git a/arch/ia64/hp/common/sba_iommu.c b/arch/ia64/hp/common/sba_iommu.c
index c1dca226b479..cd4adf52f174 100644
--- a/arch/ia64/hp/common/sba_iommu.c
+++ b/arch/ia64/hp/common/sba_iommu.c
@@ -34,6 +34,7 @@
34#include <linux/efi.h> 34#include <linux/efi.h>
35#include <linux/nodemask.h> 35#include <linux/nodemask.h>
36#include <linux/bitops.h> /* hweight64() */ 36#include <linux/bitops.h> /* hweight64() */
37#include <linux/crash_dump.h>
37 38
38#include <asm/delay.h> /* ia64_get_itc() */ 39#include <asm/delay.h> /* ia64_get_itc() */
39#include <asm/io.h> 40#include <asm/io.h>
@@ -43,6 +44,8 @@
43 44
44#include <asm/acpi-ext.h> 45#include <asm/acpi-ext.h>
45 46
47extern int swiotlb_late_init_with_default_size (size_t size);
48
46#define PFX "IOC: " 49#define PFX "IOC: "
47 50
48/* 51/*
@@ -2026,11 +2029,24 @@ sba_init(void)
2026 if (!ia64_platform_is("hpzx1") && !ia64_platform_is("hpzx1_swiotlb")) 2029 if (!ia64_platform_is("hpzx1") && !ia64_platform_is("hpzx1_swiotlb"))
2027 return 0; 2030 return 0;
2028 2031
2032#if defined(CONFIG_IA64_GENERIC) && defined(CONFIG_CRASH_DUMP)
2033 /* If we are booting a kdump kernel, the sba_iommu will
2034 * cause devices that were not shutdown properly to MCA
2035 * as soon as they are turned back on. Our only option for
2036 * a successful kdump kernel boot is to use the swiotlb.
2037 */
2038 if (elfcorehdr_addr < ELFCORE_ADDR_MAX) {
2039 if (swiotlb_late_init_with_default_size(64 * (1<<20)) != 0)
2040 panic("Unable to initialize software I/O TLB:"
2041 " Try machvec=dig boot option");
2042 machvec_init("dig");
2043 return 0;
2044 }
2045#endif
2046
2029 acpi_bus_register_driver(&acpi_sba_ioc_driver); 2047 acpi_bus_register_driver(&acpi_sba_ioc_driver);
2030 if (!ioc_list) { 2048 if (!ioc_list) {
2031#ifdef CONFIG_IA64_GENERIC 2049#ifdef CONFIG_IA64_GENERIC
2032 extern int swiotlb_late_init_with_default_size (size_t size);
2033
2034 /* 2050 /*
2035 * If we didn't find something sba_iommu can claim, we 2051 * If we didn't find something sba_iommu can claim, we
2036 * need to setup the swiotlb and switch to the dig machvec. 2052 * need to setup the swiotlb and switch to the dig machvec.
diff --git a/arch/ia64/hp/sim/boot/fw-emu.c b/arch/ia64/hp/sim/boot/fw-emu.c
index 300acd913d9c..1189d035d316 100644
--- a/arch/ia64/hp/sim/boot/fw-emu.c
+++ b/arch/ia64/hp/sim/boot/fw-emu.c
@@ -329,11 +329,6 @@ sys_fw_init (const char *args, int arglen)
329 strcpy(sal_systab->product_id, "HP-simulator"); 329 strcpy(sal_systab->product_id, "HP-simulator");
330#endif 330#endif
331 331
332#ifdef CONFIG_IA64_SDV
333 strcpy(sal_systab->oem_id, "Intel");
334 strcpy(sal_systab->product_id, "SDV");
335#endif
336
337 /* fill in an entry point: */ 332 /* fill in an entry point: */
338 sal_ed->type = SAL_DESC_ENTRY_POINT; 333 sal_ed->type = SAL_DESC_ENTRY_POINT;
339 sal_ed->pal_proc = __pa(pal_desc[0]); 334 sal_ed->pal_proc = __pa(pal_desc[0]);
diff --git a/arch/ia64/hp/sim/simserial.c b/arch/ia64/hp/sim/simserial.c
index 324ea7565e2c..ef252df50e1e 100644
--- a/arch/ia64/hp/sim/simserial.c
+++ b/arch/ia64/hp/sim/simserial.c
@@ -36,10 +36,6 @@
36#include <asm/hw_irq.h> 36#include <asm/hw_irq.h>
37#include <asm/uaccess.h> 37#include <asm/uaccess.h>
38 38
39#ifdef CONFIG_KDB
40# include <linux/kdb.h>
41#endif
42
43#undef SIMSERIAL_DEBUG /* define this to get some debug information */ 39#undef SIMSERIAL_DEBUG /* define this to get some debug information */
44 40
45#define KEYBOARD_INTR 3 /* must match with simulator! */ 41#define KEYBOARD_INTR 3 /* must match with simulator! */
diff --git a/arch/ia64/kernel/efi.c b/arch/ia64/kernel/efi.c
index 75ec3478d8a2..73ca86d03810 100644
--- a/arch/ia64/kernel/efi.c
+++ b/arch/ia64/kernel/efi.c
@@ -28,6 +28,7 @@
28#include <linux/time.h> 28#include <linux/time.h>
29#include <linux/efi.h> 29#include <linux/efi.h>
30#include <linux/kexec.h> 30#include <linux/kexec.h>
31#include <linux/mm.h>
31 32
32#include <asm/io.h> 33#include <asm/io.h>
33#include <asm/kregs.h> 34#include <asm/kregs.h>
diff --git a/arch/ia64/kernel/fsys.S b/arch/ia64/kernel/fsys.S
index 8589e84a27c6..3f926c2dc708 100644
--- a/arch/ia64/kernel/fsys.S
+++ b/arch/ia64/kernel/fsys.S
@@ -247,6 +247,9 @@ ENTRY(fsys_gettimeofday)
247.time_redo: 247.time_redo:
248 .pred.rel.mutex p8,p9,p10 248 .pred.rel.mutex p8,p9,p10
249 ld4.acq r28 = [r29] // xtime_lock.sequence. Must come first for locking purposes 249 ld4.acq r28 = [r29] // xtime_lock.sequence. Must come first for locking purposes
250 ;;
251 and r28 = ~1,r28 // Make sequence even to force retry if odd
252 ;;
250(p8) mov r2 = ar.itc // CPU_TIMER. 36 clocks latency!!! 253(p8) mov r2 = ar.itc // CPU_TIMER. 36 clocks latency!!!
251 add r22 = IA64_TIME_INTERPOLATOR_LAST_COUNTER_OFFSET,r20 254 add r22 = IA64_TIME_INTERPOLATOR_LAST_COUNTER_OFFSET,r20
252(p9) ld8 r2 = [r30] // readq(ti->address). Could also have latency issues.. 255(p9) ld8 r2 = [r30] // readq(ti->address). Could also have latency issues..
@@ -284,7 +287,6 @@ EX(.fail_efault, probe.w.fault r31, 3) // This takes 5 cycles and we have spare
284(p15) ld8 r17 = [r19],-IA64_TIMESPEC_TV_NSEC_OFFSET 287(p15) ld8 r17 = [r19],-IA64_TIMESPEC_TV_NSEC_OFFSET
285(p7) cmp.ne p7,p0 = r25,r3 // if cmpxchg not successful redo 288(p7) cmp.ne p7,p0 = r25,r3 // if cmpxchg not successful redo
286 // simulate tbit.nz.or p7,p0 = r28,0 289 // simulate tbit.nz.or p7,p0 = r28,0
287 and r28 = ~1,r28 // Make sequence even to force retry if odd
288 getf.sig r2 = f8 290 getf.sig r2 = f8
289 mf 291 mf
290 add r8 = r8,r18 // Add time interpolator offset 292 add r8 = r8,r18 // Add time interpolator offset
diff --git a/arch/ia64/kernel/traps.c b/arch/ia64/kernel/traps.c
index 15ad85da15a9..3aeaf15e468b 100644
--- a/arch/ia64/kernel/traps.c
+++ b/arch/ia64/kernel/traps.c
@@ -69,6 +69,7 @@ die (const char *str, struct pt_regs *regs, long err)
69 69
70 bust_spinlocks(0); 70 bust_spinlocks(0);
71 die.lock_owner = -1; 71 die.lock_owner = -1;
72 add_taint(TAINT_DIE);
72 spin_unlock_irq(&die.lock); 73 spin_unlock_irq(&die.lock);
73 74
74 if (panic_on_oops) 75 if (panic_on_oops)
diff --git a/arch/ia64/lib/checksum.c b/arch/ia64/lib/checksum.c
index 4411d9baeb21..9fc955026f86 100644
--- a/arch/ia64/lib/checksum.c
+++ b/arch/ia64/lib/checksum.c
@@ -60,6 +60,7 @@ csum_tcpudp_nofold (__be32 saddr, __be32 daddr, unsigned short len,
60 result = (result & 0xffffffff) + (result >> 32); 60 result = (result & 0xffffffff) + (result >> 32);
61 return (__force __wsum)result; 61 return (__force __wsum)result;
62} 62}
63EXPORT_SYMBOL(csum_tcpudp_nofold);
63 64
64extern unsigned long do_csum (const unsigned char *, long); 65extern unsigned long do_csum (const unsigned char *, long);
65 66
diff --git a/arch/ia64/sn/kernel/sn2/sn_hwperf.c b/arch/ia64/sn/kernel/sn2/sn_hwperf.c
index 6da9854751cd..df8d5bed6119 100644
--- a/arch/ia64/sn/kernel/sn2/sn_hwperf.c
+++ b/arch/ia64/sn/kernel/sn2/sn_hwperf.c
@@ -750,9 +750,10 @@ sn_hwperf_ioctl(struct inode *in, struct file *fp, u32 op, u64 arg)
750 goto error; 750 goto error;
751 } else 751 } else
752 if ((r = sn_hwperf_enum_objects(&nobj, &objs)) == 0) { 752 if ((r = sn_hwperf_enum_objects(&nobj, &objs)) == 0) {
753 int cpuobj_index = 0;
754
753 memset(p, 0, a.sz); 755 memset(p, 0, a.sz);
754 for (i = 0; i < nobj; i++) { 756 for (i = 0; i < nobj; i++) {
755 int cpuobj_index = 0;
756 if (!SN_HWPERF_IS_NODE(objs + i)) 757 if (!SN_HWPERF_IS_NODE(objs + i))
757 continue; 758 continue;
758 node = sn_hwperf_obj_to_cnode(objs + i); 759 node = sn_hwperf_obj_to_cnode(objs + i);
diff --git a/arch/m32r/kernel/ptrace.c b/arch/m32r/kernel/ptrace.c
index 5f02b3144875..57a92ef31a90 100644
--- a/arch/m32r/kernel/ptrace.c
+++ b/arch/m32r/kernel/ptrace.c
@@ -595,7 +595,6 @@ void ptrace_disable(struct task_struct *child)
595static int 595static int
596do_ptrace(long request, struct task_struct *child, long addr, long data) 596do_ptrace(long request, struct task_struct *child, long addr, long data)
597{ 597{
598 unsigned long tmp;
599 int ret; 598 int ret;
600 599
601 switch (request) { 600 switch (request) {
@@ -604,11 +603,7 @@ do_ptrace(long request, struct task_struct *child, long addr, long data)
604 */ 603 */
605 case PTRACE_PEEKTEXT: 604 case PTRACE_PEEKTEXT:
606 case PTRACE_PEEKDATA: 605 case PTRACE_PEEKDATA:
607 ret = access_process_vm(child, addr, &tmp, sizeof(tmp), 0); 606 ret = generic_ptrace_peekdata(child, addr, data);
608 if (ret == sizeof(tmp))
609 ret = put_user(tmp,(unsigned long __user *) data);
610 else
611 ret = -EIO;
612 break; 607 break;
613 608
614 /* 609 /*
@@ -624,15 +619,9 @@ do_ptrace(long request, struct task_struct *child, long addr, long data)
624 */ 619 */
625 case PTRACE_POKETEXT: 620 case PTRACE_POKETEXT:
626 case PTRACE_POKEDATA: 621 case PTRACE_POKEDATA:
627 ret = access_process_vm(child, addr, &data, sizeof(data), 1); 622 ret = generic_ptrace_pokedata(child, addr, data);
628 if (ret == sizeof(data)) { 623 if (ret == 0 && request == PTRACE_POKETEXT)
629 ret = 0; 624 invalidate_cache();
630 if (request == PTRACE_POKETEXT) {
631 invalidate_cache();
632 }
633 } else {
634 ret = -EIO;
635 }
636 break; 625 break;
637 626
638 /* 627 /*
diff --git a/arch/m68k/kernel/ptrace.c b/arch/m68k/kernel/ptrace.c
index cdba9fd6d82f..2cf0690b7882 100644
--- a/arch/m68k/kernel/ptrace.c
+++ b/arch/m68k/kernel/ptrace.c
@@ -128,10 +128,7 @@ long arch_ptrace(struct task_struct *child, long request, long addr, long data)
128 /* when I and D space are separate, these will need to be fixed. */ 128 /* when I and D space are separate, these will need to be fixed. */
129 case PTRACE_PEEKTEXT: /* read word at location addr. */ 129 case PTRACE_PEEKTEXT: /* read word at location addr. */
130 case PTRACE_PEEKDATA: 130 case PTRACE_PEEKDATA:
131 i = access_process_vm(child, addr, &tmp, sizeof(tmp), 0); 131 ret = generic_ptrace_peekdata(child, addr, data);
132 if (i != sizeof(tmp))
133 goto out_eio;
134 ret = put_user(tmp, (unsigned long *)data);
135 break; 132 break;
136 133
137 /* read the word at location addr in the USER area. */ 134 /* read the word at location addr in the USER area. */
@@ -160,8 +157,7 @@ long arch_ptrace(struct task_struct *child, long request, long addr, long data)
160 /* when I and D space are separate, this will have to be fixed. */ 157 /* when I and D space are separate, this will have to be fixed. */
161 case PTRACE_POKETEXT: /* write the word at location addr. */ 158 case PTRACE_POKETEXT: /* write the word at location addr. */
162 case PTRACE_POKEDATA: 159 case PTRACE_POKEDATA:
163 if (access_process_vm(child, addr, &data, sizeof(data), 1) != sizeof(data)) 160 ret = generic_ptrace_pokedata(child, addr, data);
164 goto out_eio;
165 break; 161 break;
166 162
167 case PTRACE_POKEUSR: /* write the word at location addr in the USER area */ 163 case PTRACE_POKEUSR: /* write the word at location addr in the USER area */
diff --git a/arch/m68k/kernel/traps.c b/arch/m68k/kernel/traps.c
index a27a4fa33296..4e2752a0e89b 100644
--- a/arch/m68k/kernel/traps.c
+++ b/arch/m68k/kernel/traps.c
@@ -1170,6 +1170,7 @@ void die_if_kernel (char *str, struct pt_regs *fp, int nr)
1170 console_verbose(); 1170 console_verbose();
1171 printk("%s: %08x\n",str,nr); 1171 printk("%s: %08x\n",str,nr);
1172 show_registers(fp); 1172 show_registers(fp);
1173 add_taint(TAINT_DIE);
1173 do_exit(SIGSEGV); 1174 do_exit(SIGSEGV);
1174} 1175}
1175 1176
diff --git a/arch/m68k/lib/checksum.c b/arch/m68k/lib/checksum.c
index cf6bb51945a2..6216f12a756b 100644
--- a/arch/m68k/lib/checksum.c
+++ b/arch/m68k/lib/checksum.c
@@ -422,3 +422,4 @@ csum_partial_copy_nocheck(const void *src, void *dst, int len, __wsum sum)
422 ); 422 );
423 return(sum); 423 return(sum);
424} 424}
425EXPORT_SYMBOL(csum_partial_copy_nocheck);
diff --git a/arch/m68knommu/kernel/ptrace.c b/arch/m68knommu/kernel/ptrace.c
index f54b6a3dfecb..ef70ca070ce2 100644
--- a/arch/m68knommu/kernel/ptrace.c
+++ b/arch/m68knommu/kernel/ptrace.c
@@ -106,17 +106,9 @@ long arch_ptrace(struct task_struct *child, long request, long addr, long data)
106 switch (request) { 106 switch (request) {
107 /* when I and D space are separate, these will need to be fixed. */ 107 /* when I and D space are separate, these will need to be fixed. */
108 case PTRACE_PEEKTEXT: /* read word at location addr. */ 108 case PTRACE_PEEKTEXT: /* read word at location addr. */
109 case PTRACE_PEEKDATA: { 109 case PTRACE_PEEKDATA:
110 unsigned long tmp; 110 ret = generic_ptrace_peekdata(child, addr, data);
111 int copied;
112
113 copied = access_process_vm(child, addr, &tmp, sizeof(tmp), 0);
114 ret = -EIO;
115 if (copied != sizeof(tmp))
116 break;
117 ret = put_user(tmp,(unsigned long *) data);
118 break; 111 break;
119 }
120 112
121 /* read the word at location addr in the USER area. */ 113 /* read the word at location addr in the USER area. */
122 case PTRACE_PEEKUSR: { 114 case PTRACE_PEEKUSR: {
@@ -159,10 +151,7 @@ long arch_ptrace(struct task_struct *child, long request, long addr, long data)
159 /* when I and D space are separate, this will have to be fixed. */ 151 /* when I and D space are separate, this will have to be fixed. */
160 case PTRACE_POKETEXT: /* write the word at location addr. */ 152 case PTRACE_POKETEXT: /* write the word at location addr. */
161 case PTRACE_POKEDATA: 153 case PTRACE_POKEDATA:
162 ret = 0; 154 ret = generic_ptrace_pokedata(child, addr, data);
163 if (access_process_vm(child, addr, &data, sizeof(data), 1) == sizeof(data))
164 break;
165 ret = -EIO;
166 break; 155 break;
167 156
168 case PTRACE_POKEUSR: /* write the word at location addr in the USER area */ 157 case PTRACE_POKEUSR: /* write the word at location addr in the USER area */
diff --git a/arch/m68knommu/kernel/traps.c b/arch/m68knommu/kernel/traps.c
index bed5f47bf568..fde04e1757f7 100644
--- a/arch/m68knommu/kernel/traps.c
+++ b/arch/m68knommu/kernel/traps.c
@@ -83,6 +83,7 @@ void die_if_kernel(char *str, struct pt_regs *fp, int nr)
83 printk(KERN_EMERG "Process %s (pid: %d, stackpage=%08lx)\n", 83 printk(KERN_EMERG "Process %s (pid: %d, stackpage=%08lx)\n",
84 current->comm, current->pid, PAGE_SIZE+(unsigned long)current); 84 current->comm, current->pid, PAGE_SIZE+(unsigned long)current);
85 show_stack(NULL, (unsigned long *)fp); 85 show_stack(NULL, (unsigned long *)fp);
86 add_taint(TAINT_DIE);
86 do_exit(SIGSEGV); 87 do_exit(SIGSEGV);
87} 88}
88 89
diff --git a/arch/mips/kernel/ptrace.c b/arch/mips/kernel/ptrace.c
index b5a7b46bbc49..893e7bccf226 100644
--- a/arch/mips/kernel/ptrace.c
+++ b/arch/mips/kernel/ptrace.c
@@ -174,17 +174,9 @@ long arch_ptrace(struct task_struct *child, long request, long addr, long data)
174 switch (request) { 174 switch (request) {
175 /* when I and D space are separate, these will need to be fixed. */ 175 /* when I and D space are separate, these will need to be fixed. */
176 case PTRACE_PEEKTEXT: /* read word at location addr. */ 176 case PTRACE_PEEKTEXT: /* read word at location addr. */
177 case PTRACE_PEEKDATA: { 177 case PTRACE_PEEKDATA:
178 unsigned long tmp; 178 ret = generic_ptrace_peekdata(child, addr, data);
179 int copied;
180
181 copied = access_process_vm(child, addr, &tmp, sizeof(tmp), 0);
182 ret = -EIO;
183 if (copied != sizeof(tmp))
184 break;
185 ret = put_user(tmp,(unsigned long __user *) data);
186 break; 179 break;
187 }
188 180
189 /* Read the word at location addr in the USER area. */ 181 /* Read the word at location addr in the USER area. */
190 case PTRACE_PEEKUSR: { 182 case PTRACE_PEEKUSR: {
@@ -313,11 +305,7 @@ long arch_ptrace(struct task_struct *child, long request, long addr, long data)
313 /* when I and D space are separate, this will have to be fixed. */ 305 /* when I and D space are separate, this will have to be fixed. */
314 case PTRACE_POKETEXT: /* write the word at location addr. */ 306 case PTRACE_POKETEXT: /* write the word at location addr. */
315 case PTRACE_POKEDATA: 307 case PTRACE_POKEDATA:
316 ret = 0; 308 ret = generic_ptrace_pokedata(child, addr, data);
317 if (access_process_vm(child, addr, &data, sizeof(data), 1)
318 == sizeof(data))
319 break;
320 ret = -EIO;
321 break; 309 break;
322 310
323 case PTRACE_POKEUSR: { 311 case PTRACE_POKEUSR: {
diff --git a/arch/mips/kernel/traps.c b/arch/mips/kernel/traps.c
index 37c562c4c817..ce277cb34dd0 100644
--- a/arch/mips/kernel/traps.c
+++ b/arch/mips/kernel/traps.c
@@ -326,6 +326,7 @@ void __noreturn die(const char * str, struct pt_regs * regs)
326#endif /* CONFIG_MIPS_MT_SMTC */ 326#endif /* CONFIG_MIPS_MT_SMTC */
327 printk("%s[#%d]:\n", str, ++die_counter); 327 printk("%s[#%d]:\n", str, ++die_counter);
328 show_registers(regs); 328 show_registers(regs);
329 add_taint(TAINT_DIE);
329 spin_unlock_irq(&die_lock); 330 spin_unlock_irq(&die_lock);
330 331
331 if (in_interrupt()) 332 if (in_interrupt())
diff --git a/arch/mips/sibyte/bcm1480/setup.c b/arch/mips/sibyte/bcm1480/setup.c
index bdaac34ae708..89f29233cae1 100644
--- a/arch/mips/sibyte/bcm1480/setup.c
+++ b/arch/mips/sibyte/bcm1480/setup.c
@@ -31,6 +31,7 @@
31unsigned int sb1_pass; 31unsigned int sb1_pass;
32unsigned int soc_pass; 32unsigned int soc_pass;
33unsigned int soc_type; 33unsigned int soc_type;
34EXPORT_SYMBOL(soc_type);
34unsigned int periph_rev; 35unsigned int periph_rev;
35unsigned int zbbus_mhz; 36unsigned int zbbus_mhz;
36 37
diff --git a/arch/mips/sibyte/sb1250/setup.c b/arch/mips/sibyte/sb1250/setup.c
index f4a6169aa0a4..2d5c6d8b41f2 100644
--- a/arch/mips/sibyte/sb1250/setup.c
+++ b/arch/mips/sibyte/sb1250/setup.c
@@ -31,6 +31,7 @@
31unsigned int sb1_pass; 31unsigned int sb1_pass;
32unsigned int soc_pass; 32unsigned int soc_pass;
33unsigned int soc_type; 33unsigned int soc_type;
34EXPORT_SYMBOL(soc_type);
34unsigned int periph_rev; 35unsigned int periph_rev;
35unsigned int zbbus_mhz; 36unsigned int zbbus_mhz;
36EXPORT_SYMBOL(zbbus_mhz); 37EXPORT_SYMBOL(zbbus_mhz);
diff --git a/arch/parisc/kernel/ptrace.c b/arch/parisc/kernel/ptrace.c
index 8a0db376e91e..26ec774c5027 100644
--- a/arch/parisc/kernel/ptrace.c
+++ b/arch/parisc/kernel/ptrace.c
@@ -87,10 +87,9 @@ long arch_ptrace(struct task_struct *child, long request, long addr, long data)
87 switch (request) { 87 switch (request) {
88 case PTRACE_PEEKTEXT: /* read word at location addr. */ 88 case PTRACE_PEEKTEXT: /* read word at location addr. */
89 case PTRACE_PEEKDATA: { 89 case PTRACE_PEEKDATA: {
90 int copied;
91
92#ifdef CONFIG_64BIT 90#ifdef CONFIG_64BIT
93 if (__is_compat_task(child)) { 91 if (__is_compat_task(child)) {
92 int copied;
94 unsigned int tmp; 93 unsigned int tmp;
95 94
96 addr &= 0xffffffffL; 95 addr &= 0xffffffffL;
@@ -105,15 +104,7 @@ long arch_ptrace(struct task_struct *child, long request, long addr, long data)
105 } 104 }
106 else 105 else
107#endif 106#endif
108 { 107 ret = generic_ptrace_peekdata(child, addr, data);
109 unsigned long tmp;
110
111 copied = access_process_vm(child, addr, &tmp, sizeof(tmp), 0);
112 ret = -EIO;
113 if (copied != sizeof(tmp))
114 goto out_tsk;
115 ret = put_user(tmp,(unsigned long *) data);
116 }
117 goto out_tsk; 108 goto out_tsk;
118 } 109 }
119 110
diff --git a/arch/parisc/kernel/traps.c b/arch/parisc/kernel/traps.c
index f9bca2d74b38..bbf029a184ac 100644
--- a/arch/parisc/kernel/traps.c
+++ b/arch/parisc/kernel/traps.c
@@ -264,6 +264,7 @@ KERN_CRIT " || ||\n");
264 264
265 show_regs(regs); 265 show_regs(regs);
266 dump_stack(); 266 dump_stack();
267 add_taint(TAINT_DIE);
267 268
268 if (in_interrupt()) 269 if (in_interrupt())
269 panic("Fatal exception in interrupt"); 270 panic("Fatal exception in interrupt");
diff --git a/arch/parisc/kernel/unwind.c b/arch/parisc/kernel/unwind.c
index 322167737de7..cf780cb3b916 100644
--- a/arch/parisc/kernel/unwind.c
+++ b/arch/parisc/kernel/unwind.c
@@ -242,7 +242,7 @@ static void unwind_frame_regs(struct unwind_frame_info *info)
242#ifdef CONFIG_KALLSYMS 242#ifdef CONFIG_KALLSYMS
243 /* Handle some frequent special cases.... */ 243 /* Handle some frequent special cases.... */
244 { 244 {
245 char symname[KSYM_NAME_LEN+1]; 245 char symname[KSYM_NAME_LEN];
246 char *modname; 246 char *modname;
247 247
248 kallsyms_lookup(info->ip, NULL, NULL, &modname, 248 kallsyms_lookup(info->ip, NULL, NULL, &modname,
diff --git a/arch/powerpc/Kconfig b/arch/powerpc/Kconfig
index e641bb68d871..d860b640a140 100644
--- a/arch/powerpc/Kconfig
+++ b/arch/powerpc/Kconfig
@@ -640,8 +640,8 @@ menu "Instrumentation Support"
640source "arch/powerpc/oprofile/Kconfig" 640source "arch/powerpc/oprofile/Kconfig"
641 641
642config KPROBES 642config KPROBES
643 bool "Kprobes (EXPERIMENTAL)" 643 bool "Kprobes"
644 depends on !BOOKE && !4xx && KALLSYMS && EXPERIMENTAL && MODULES 644 depends on !BOOKE && !4xx && KALLSYMS && MODULES
645 help 645 help
646 Kprobes allows you to trap at almost any kernel address and 646 Kprobes allows you to trap at almost any kernel address and
647 execute a callback function. register_kprobe() establishes 647 execute a callback function. register_kprobe() establishes
diff --git a/arch/powerpc/kernel/ptrace.c b/arch/powerpc/kernel/ptrace.c
index 0fb53950da43..8a177bd9eab4 100644
--- a/arch/powerpc/kernel/ptrace.c
+++ b/arch/powerpc/kernel/ptrace.c
@@ -379,17 +379,9 @@ long arch_ptrace(struct task_struct *child, long request, long addr, long data)
379 switch (request) { 379 switch (request) {
380 /* when I and D space are separate, these will need to be fixed. */ 380 /* when I and D space are separate, these will need to be fixed. */
381 case PTRACE_PEEKTEXT: /* read word at location addr. */ 381 case PTRACE_PEEKTEXT: /* read word at location addr. */
382 case PTRACE_PEEKDATA: { 382 case PTRACE_PEEKDATA:
383 unsigned long tmp; 383 ret = generic_ptrace_peekdata(child, addr, data);
384 int copied;
385
386 copied = access_process_vm(child, addr, &tmp, sizeof(tmp), 0);
387 ret = -EIO;
388 if (copied != sizeof(tmp))
389 break;
390 ret = put_user(tmp,(unsigned long __user *) data);
391 break; 384 break;
392 }
393 385
394 /* read the word at location addr in the USER area. */ 386 /* read the word at location addr in the USER area. */
395 case PTRACE_PEEKUSR: { 387 case PTRACE_PEEKUSR: {
@@ -421,11 +413,7 @@ long arch_ptrace(struct task_struct *child, long request, long addr, long data)
421 /* If I and D space are separate, this will have to be fixed. */ 413 /* If I and D space are separate, this will have to be fixed. */
422 case PTRACE_POKETEXT: /* write the word at location addr. */ 414 case PTRACE_POKETEXT: /* write the word at location addr. */
423 case PTRACE_POKEDATA: 415 case PTRACE_POKEDATA:
424 ret = 0; 416 ret = generic_ptrace_pokedata(child, addr, data);
425 if (access_process_vm(child, addr, &data, sizeof(data), 1)
426 == sizeof(data))
427 break;
428 ret = -EIO;
429 break; 417 break;
430 418
431 /* write the word at location addr in the USER area */ 419 /* write the word at location addr in the USER area */
diff --git a/arch/powerpc/kernel/traps.c b/arch/powerpc/kernel/traps.c
index 3b8427e6283d..2bb1cb911783 100644
--- a/arch/powerpc/kernel/traps.c
+++ b/arch/powerpc/kernel/traps.c
@@ -149,6 +149,7 @@ int die(const char *str, struct pt_regs *regs, long err)
149 149
150 bust_spinlocks(0); 150 bust_spinlocks(0);
151 die.lock_owner = -1; 151 die.lock_owner = -1;
152 add_taint(TAINT_DIE);
152 spin_unlock_irqrestore(&die.lock, flags); 153 spin_unlock_irqrestore(&die.lock, flags);
153 154
154 if (kexec_should_crash(current) || 155 if (kexec_should_crash(current) ||
diff --git a/arch/powerpc/platforms/8xx/mpc885ads_setup.c b/arch/powerpc/platforms/8xx/mpc885ads_setup.c
index dc27dab48df0..5a808d611ae3 100644
--- a/arch/powerpc/platforms/8xx/mpc885ads_setup.c
+++ b/arch/powerpc/platforms/8xx/mpc885ads_setup.c
@@ -40,7 +40,7 @@
40#include <asm/prom.h> 40#include <asm/prom.h>
41 41
42extern void cpm_reset(void); 42extern void cpm_reset(void);
43extern void mpc8xx_show_cpuinfo(struct seq_file*); 43extern void mpc8xx_show_cpuinfo(struct seq_file *);
44extern void mpc8xx_restart(char *cmd); 44extern void mpc8xx_restart(char *cmd);
45extern void mpc8xx_calibrate_decr(void); 45extern void mpc8xx_calibrate_decr(void);
46extern int mpc8xx_set_rtc_time(struct rtc_time *tm); 46extern int mpc8xx_set_rtc_time(struct rtc_time *tm);
@@ -48,9 +48,9 @@ extern void mpc8xx_get_rtc_time(struct rtc_time *tm);
48extern void m8xx_pic_init(void); 48extern void m8xx_pic_init(void);
49extern unsigned int mpc8xx_get_irq(void); 49extern unsigned int mpc8xx_get_irq(void);
50 50
51static void init_smc1_uart_ioports(struct fs_uart_platform_info* fpi); 51static void init_smc1_uart_ioports(struct fs_uart_platform_info *fpi);
52static void init_smc2_uart_ioports(struct fs_uart_platform_info* fpi); 52static void init_smc2_uart_ioports(struct fs_uart_platform_info *fpi);
53static void init_scc3_ioports(struct fs_platform_info* ptr); 53static void init_scc3_ioports(struct fs_platform_info *ptr);
54 54
55#ifdef CONFIG_PCMCIA_M8XX 55#ifdef CONFIG_PCMCIA_M8XX
56static void pcmcia_hw_setup(int slot, int enable) 56static void pcmcia_hw_setup(int slot, int enable)
@@ -73,7 +73,7 @@ static int pcmcia_set_voltage(int slot, int vcc, int vpp)
73 73
74 bcsr_io = ioremap(BCSR1, sizeof(unsigned long)); 74 bcsr_io = ioremap(BCSR1, sizeof(unsigned long));
75 75
76 switch(vcc) { 76 switch (vcc) {
77 case 0: 77 case 0:
78 break; 78 break;
79 case 33: 79 case 33:
@@ -86,12 +86,12 @@ static int pcmcia_set_voltage(int slot, int vcc, int vpp)
86 return 1; 86 return 1;
87 } 87 }
88 88
89 switch(vpp) { 89 switch (vpp) {
90 case 0: 90 case 0:
91 break; 91 break;
92 case 33: 92 case 33:
93 case 50: 93 case 50:
94 if(vcc == vpp) 94 if (vcc == vpp)
95 reg |= BCSR1_PCCVPP1; 95 reg |= BCSR1_PCCVPP1;
96 else 96 else
97 return 1; 97 return 1;
@@ -127,7 +127,7 @@ void __init mpc885ads_board_setup(void)
127#endif 127#endif
128 128
129 bcsr_io = ioremap(BCSR1, sizeof(unsigned long)); 129 bcsr_io = ioremap(BCSR1, sizeof(unsigned long));
130 cp = (cpm8xx_t *)immr_map(im_cpm); 130 cp = (cpm8xx_t *) immr_map(im_cpm);
131 131
132 if (bcsr_io == NULL) { 132 if (bcsr_io == NULL) {
133 printk(KERN_CRIT "Could not remap BCSR\n"); 133 printk(KERN_CRIT "Could not remap BCSR\n");
@@ -140,13 +140,13 @@ void __init mpc885ads_board_setup(void)
140 out_8(&(cp->cp_smc[0].smc_smcm), tmpval8); 140 out_8(&(cp->cp_smc[0].smc_smcm), tmpval8);
141 clrbits16(&cp->cp_smc[0].smc_smcmr, SMCMR_REN | SMCMR_TEN); /* brg1 */ 141 clrbits16(&cp->cp_smc[0].smc_smcmr, SMCMR_REN | SMCMR_TEN); /* brg1 */
142#else 142#else
143 setbits32(bcsr_io,BCSR1_RS232EN_1); 143 setbits32(bcsr_io, BCSR1_RS232EN_1);
144 out_be16(&cp->cp_smc[0].smc_smcmr, 0); 144 out_be16(&cp->cp_smc[0].smc_smcmr, 0);
145 out_8(&cp->cp_smc[0].smc_smce, 0); 145 out_8(&cp->cp_smc[0].smc_smce, 0);
146#endif 146#endif
147 147
148#ifdef CONFIG_SERIAL_CPM_SMC2 148#ifdef CONFIG_SERIAL_CPM_SMC2
149 clrbits32(bcsr_io,BCSR1_RS232EN_2); 149 clrbits32(bcsr_io, BCSR1_RS232EN_2);
150 clrbits32(&cp->cp_simode, 0xe0000000 >> 1); 150 clrbits32(&cp->cp_simode, 0xe0000000 >> 1);
151 setbits32(&cp->cp_simode, 0x20000000 >> 1); /* brg2 */ 151 setbits32(&cp->cp_simode, 0x20000000 >> 1); /* brg2 */
152 tmpval8 = in_8(&(cp->cp_smc[1].smc_smcm)) | (SMCM_RX | SMCM_TX); 152 tmpval8 = in_8(&(cp->cp_smc[1].smc_smcm)) | (SMCM_RX | SMCM_TX);
@@ -155,7 +155,7 @@ void __init mpc885ads_board_setup(void)
155 155
156 init_smc2_uart_ioports(0); 156 init_smc2_uart_ioports(0);
157#else 157#else
158 setbits32(bcsr_io,BCSR1_RS232EN_2); 158 setbits32(bcsr_io, BCSR1_RS232EN_2);
159 out_be16(&cp->cp_smc[1].smc_smcmr, 0); 159 out_be16(&cp->cp_smc[1].smc_smcmr, 0);
160 out_8(&cp->cp_smc[1].smc_smce, 0); 160 out_8(&cp->cp_smc[1].smc_smce, 0);
161#endif 161#endif
@@ -164,16 +164,16 @@ void __init mpc885ads_board_setup(void)
164 164
165#ifdef CONFIG_FS_ENET 165#ifdef CONFIG_FS_ENET
166 /* use MDC for MII (common) */ 166 /* use MDC for MII (common) */
167 io_port = (iop8xx_t*)immr_map(im_ioport); 167 io_port = (iop8xx_t *) immr_map(im_ioport);
168 setbits16(&io_port->iop_pdpar, 0x0080); 168 setbits16(&io_port->iop_pdpar, 0x0080);
169 clrbits16(&io_port->iop_pddir, 0x0080); 169 clrbits16(&io_port->iop_pddir, 0x0080);
170 170
171 bcsr_io = ioremap(BCSR5, sizeof(unsigned long)); 171 bcsr_io = ioremap(BCSR5, sizeof(unsigned long));
172 clrbits32(bcsr_io,BCSR5_MII1_EN); 172 clrbits32(bcsr_io, BCSR5_MII1_EN);
173 clrbits32(bcsr_io,BCSR5_MII1_RST); 173 clrbits32(bcsr_io, BCSR5_MII1_RST);
174#ifndef CONFIG_FC_ENET_HAS_SCC 174#ifndef CONFIG_FC_ENET_HAS_SCC
175 clrbits32(bcsr_io,BCSR5_MII2_EN); 175 clrbits32(bcsr_io, BCSR5_MII2_EN);
176 clrbits32(bcsr_io,BCSR5_MII2_RST); 176 clrbits32(bcsr_io, BCSR5_MII2_RST);
177 177
178#endif 178#endif
179 iounmap(bcsr_io); 179 iounmap(bcsr_io);
@@ -182,17 +182,16 @@ void __init mpc885ads_board_setup(void)
182#endif 182#endif
183 183
184#ifdef CONFIG_PCMCIA_M8XX 184#ifdef CONFIG_PCMCIA_M8XX
185 /*Set up board specific hook-ups*/ 185 /*Set up board specific hook-ups */
186 m8xx_pcmcia_ops.hw_ctrl = pcmcia_hw_setup; 186 m8xx_pcmcia_ops.hw_ctrl = pcmcia_hw_setup;
187 m8xx_pcmcia_ops.voltage_set = pcmcia_set_voltage; 187 m8xx_pcmcia_ops.voltage_set = pcmcia_set_voltage;
188#endif 188#endif
189} 189}
190 190
191 191static void init_fec1_ioports(struct fs_platform_info *ptr)
192static void init_fec1_ioports(struct fs_platform_info* ptr)
193{ 192{
194 cpm8xx_t *cp = (cpm8xx_t *)immr_map(im_cpm); 193 cpm8xx_t *cp = (cpm8xx_t *) immr_map(im_cpm);
195 iop8xx_t *io_port = (iop8xx_t *)immr_map(im_ioport); 194 iop8xx_t *io_port = (iop8xx_t *) immr_map(im_ioport);
196 195
197 /* configure FEC1 pins */ 196 /* configure FEC1 pins */
198 setbits16(&io_port->iop_papar, 0xf830); 197 setbits16(&io_port->iop_papar, 0xf830);
@@ -214,11 +213,10 @@ static void init_fec1_ioports(struct fs_platform_info* ptr)
214 immr_unmap(cp); 213 immr_unmap(cp);
215} 214}
216 215
217 216static void init_fec2_ioports(struct fs_platform_info *ptr)
218static void init_fec2_ioports(struct fs_platform_info* ptr)
219{ 217{
220 cpm8xx_t *cp = (cpm8xx_t *)immr_map(im_cpm); 218 cpm8xx_t *cp = (cpm8xx_t *) immr_map(im_cpm);
221 iop8xx_t *io_port = (iop8xx_t *)immr_map(im_ioport); 219 iop8xx_t *io_port = (iop8xx_t *) immr_map(im_ioport);
222 220
223 /* configure FEC2 pins */ 221 /* configure FEC2 pins */
224 setbits32(&cp->cp_pepar, 0x0003fffc); 222 setbits32(&cp->cp_pepar, 0x0003fffc);
@@ -248,15 +246,15 @@ void init_fec_ioports(struct fs_platform_info *fpi)
248 } 246 }
249} 247}
250 248
251static void init_scc3_ioports(struct fs_platform_info* fpi) 249static void init_scc3_ioports(struct fs_platform_info *fpi)
252{ 250{
253 unsigned *bcsr_io; 251 unsigned *bcsr_io;
254 iop8xx_t *io_port; 252 iop8xx_t *io_port;
255 cpm8xx_t *cp; 253 cpm8xx_t *cp;
256 254
257 bcsr_io = ioremap(BCSR_ADDR, BCSR_SIZE); 255 bcsr_io = ioremap(BCSR_ADDR, BCSR_SIZE);
258 io_port = (iop8xx_t *)immr_map(im_ioport); 256 io_port = (iop8xx_t *) immr_map(im_ioport);
259 cp = (cpm8xx_t *)immr_map(im_cpm); 257 cp = (cpm8xx_t *) immr_map(im_cpm);
260 258
261 if (bcsr_io == NULL) { 259 if (bcsr_io == NULL) {
262 printk(KERN_CRIT "Could not remap BCSR\n"); 260 printk(KERN_CRIT "Could not remap BCSR\n");
@@ -265,9 +263,9 @@ static void init_scc3_ioports(struct fs_platform_info* fpi)
265 263
266 /* Enable the PHY. 264 /* Enable the PHY.
267 */ 265 */
268 clrbits32(bcsr_io+4, BCSR4_ETH10_RST); 266 clrbits32(bcsr_io + 4, BCSR4_ETH10_RST);
269 udelay(1000); 267 udelay(1000);
270 setbits32(bcsr_io+4, BCSR4_ETH10_RST); 268 setbits32(bcsr_io + 4, BCSR4_ETH10_RST);
271 /* Configure port A pins for Txd and Rxd. 269 /* Configure port A pins for Txd and Rxd.
272 */ 270 */
273 setbits16(&io_port->iop_papar, PA_ENET_RXD | PA_ENET_TXD); 271 setbits16(&io_port->iop_papar, PA_ENET_RXD | PA_ENET_TXD);
@@ -283,8 +281,7 @@ static void init_scc3_ioports(struct fs_platform_info* fpi)
283 */ 281 */
284 setbits32(&cp->cp_pepar, PE_ENET_TCLK | PE_ENET_RCLK); 282 setbits32(&cp->cp_pepar, PE_ENET_TCLK | PE_ENET_RCLK);
285 clrbits32(&cp->cp_pepar, PE_ENET_TENA); 283 clrbits32(&cp->cp_pepar, PE_ENET_TENA);
286 clrbits32(&cp->cp_pedir, 284 clrbits32(&cp->cp_pedir, PE_ENET_TCLK | PE_ENET_RCLK | PE_ENET_TENA);
287 PE_ENET_TCLK | PE_ENET_RCLK | PE_ENET_TENA);
288 clrbits32(&cp->cp_peso, PE_ENET_TCLK | PE_ENET_RCLK); 285 clrbits32(&cp->cp_peso, PE_ENET_TCLK | PE_ENET_RCLK);
289 setbits32(&cp->cp_peso, PE_ENET_TENA); 286 setbits32(&cp->cp_peso, PE_ENET_TENA);
290 287
@@ -308,7 +305,7 @@ static void init_scc3_ioports(struct fs_platform_info* fpi)
308 clrbits32(&cp->cp_pedir, PE_ENET_TENA); 305 clrbits32(&cp->cp_pedir, PE_ENET_TENA);
309 setbits32(&cp->cp_peso, PE_ENET_TENA); 306 setbits32(&cp->cp_peso, PE_ENET_TENA);
310 307
311 setbits32(bcsr_io+4, BCSR1_ETHEN); 308 setbits32(bcsr_io + 4, BCSR1_ETHEN);
312 iounmap(bcsr_io); 309 iounmap(bcsr_io);
313 immr_unmap(io_port); 310 immr_unmap(io_port);
314 immr_unmap(cp); 311 immr_unmap(cp);
@@ -328,50 +325,48 @@ void init_scc_ioports(struct fs_platform_info *fpi)
328 } 325 }
329} 326}
330 327
331 328static void init_smc1_uart_ioports(struct fs_uart_platform_info *ptr)
332
333static void init_smc1_uart_ioports(struct fs_uart_platform_info* ptr)
334{ 329{
335 unsigned *bcsr_io; 330 unsigned *bcsr_io;
336 cpm8xx_t *cp; 331 cpm8xx_t *cp;
337 332
338 cp = (cpm8xx_t *)immr_map(im_cpm); 333 cp = (cpm8xx_t *) immr_map(im_cpm);
339 setbits32(&cp->cp_pepar, 0x000000c0); 334 setbits32(&cp->cp_pepar, 0x000000c0);
340 clrbits32(&cp->cp_pedir, 0x000000c0); 335 clrbits32(&cp->cp_pedir, 0x000000c0);
341 clrbits32(&cp->cp_peso, 0x00000040); 336 clrbits32(&cp->cp_peso, 0x00000040);
342 setbits32(&cp->cp_peso, 0x00000080); 337 setbits32(&cp->cp_peso, 0x00000080);
343 immr_unmap(cp); 338 immr_unmap(cp);
344 339
345 bcsr_io = ioremap(BCSR1, sizeof(unsigned long)); 340 bcsr_io = ioremap(BCSR1, sizeof(unsigned long));
346 341
347 if (bcsr_io == NULL) { 342 if (bcsr_io == NULL) {
348 printk(KERN_CRIT "Could not remap BCSR1\n"); 343 printk(KERN_CRIT "Could not remap BCSR1\n");
349 return; 344 return;
350 } 345 }
351 clrbits32(bcsr_io,BCSR1_RS232EN_1); 346 clrbits32(bcsr_io, BCSR1_RS232EN_1);
352 iounmap(bcsr_io); 347 iounmap(bcsr_io);
353} 348}
354 349
355static void init_smc2_uart_ioports(struct fs_uart_platform_info* fpi) 350static void init_smc2_uart_ioports(struct fs_uart_platform_info *fpi)
356{ 351{
357 unsigned *bcsr_io; 352 unsigned *bcsr_io;
358 cpm8xx_t *cp; 353 cpm8xx_t *cp;
359 354
360 cp = (cpm8xx_t *)immr_map(im_cpm); 355 cp = (cpm8xx_t *) immr_map(im_cpm);
361 setbits32(&cp->cp_pepar, 0x00000c00); 356 setbits32(&cp->cp_pepar, 0x00000c00);
362 clrbits32(&cp->cp_pedir, 0x00000c00); 357 clrbits32(&cp->cp_pedir, 0x00000c00);
363 clrbits32(&cp->cp_peso, 0x00000400); 358 clrbits32(&cp->cp_peso, 0x00000400);
364 setbits32(&cp->cp_peso, 0x00000800); 359 setbits32(&cp->cp_peso, 0x00000800);
365 immr_unmap(cp); 360 immr_unmap(cp);
366 361
367 bcsr_io = ioremap(BCSR1, sizeof(unsigned long)); 362 bcsr_io = ioremap(BCSR1, sizeof(unsigned long));
368 363
369 if (bcsr_io == NULL) { 364 if (bcsr_io == NULL) {
370 printk(KERN_CRIT "Could not remap BCSR1\n"); 365 printk(KERN_CRIT "Could not remap BCSR1\n");
371 return; 366 return;
372 } 367 }
373 clrbits32(bcsr_io,BCSR1_RS232EN_2); 368 clrbits32(bcsr_io, BCSR1_RS232EN_2);
374 iounmap(bcsr_io); 369 iounmap(bcsr_io);
375} 370}
376 371
377void init_smc_ioports(struct fs_uart_platform_info *data) 372void init_smc_ioports(struct fs_uart_platform_info *data)
@@ -444,15 +439,11 @@ static int __init mpc885ads_probe(void)
444 return 1; 439 return 1;
445} 440}
446 441
447define_machine(mpc885_ads) { 442define_machine(mpc885_ads)
448 .name = "MPC885 ADS", 443{
449 .probe = mpc885ads_probe, 444.name = "MPC885 ADS",.probe = mpc885ads_probe,.setup_arch =
450 .setup_arch = mpc885ads_setup_arch, 445 mpc885ads_setup_arch,.init_IRQ =
451 .init_IRQ = m8xx_pic_init, 446 m8xx_pic_init,.show_cpuinfo = mpc8xx_show_cpuinfo,.get_irq =
452 .show_cpuinfo = mpc8xx_show_cpuinfo, 447 mpc8xx_get_irq,.restart = mpc8xx_restart,.calibrate_decr =
453 .get_irq = mpc8xx_get_irq, 448 mpc8xx_calibrate_decr,.set_rtc_time =
454 .restart = mpc8xx_restart, 449 mpc8xx_set_rtc_time,.get_rtc_time = mpc8xx_get_rtc_time,};
455 .calibrate_decr = mpc8xx_calibrate_decr,
456 .set_rtc_time = mpc8xx_set_rtc_time,
457 .get_rtc_time = mpc8xx_get_rtc_time,
458};
diff --git a/arch/powerpc/platforms/cell/spu_base.c b/arch/powerpc/platforms/cell/spu_base.c
index e4d0c9f42abd..96a8f609690c 100644
--- a/arch/powerpc/platforms/cell/spu_base.c
+++ b/arch/powerpc/platforms/cell/spu_base.c
@@ -31,6 +31,7 @@
31#include <linux/mm.h> 31#include <linux/mm.h>
32#include <linux/io.h> 32#include <linux/io.h>
33#include <linux/mutex.h> 33#include <linux/mutex.h>
34#include <linux/linux_logo.h>
34#include <asm/spu.h> 35#include <asm/spu.h>
35#include <asm/spu_priv1.h> 36#include <asm/spu_priv1.h>
36#include <asm/xmon.h> 37#include <asm/xmon.h>
@@ -656,12 +657,24 @@ static int __init init_spu_base(void)
656 657
657 ret = spu_enumerate_spus(create_spu); 658 ret = spu_enumerate_spus(create_spu);
658 659
659 if (ret) { 660 if (ret < 0) {
660 printk(KERN_WARNING "%s: Error initializing spus\n", 661 printk(KERN_WARNING "%s: Error initializing spus\n",
661 __FUNCTION__); 662 __FUNCTION__);
662 goto out_unregister_sysdev_class; 663 goto out_unregister_sysdev_class;
663 } 664 }
664 665
666 if (ret > 0) {
667 /*
668 * We cannot put the forward declaration in
669 * <linux/linux_logo.h> because of conflicting session type
670 * conflicts for const and __initdata with different compiler
671 * versions
672 */
673 extern const struct linux_logo logo_spe_clut224;
674
675 fb_append_extra_logo(&logo_spe_clut224, ret);
676 }
677
665 xmon_register_spus(&spu_full_list); 678 xmon_register_spus(&spu_full_list);
666 679
667 spu_add_sysdev_attr(&attr_stat); 680 spu_add_sysdev_attr(&attr_stat);
diff --git a/arch/powerpc/platforms/cell/spu_manage.c b/arch/powerpc/platforms/cell/spu_manage.c
index 1d4562ae463d..75ed50fcc3db 100644
--- a/arch/powerpc/platforms/cell/spu_manage.c
+++ b/arch/powerpc/platforms/cell/spu_manage.c
@@ -279,6 +279,7 @@ static int __init of_enumerate_spus(int (*fn)(void *data))
279{ 279{
280 int ret; 280 int ret;
281 struct device_node *node; 281 struct device_node *node;
282 unsigned int n = 0;
282 283
283 ret = -ENODEV; 284 ret = -ENODEV;
284 for (node = of_find_node_by_type(NULL, "spe"); 285 for (node = of_find_node_by_type(NULL, "spe");
@@ -289,8 +290,9 @@ static int __init of_enumerate_spus(int (*fn)(void *data))
289 __FUNCTION__, node->name); 290 __FUNCTION__, node->name);
290 break; 291 break;
291 } 292 }
293 n++;
292 } 294 }
293 return ret; 295 return ret ? ret : n;
294} 296}
295 297
296static int __init of_create_spu(struct spu *spu, void *data) 298static int __init of_create_spu(struct spu *spu, void *data)
diff --git a/arch/powerpc/platforms/ps3/spu.c b/arch/powerpc/platforms/ps3/spu.c
index c7f734c89462..502d80ed982b 100644
--- a/arch/powerpc/platforms/ps3/spu.c
+++ b/arch/powerpc/platforms/ps3/spu.c
@@ -405,11 +405,13 @@ static int __init ps3_enumerate_spus(int (*fn)(void *data))
405 } 405 }
406 } 406 }
407 407
408 if (result) 408 if (result) {
409 printk(KERN_WARNING "%s:%d: Error initializing spus\n", 409 printk(KERN_WARNING "%s:%d: Error initializing spus\n",
410 __func__, __LINE__); 410 __func__, __LINE__);
411 return result;
412 }
411 413
412 return result; 414 return num_resource_id;
413} 415}
414 416
415const struct spu_management_ops spu_management_ps3_ops = { 417const struct spu_management_ops spu_management_ps3_ops = {
diff --git a/arch/ppc/kernel/traps.c b/arch/ppc/kernel/traps.c
index 0eaef7c8378b..3f3b292eb773 100644
--- a/arch/ppc/kernel/traps.c
+++ b/arch/ppc/kernel/traps.c
@@ -92,6 +92,7 @@ int die(const char * str, struct pt_regs * fp, long err)
92 if (nl) 92 if (nl)
93 printk("\n"); 93 printk("\n");
94 show_regs(fp); 94 show_regs(fp);
95 add_taint(TAINT_DIE);
95 spin_unlock_irq(&die_lock); 96 spin_unlock_irq(&die_lock);
96 /* do_exit() should take care of panic'ing from an interrupt 97 /* do_exit() should take care of panic'ing from an interrupt
97 * context so we don't handle it here 98 * context so we don't handle it here
diff --git a/arch/ppc/syslib/virtex_devices.h b/arch/ppc/syslib/virtex_devices.h
index 3d4be1412f60..9f38d92ae536 100644
--- a/arch/ppc/syslib/virtex_devices.h
+++ b/arch/ppc/syslib/virtex_devices.h
@@ -31,4 +31,11 @@ void __init virtex_early_serial_map(void);
31 */ 31 */
32int virtex_device_fixup(struct platform_device *dev); 32int virtex_device_fixup(struct platform_device *dev);
33 33
34/* SPI Controller IP */
35struct xspi_platform_data {
36 s16 bus_num;
37 u16 num_chipselect;
38 u32 speed_hz;
39};
40
34#endif /* __ASM_VIRTEX_DEVICES_H__ */ 41#endif /* __ASM_VIRTEX_DEVICES_H__ */
diff --git a/arch/s390/kernel/ptrace.c b/arch/s390/kernel/ptrace.c
index 2a8f0872ea8b..f4503ca27630 100644
--- a/arch/s390/kernel/ptrace.c
+++ b/arch/s390/kernel/ptrace.c
@@ -294,7 +294,6 @@ poke_user(struct task_struct *child, addr_t addr, addr_t data)
294static int 294static int
295do_ptrace_normal(struct task_struct *child, long request, long addr, long data) 295do_ptrace_normal(struct task_struct *child, long request, long addr, long data)
296{ 296{
297 unsigned long tmp;
298 ptrace_area parea; 297 ptrace_area parea;
299 int copied, ret; 298 int copied, ret;
300 299
@@ -304,10 +303,7 @@ do_ptrace_normal(struct task_struct *child, long request, long addr, long data)
304 /* Remove high order bit from address (only for 31 bit). */ 303 /* Remove high order bit from address (only for 31 bit). */
305 addr &= PSW_ADDR_INSN; 304 addr &= PSW_ADDR_INSN;
306 /* read word at location addr. */ 305 /* read word at location addr. */
307 copied = access_process_vm(child, addr, &tmp, sizeof(tmp), 0); 306 return generic_ptrace_peekdata(child, addr, data);
308 if (copied != sizeof(tmp))
309 return -EIO;
310 return put_user(tmp, (unsigned long __force __user *) data);
311 307
312 case PTRACE_PEEKUSR: 308 case PTRACE_PEEKUSR:
313 /* read the word at location addr in the USER area. */ 309 /* read the word at location addr in the USER area. */
@@ -318,10 +314,7 @@ do_ptrace_normal(struct task_struct *child, long request, long addr, long data)
318 /* Remove high order bit from address (only for 31 bit). */ 314 /* Remove high order bit from address (only for 31 bit). */
319 addr &= PSW_ADDR_INSN; 315 addr &= PSW_ADDR_INSN;
320 /* write the word at location addr. */ 316 /* write the word at location addr. */
321 copied = access_process_vm(child, addr, &data, sizeof(data),1); 317 return generic_ptrace_pokedata(child, addr, data);
322 if (copied != sizeof(data))
323 return -EIO;
324 return 0;
325 318
326 case PTRACE_POKEUSR: 319 case PTRACE_POKEUSR:
327 /* write the word at location addr in the USER area */ 320 /* write the word at location addr in the USER area */
diff --git a/arch/s390/kernel/traps.c b/arch/s390/kernel/traps.c
index 81e03b9c3841..8ec9def83ccb 100644
--- a/arch/s390/kernel/traps.c
+++ b/arch/s390/kernel/traps.c
@@ -262,6 +262,7 @@ void die(const char * str, struct pt_regs * regs, long err)
262 print_modules(); 262 print_modules();
263 show_regs(regs); 263 show_regs(regs);
264 bust_spinlocks(0); 264 bust_spinlocks(0);
265 add_taint(TAINT_DIE);
265 spin_unlock_irq(&die_lock); 266 spin_unlock_irq(&die_lock);
266 if (in_interrupt()) 267 if (in_interrupt())
267 panic("Fatal exception in interrupt"); 268 panic("Fatal exception in interrupt");
diff --git a/arch/sh/kernel/ptrace.c b/arch/sh/kernel/ptrace.c
index f2eaa485d04d..891d1d46c902 100644
--- a/arch/sh/kernel/ptrace.c
+++ b/arch/sh/kernel/ptrace.c
@@ -91,17 +91,8 @@ long arch_ptrace(struct task_struct *child, long request, long addr, long data)
91 switch (request) { 91 switch (request) {
92 /* when I and D space are separate, these will need to be fixed. */ 92 /* when I and D space are separate, these will need to be fixed. */
93 case PTRACE_PEEKTEXT: /* read word at location addr. */ 93 case PTRACE_PEEKTEXT: /* read word at location addr. */
94 case PTRACE_PEEKDATA: { 94 case PTRACE_PEEKDATA:
95 unsigned long tmp; 95 ret = generic_ptrace_peekdata(child, addr, data);
96 int copied;
97
98 copied = access_process_vm(child, addr, &tmp, sizeof(tmp), 0);
99 ret = -EIO;
100 if (copied != sizeof(tmp))
101 break;
102 ret = put_user(tmp,(unsigned long __user *) data);
103 break;
104 }
105 96
106 /* read the word at location addr in the USER area. */ 97 /* read the word at location addr in the USER area. */
107 case PTRACE_PEEKUSR: { 98 case PTRACE_PEEKUSR: {
@@ -135,10 +126,7 @@ long arch_ptrace(struct task_struct *child, long request, long addr, long data)
135 /* when I and D space are separate, this will have to be fixed. */ 126 /* when I and D space are separate, this will have to be fixed. */
136 case PTRACE_POKETEXT: /* write the word at location addr. */ 127 case PTRACE_POKETEXT: /* write the word at location addr. */
137 case PTRACE_POKEDATA: 128 case PTRACE_POKEDATA:
138 ret = 0; 129 ret = generic_ptrace_pokedata(child, addr, data);
139 if (access_process_vm(child, addr, &data, sizeof(data), 1) == sizeof(data))
140 break;
141 ret = -EIO;
142 break; 130 break;
143 131
144 case PTRACE_POKEUSR: /* write the word at location addr in the USER area */ 132 case PTRACE_POKEUSR: /* write the word at location addr in the USER area */
diff --git a/arch/sh/kernel/traps.c b/arch/sh/kernel/traps.c
index 05a40f3c30bf..502d43e4785c 100644
--- a/arch/sh/kernel/traps.c
+++ b/arch/sh/kernel/traps.c
@@ -103,6 +103,7 @@ void die(const char * str, struct pt_regs * regs, long err)
103 (unsigned long)task_stack_page(current)); 103 (unsigned long)task_stack_page(current));
104 104
105 bust_spinlocks(0); 105 bust_spinlocks(0);
106 add_taint(TAINT_DIE);
106 spin_unlock_irq(&die_lock); 107 spin_unlock_irq(&die_lock);
107 108
108 if (kexec_should_crash(current)) 109 if (kexec_should_crash(current))
diff --git a/arch/sh64/kernel/ptrace.c b/arch/sh64/kernel/ptrace.c
index 4e95e18b46d9..df06c6477468 100644
--- a/arch/sh64/kernel/ptrace.c
+++ b/arch/sh64/kernel/ptrace.c
@@ -129,17 +129,9 @@ long arch_ptrace(struct task_struct *child, long request, long addr, long data)
129 switch (request) { 129 switch (request) {
130 /* when I and D space are separate, these will need to be fixed. */ 130 /* when I and D space are separate, these will need to be fixed. */
131 case PTRACE_PEEKTEXT: /* read word at location addr. */ 131 case PTRACE_PEEKTEXT: /* read word at location addr. */
132 case PTRACE_PEEKDATA: { 132 case PTRACE_PEEKDATA:
133 unsigned long tmp; 133 ret = generic_ptrace_peekdata(child, addr, data);
134 int copied;
135
136 copied = access_process_vm(child, addr, &tmp, sizeof(tmp), 0);
137 ret = -EIO;
138 if (copied != sizeof(tmp))
139 break;
140 ret = put_user(tmp,(unsigned long *) data);
141 break; 134 break;
142 }
143 135
144 /* read the word at location addr in the USER area. */ 136 /* read the word at location addr in the USER area. */
145 case PTRACE_PEEKUSR: { 137 case PTRACE_PEEKUSR: {
@@ -166,10 +158,7 @@ long arch_ptrace(struct task_struct *child, long request, long addr, long data)
166 /* when I and D space are separate, this will have to be fixed. */ 158 /* when I and D space are separate, this will have to be fixed. */
167 case PTRACE_POKETEXT: /* write the word at location addr. */ 159 case PTRACE_POKETEXT: /* write the word at location addr. */
168 case PTRACE_POKEDATA: 160 case PTRACE_POKEDATA:
169 ret = 0; 161 ret = generic_ptrace_pokedata(child, addr, data);
170 if (access_process_vm(child, addr, &data, sizeof(data), 1) == sizeof(data))
171 break;
172 ret = -EIO;
173 break; 162 break;
174 163
175 case PTRACE_POKEUSR: 164 case PTRACE_POKEUSR:
diff --git a/arch/sh64/lib/c-checksum.c b/arch/sh64/lib/c-checksum.c
index 4b2676380deb..bd5501760240 100644
--- a/arch/sh64/lib/c-checksum.c
+++ b/arch/sh64/lib/c-checksum.c
@@ -213,3 +213,4 @@ __wsum csum_tcpudp_nofold(__be32 saddr, __be32 daddr,
213 213
214 return (__wsum)result; 214 return (__wsum)result;
215} 215}
216EXPORT_SYMBOL(csum_tcpudp_nofold);
diff --git a/arch/sparc/Kconfig b/arch/sparc/Kconfig
index 8567cc901942..73df7115325b 100644
--- a/arch/sparc/Kconfig
+++ b/arch/sparc/Kconfig
@@ -217,6 +217,9 @@ source "drivers/pci/Kconfig"
217 217
218endif 218endif
219 219
220config NO_DMA
221 def_bool !PCI
222
220config SUN_OPENPROMFS 223config SUN_OPENPROMFS
221 tristate "Openprom tree appears in /proc/openprom" 224 tristate "Openprom tree appears in /proc/openprom"
222 help 225 help
diff --git a/arch/sparc/kernel/traps.c b/arch/sparc/kernel/traps.c
index dc9ffea2a4f7..3bc3bff51e08 100644
--- a/arch/sparc/kernel/traps.c
+++ b/arch/sparc/kernel/traps.c
@@ -101,6 +101,7 @@ void die_if_kernel(char *str, struct pt_regs *regs)
101 101
102 printk("%s(%d): %s [#%d]\n", current->comm, current->pid, str, ++die_counter); 102 printk("%s(%d): %s [#%d]\n", current->comm, current->pid, str, ++die_counter);
103 show_regs(regs); 103 show_regs(regs);
104 add_taint(TAINT_DIE);
104 105
105 __SAVE; __SAVE; __SAVE; __SAVE; 106 __SAVE; __SAVE; __SAVE; __SAVE;
106 __SAVE; __SAVE; __SAVE; __SAVE; 107 __SAVE; __SAVE; __SAVE; __SAVE;
diff --git a/arch/sparc64/defconfig b/arch/sparc64/defconfig
index 65840a62bb9c..45ebf91a280c 100644
--- a/arch/sparc64/defconfig
+++ b/arch/sparc64/defconfig
@@ -1,7 +1,7 @@
1# 1#
2# Automatically generated make config: don't edit 2# Automatically generated make config: don't edit
3# Linux kernel version: 2.6.22-rc1 3# Linux kernel version: 2.6.22
4# Mon May 14 04:17:48 2007 4# Tue Jul 17 01:19:52 2007
5# 5#
6CONFIG_SPARC=y 6CONFIG_SPARC=y
7CONFIG_SPARC64=y 7CONFIG_SPARC64=y
@@ -42,12 +42,11 @@ CONFIG_LOCALVERSION=""
42# CONFIG_LOCALVERSION_AUTO is not set 42# CONFIG_LOCALVERSION_AUTO is not set
43CONFIG_SWAP=y 43CONFIG_SWAP=y
44CONFIG_SYSVIPC=y 44CONFIG_SYSVIPC=y
45# CONFIG_IPC_NS is not set
46CONFIG_SYSVIPC_SYSCTL=y 45CONFIG_SYSVIPC_SYSCTL=y
47CONFIG_POSIX_MQUEUE=y 46CONFIG_POSIX_MQUEUE=y
48# CONFIG_BSD_PROCESS_ACCT is not set 47# CONFIG_BSD_PROCESS_ACCT is not set
49# CONFIG_TASKSTATS is not set 48# CONFIG_TASKSTATS is not set
50# CONFIG_UTS_NS is not set 49# CONFIG_USER_NS is not set
51# CONFIG_AUDIT is not set 50# CONFIG_AUDIT is not set
52# CONFIG_IKCONFIG is not set 51# CONFIG_IKCONFIG is not set
53CONFIG_LOG_BUF_SHIFT=18 52CONFIG_LOG_BUF_SHIFT=18
@@ -82,22 +81,15 @@ CONFIG_SLUB=y
82CONFIG_RT_MUTEXES=y 81CONFIG_RT_MUTEXES=y
83# CONFIG_TINY_SHMEM is not set 82# CONFIG_TINY_SHMEM is not set
84CONFIG_BASE_SMALL=0 83CONFIG_BASE_SMALL=0
85
86#
87# Loadable module support
88#
89CONFIG_MODULES=y 84CONFIG_MODULES=y
90CONFIG_MODULE_UNLOAD=y 85CONFIG_MODULE_UNLOAD=y
91CONFIG_MODULE_FORCE_UNLOAD=y 86CONFIG_MODULE_FORCE_UNLOAD=y
92CONFIG_MODVERSIONS=y 87CONFIG_MODVERSIONS=y
93CONFIG_MODULE_SRCVERSION_ALL=y 88CONFIG_MODULE_SRCVERSION_ALL=y
94CONFIG_KMOD=y 89CONFIG_KMOD=y
95
96#
97# Block layer
98#
99CONFIG_BLOCK=y 90CONFIG_BLOCK=y
100CONFIG_BLK_DEV_IO_TRACE=y 91CONFIG_BLK_DEV_IO_TRACE=y
92CONFIG_BLK_DEV_BSG=y
101 93
102# 94#
103# IO Schedulers 95# IO Schedulers
@@ -156,12 +148,15 @@ CONFIG_SPLIT_PTLOCK_CPUS=4
156CONFIG_RESOURCES_64BIT=y 148CONFIG_RESOURCES_64BIT=y
157CONFIG_ZONE_DMA_FLAG=0 149CONFIG_ZONE_DMA_FLAG=0
158CONFIG_NR_QUICK=1 150CONFIG_NR_QUICK=1
151CONFIG_VIRT_TO_BUS=y
159CONFIG_SBUS=y 152CONFIG_SBUS=y
160CONFIG_SBUSCHAR=y 153CONFIG_SBUSCHAR=y
161CONFIG_SUN_AUXIO=y 154CONFIG_SUN_AUXIO=y
162CONFIG_SUN_IO=y 155CONFIG_SUN_IO=y
156# CONFIG_SUN_LDOMS is not set
163CONFIG_PCI=y 157CONFIG_PCI=y
164CONFIG_PCI_DOMAINS=y 158CONFIG_PCI_DOMAINS=y
159CONFIG_PCI_SYSCALL=y
165CONFIG_ARCH_SUPPORTS_MSI=y 160CONFIG_ARCH_SUPPORTS_MSI=y
166CONFIG_PCI_MSI=y 161CONFIG_PCI_MSI=y
167# CONFIG_PCI_DEBUG is not set 162# CONFIG_PCI_DEBUG is not set
@@ -246,10 +241,6 @@ CONFIG_IPV6_TUNNEL=m
246# CONFIG_IPV6_MULTIPLE_TABLES is not set 241# CONFIG_IPV6_MULTIPLE_TABLES is not set
247# CONFIG_NETWORK_SECMARK is not set 242# CONFIG_NETWORK_SECMARK is not set
248# CONFIG_NETFILTER is not set 243# CONFIG_NETFILTER is not set
249
250#
251# DCCP Configuration (EXPERIMENTAL)
252#
253CONFIG_IP_DCCP=m 244CONFIG_IP_DCCP=m
254CONFIG_INET_DCCP_DIAG=m 245CONFIG_INET_DCCP_DIAG=m
255CONFIG_IP_DCCP_ACKVEC=y 246CONFIG_IP_DCCP_ACKVEC=y
@@ -269,15 +260,7 @@ CONFIG_IP_DCCP_CCID3_RTO=100
269# 260#
270# CONFIG_IP_DCCP_DEBUG is not set 261# CONFIG_IP_DCCP_DEBUG is not set
271# CONFIG_NET_DCCPPROBE is not set 262# CONFIG_NET_DCCPPROBE is not set
272
273#
274# SCTP Configuration (EXPERIMENTAL)
275#
276# CONFIG_IP_SCTP is not set 263# CONFIG_IP_SCTP is not set
277
278#
279# TIPC Configuration (EXPERIMENTAL)
280#
281# CONFIG_TIPC is not set 264# CONFIG_TIPC is not set
282# CONFIG_ATM is not set 265# CONFIG_ATM is not set
283# CONFIG_BRIDGE is not set 266# CONFIG_BRIDGE is not set
@@ -314,6 +297,7 @@ CONFIG_NET_TCPPROBE=m
314# CONFIG_MAC80211 is not set 297# CONFIG_MAC80211 is not set
315# CONFIG_IEEE80211 is not set 298# CONFIG_IEEE80211 is not set
316# CONFIG_RFKILL is not set 299# CONFIG_RFKILL is not set
300# CONFIG_NET_9P is not set
317 301
318# 302#
319# Device Drivers 303# Device Drivers
@@ -328,26 +312,10 @@ CONFIG_FW_LOADER=y
328# CONFIG_DEBUG_DRIVER is not set 312# CONFIG_DEBUG_DRIVER is not set
329# CONFIG_DEBUG_DEVRES is not set 313# CONFIG_DEBUG_DEVRES is not set
330# CONFIG_SYS_HYPERVISOR is not set 314# CONFIG_SYS_HYPERVISOR is not set
331
332#
333# Connector - unified userspace <-> kernelspace linker
334#
335CONFIG_CONNECTOR=m 315CONFIG_CONNECTOR=m
336# CONFIG_MTD is not set 316# CONFIG_MTD is not set
337
338#
339# Parallel port support
340#
341# CONFIG_PARPORT is not set 317# CONFIG_PARPORT is not set
342 318CONFIG_BLK_DEV=y
343#
344# Plug and Play support
345#
346# CONFIG_PNPACPI is not set
347
348#
349# Block devices
350#
351# CONFIG_BLK_DEV_FD is not set 319# CONFIG_BLK_DEV_FD is not set
352# CONFIG_BLK_CPQ_DA is not set 320# CONFIG_BLK_CPQ_DA is not set
353# CONFIG_BLK_CPQ_CISS_DA is not set 321# CONFIG_BLK_CPQ_CISS_DA is not set
@@ -364,18 +332,11 @@ CONFIG_CDROM_PKTCDVD=m
364CONFIG_CDROM_PKTCDVD_BUFFERS=8 332CONFIG_CDROM_PKTCDVD_BUFFERS=8
365CONFIG_CDROM_PKTCDVD_WCACHE=y 333CONFIG_CDROM_PKTCDVD_WCACHE=y
366CONFIG_ATA_OVER_ETH=m 334CONFIG_ATA_OVER_ETH=m
367 335CONFIG_MISC_DEVICES=y
368#
369# Misc devices
370#
371# CONFIG_PHANTOM is not set 336# CONFIG_PHANTOM is not set
337# CONFIG_EEPROM_93CX6 is not set
372# CONFIG_SGI_IOC4 is not set 338# CONFIG_SGI_IOC4 is not set
373# CONFIG_TIFM_CORE is not set 339# CONFIG_TIFM_CORE is not set
374# CONFIG_BLINK is not set
375
376#
377# ATA/ATAPI/MFM/RLL support
378#
379CONFIG_IDE=y 340CONFIG_IDE=y
380CONFIG_BLK_DEV_IDE=y 341CONFIG_BLK_DEV_IDE=y
381 342
@@ -440,6 +401,7 @@ CONFIG_BLK_DEV_IDEDMA=y
440# 401#
441CONFIG_RAID_ATTRS=m 402CONFIG_RAID_ATTRS=m
442CONFIG_SCSI=y 403CONFIG_SCSI=y
404CONFIG_SCSI_DMA=y
443# CONFIG_SCSI_TGT is not set 405# CONFIG_SCSI_TGT is not set
444CONFIG_SCSI_NETLINK=y 406CONFIG_SCSI_NETLINK=y
445CONFIG_SCSI_PROC_FS=y 407CONFIG_SCSI_PROC_FS=y
@@ -505,7 +467,6 @@ CONFIG_ISCSI_TCP=m
505# CONFIG_SCSI_DC395x is not set 467# CONFIG_SCSI_DC395x is not set
506# CONFIG_SCSI_DC390T is not set 468# CONFIG_SCSI_DC390T is not set
507# CONFIG_SCSI_DEBUG is not set 469# CONFIG_SCSI_DEBUG is not set
508# CONFIG_SCSI_ESP_CORE is not set
509# CONFIG_SCSI_SUNESP is not set 470# CONFIG_SCSI_SUNESP is not set
510# CONFIG_SCSI_SRP is not set 471# CONFIG_SCSI_SRP is not set
511# CONFIG_ATA is not set 472# CONFIG_ATA is not set
@@ -545,30 +506,16 @@ CONFIG_DM_ZERO=m
545# 506#
546# CONFIG_FIREWIRE is not set 507# CONFIG_FIREWIRE is not set
547# CONFIG_IEEE1394 is not set 508# CONFIG_IEEE1394 is not set
548
549#
550# I2O device support
551#
552# CONFIG_I2O is not set 509# CONFIG_I2O is not set
553
554#
555# Network device support
556#
557CONFIG_NETDEVICES=y 510CONFIG_NETDEVICES=y
511# CONFIG_NETDEVICES_MULTIQUEUE is not set
558CONFIG_DUMMY=m 512CONFIG_DUMMY=m
559# CONFIG_BONDING is not set 513# CONFIG_BONDING is not set
514# CONFIG_MACVLAN is not set
560# CONFIG_EQUALIZER is not set 515# CONFIG_EQUALIZER is not set
561# CONFIG_TUN is not set 516# CONFIG_TUN is not set
562
563#
564# ARCnet devices
565#
566# CONFIG_ARCNET is not set 517# CONFIG_ARCNET is not set
567# CONFIG_PHYLIB is not set 518# CONFIG_PHYLIB is not set
568
569#
570# Ethernet (10 or 100Mbit)
571#
572CONFIG_NET_ETHERNET=y 519CONFIG_NET_ETHERNET=y
573CONFIG_MII=m 520CONFIG_MII=m
574# CONFIG_SUNLANCE is not set 521# CONFIG_SUNLANCE is not set
@@ -578,10 +525,6 @@ CONFIG_MII=m
578# CONFIG_SUNGEM is not set 525# CONFIG_SUNGEM is not set
579CONFIG_CASSINI=m 526CONFIG_CASSINI=m
580# CONFIG_NET_VENDOR_3COM is not set 527# CONFIG_NET_VENDOR_3COM is not set
581
582#
583# Tulip family network device support
584#
585# CONFIG_NET_TULIP is not set 528# CONFIG_NET_TULIP is not set
586# CONFIG_HP100 is not set 529# CONFIG_HP100 is not set
587CONFIG_NET_PCI=y 530CONFIG_NET_PCI=y
@@ -617,7 +560,6 @@ CONFIG_E1000_NAPI=y
617# CONFIG_SIS190 is not set 560# CONFIG_SIS190 is not set
618# CONFIG_SKGE is not set 561# CONFIG_SKGE is not set
619# CONFIG_SKY2 is not set 562# CONFIG_SKY2 is not set
620# CONFIG_SK98LIN is not set
621# CONFIG_VIA_VELOCITY is not set 563# CONFIG_VIA_VELOCITY is not set
622CONFIG_TIGON3=m 564CONFIG_TIGON3=m
623CONFIG_BNX2=m 565CONFIG_BNX2=m
@@ -631,11 +573,6 @@ CONFIG_NETDEV_10000=y
631# CONFIG_MYRI10GE is not set 573# CONFIG_MYRI10GE is not set
632# CONFIG_NETXEN_NIC is not set 574# CONFIG_NETXEN_NIC is not set
633# CONFIG_MLX4_CORE is not set 575# CONFIG_MLX4_CORE is not set
634CONFIG_MLX4_DEBUG=y
635
636#
637# Token Ring devices
638#
639# CONFIG_TR is not set 576# CONFIG_TR is not set
640 577
641# 578#
@@ -665,6 +602,7 @@ CONFIG_PPP_DEFLATE=m
665CONFIG_PPP_BSDCOMP=m 602CONFIG_PPP_BSDCOMP=m
666CONFIG_PPP_MPPE=m 603CONFIG_PPP_MPPE=m
667CONFIG_PPPOE=m 604CONFIG_PPPOE=m
605# CONFIG_PPPOL2TP is not set
668# CONFIG_SLIP is not set 606# CONFIG_SLIP is not set
669CONFIG_SLHC=m 607CONFIG_SLHC=m
670# CONFIG_NET_FC is not set 608# CONFIG_NET_FC is not set
@@ -677,10 +615,6 @@ CONFIG_SLHC=m
677# ISDN subsystem 615# ISDN subsystem
678# 616#
679# CONFIG_ISDN is not set 617# CONFIG_ISDN is not set
680
681#
682# Telephony Support
683#
684# CONFIG_PHONE is not set 618# CONFIG_PHONE is not set
685 619
686# 620#
@@ -688,6 +622,7 @@ CONFIG_SLHC=m
688# 622#
689CONFIG_INPUT=y 623CONFIG_INPUT=y
690# CONFIG_INPUT_FF_MEMLESS is not set 624# CONFIG_INPUT_FF_MEMLESS is not set
625# CONFIG_INPUT_POLLDEV is not set
691 626
692# 627#
693# Userland interfaces 628# Userland interfaces
@@ -733,7 +668,6 @@ CONFIG_INPUT_SPARCSPKR=y
733# CONFIG_INPUT_POWERMATE is not set 668# CONFIG_INPUT_POWERMATE is not set
734# CONFIG_INPUT_YEALINK is not set 669# CONFIG_INPUT_YEALINK is not set
735# CONFIG_INPUT_UINPUT is not set 670# CONFIG_INPUT_UINPUT is not set
736# CONFIG_INPUT_POLLDEV is not set
737 671
738# 672#
739# Hardware I/O ports 673# Hardware I/O ports
@@ -773,10 +707,6 @@ CONFIG_SERIAL_CORE_CONSOLE=y
773# CONFIG_SERIAL_JSM is not set 707# CONFIG_SERIAL_JSM is not set
774CONFIG_UNIX98_PTYS=y 708CONFIG_UNIX98_PTYS=y
775# CONFIG_LEGACY_PTYS is not set 709# CONFIG_LEGACY_PTYS is not set
776
777#
778# IPMI
779#
780# CONFIG_IPMI_HANDLER is not set 710# CONFIG_IPMI_HANDLER is not set
781# CONFIG_WATCHDOG is not set 711# CONFIG_WATCHDOG is not set
782# CONFIG_HW_RANDOM is not set 712# CONFIG_HW_RANDOM is not set
@@ -785,10 +715,6 @@ CONFIG_RTC=y
785# CONFIG_APPLICOM is not set 715# CONFIG_APPLICOM is not set
786# CONFIG_DRM is not set 716# CONFIG_DRM is not set
787# CONFIG_RAW_DRIVER is not set 717# CONFIG_RAW_DRIVER is not set
788
789#
790# TPM devices
791#
792# CONFIG_TCG_TPM is not set 718# CONFIG_TCG_TPM is not set
793CONFIG_DEVPORT=y 719CONFIG_DEVPORT=y
794CONFIG_I2C=y 720CONFIG_I2C=y
@@ -822,6 +748,7 @@ CONFIG_I2C_ALGOBIT=y
822# CONFIG_I2C_SIS5595 is not set 748# CONFIG_I2C_SIS5595 is not set
823# CONFIG_I2C_SIS630 is not set 749# CONFIG_I2C_SIS630 is not set
824# CONFIG_I2C_SIS96X is not set 750# CONFIG_I2C_SIS96X is not set
751# CONFIG_I2C_TAOS_EVM is not set
825# CONFIG_I2C_STUB is not set 752# CONFIG_I2C_STUB is not set
826# CONFIG_I2C_TINY_USB is not set 753# CONFIG_I2C_TINY_USB is not set
827# CONFIG_I2C_VIA is not set 754# CONFIG_I2C_VIA is not set
@@ -833,11 +760,13 @@ CONFIG_I2C_ALGOBIT=y
833# 760#
834# CONFIG_SENSORS_DS1337 is not set 761# CONFIG_SENSORS_DS1337 is not set
835# CONFIG_SENSORS_DS1374 is not set 762# CONFIG_SENSORS_DS1374 is not set
763# CONFIG_DS1682 is not set
836# CONFIG_SENSORS_EEPROM is not set 764# CONFIG_SENSORS_EEPROM is not set
837# CONFIG_SENSORS_PCF8574 is not set 765# CONFIG_SENSORS_PCF8574 is not set
838# CONFIG_SENSORS_PCA9539 is not set 766# CONFIG_SENSORS_PCA9539 is not set
839# CONFIG_SENSORS_PCF8591 is not set 767# CONFIG_SENSORS_PCF8591 is not set
840# CONFIG_SENSORS_MAX6875 is not set 768# CONFIG_SENSORS_MAX6875 is not set
769# CONFIG_SENSORS_TSL2550 is not set
841# CONFIG_I2C_DEBUG_CORE is not set 770# CONFIG_I2C_DEBUG_CORE is not set
842# CONFIG_I2C_DEBUG_ALGO is not set 771# CONFIG_I2C_DEBUG_ALGO is not set
843# CONFIG_I2C_DEBUG_BUS is not set 772# CONFIG_I2C_DEBUG_BUS is not set
@@ -848,11 +777,8 @@ CONFIG_I2C_ALGOBIT=y
848# 777#
849# CONFIG_SPI is not set 778# CONFIG_SPI is not set
850# CONFIG_SPI_MASTER is not set 779# CONFIG_SPI_MASTER is not set
851
852#
853# Dallas's 1-wire bus
854#
855# CONFIG_W1 is not set 780# CONFIG_W1 is not set
781# CONFIG_POWER_SUPPLY is not set
856CONFIG_HWMON=y 782CONFIG_HWMON=y
857# CONFIG_HWMON_VID is not set 783# CONFIG_HWMON_VID is not set
858# CONFIG_SENSORS_ABITUGURU is not set 784# CONFIG_SENSORS_ABITUGURU is not set
@@ -949,6 +875,8 @@ CONFIG_FB_TILEBLITTING=y
949# CONFIG_FB_ASILIANT is not set 875# CONFIG_FB_ASILIANT is not set
950# CONFIG_FB_IMSTT is not set 876# CONFIG_FB_IMSTT is not set
951# CONFIG_FB_SBUS is not set 877# CONFIG_FB_SBUS is not set
878# CONFIG_FB_XVR500 is not set
879# CONFIG_FB_XVR2500 is not set
952# CONFIG_FB_S1D13XXX is not set 880# CONFIG_FB_S1D13XXX is not set
953# CONFIG_FB_NVIDIA is not set 881# CONFIG_FB_NVIDIA is not set
954# CONFIG_FB_RIVA is not set 882# CONFIG_FB_RIVA is not set
@@ -970,9 +898,6 @@ CONFIG_FB_RADEON_I2C=y
970# CONFIG_FB_TRIDENT is not set 898# CONFIG_FB_TRIDENT is not set
971# CONFIG_FB_ARK is not set 899# CONFIG_FB_ARK is not set
972# CONFIG_FB_PM3 is not set 900# CONFIG_FB_PM3 is not set
973# CONFIG_FB_XVR500 is not set
974# CONFIG_FB_XVR2500 is not set
975# CONFIG_FB_PCI is not set
976# CONFIG_FB_VIRTUAL is not set 901# CONFIG_FB_VIRTUAL is not set
977 902
978# 903#
@@ -1118,10 +1043,7 @@ CONFIG_SND_SUN_CS4231=m
1118# 1043#
1119# CONFIG_SOUND_PRIME is not set 1044# CONFIG_SOUND_PRIME is not set
1120CONFIG_AC97_BUS=m 1045CONFIG_AC97_BUS=m
1121 1046CONFIG_HID_SUPPORT=y
1122#
1123# HID Devices
1124#
1125CONFIG_HID=y 1047CONFIG_HID=y
1126# CONFIG_HID_DEBUG is not set 1048# CONFIG_HID_DEBUG is not set
1127 1049
@@ -1132,10 +1054,7 @@ CONFIG_USB_HID=y
1132# CONFIG_USB_HIDINPUT_POWERBOOK is not set 1054# CONFIG_USB_HIDINPUT_POWERBOOK is not set
1133# CONFIG_HID_FF is not set 1055# CONFIG_HID_FF is not set
1134CONFIG_USB_HIDDEV=y 1056CONFIG_USB_HIDDEV=y
1135 1057CONFIG_USB_SUPPORT=y
1136#
1137# USB support
1138#
1139CONFIG_USB_ARCH_HAS_HCD=y 1058CONFIG_USB_ARCH_HAS_HCD=y
1140CONFIG_USB_ARCH_HAS_OHCI=y 1059CONFIG_USB_ARCH_HAS_OHCI=y
1141CONFIG_USB_ARCH_HAS_EHCI=y 1060CONFIG_USB_ARCH_HAS_EHCI=y
@@ -1157,7 +1076,6 @@ CONFIG_USB_EHCI_HCD=m
1157# CONFIG_USB_EHCI_SPLIT_ISO is not set 1076# CONFIG_USB_EHCI_SPLIT_ISO is not set
1158# CONFIG_USB_EHCI_ROOT_HUB_TT is not set 1077# CONFIG_USB_EHCI_ROOT_HUB_TT is not set
1159# CONFIG_USB_EHCI_TT_NEWSCHED is not set 1078# CONFIG_USB_EHCI_TT_NEWSCHED is not set
1160# CONFIG_USB_EHCI_BIG_ENDIAN_MMIO is not set
1161# CONFIG_USB_ISP116X_HCD is not set 1079# CONFIG_USB_ISP116X_HCD is not set
1162CONFIG_USB_OHCI_HCD=y 1080CONFIG_USB_OHCI_HCD=y
1163# CONFIG_USB_OHCI_BIG_ENDIAN_DESC is not set 1081# CONFIG_USB_OHCI_BIG_ENDIAN_DESC is not set
@@ -1165,6 +1083,7 @@ CONFIG_USB_OHCI_HCD=y
1165CONFIG_USB_OHCI_LITTLE_ENDIAN=y 1083CONFIG_USB_OHCI_LITTLE_ENDIAN=y
1166CONFIG_USB_UHCI_HCD=m 1084CONFIG_USB_UHCI_HCD=m
1167# CONFIG_USB_SL811_HCD is not set 1085# CONFIG_USB_SL811_HCD is not set
1086# CONFIG_USB_R8A66597_HCD is not set
1168 1087
1169# 1088#
1170# USB Device Class drivers 1089# USB Device Class drivers
@@ -1256,17 +1175,9 @@ CONFIG_USB_STORAGE=m
1256# 1175#
1257# LED Triggers 1176# LED Triggers
1258# 1177#
1259
1260#
1261# InfiniBand support
1262#
1263# CONFIG_INFINIBAND is not set 1178# CONFIG_INFINIBAND is not set
1264 1179
1265# 1180#
1266# EDAC - error detection and reporting (RAS) (EXPERIMENTAL)
1267#
1268
1269#
1270# Real Time Clock 1181# Real Time Clock
1271# 1182#
1272# CONFIG_RTC_CLASS is not set 1183# CONFIG_RTC_CLASS is not set
@@ -1387,7 +1298,6 @@ CONFIG_RAMFS=y
1387# CONFIG_NCP_FS is not set 1298# CONFIG_NCP_FS is not set
1388# CONFIG_CODA_FS is not set 1299# CONFIG_CODA_FS is not set
1389# CONFIG_AFS_FS is not set 1300# CONFIG_AFS_FS is not set
1390# CONFIG_9P_FS is not set
1391 1301
1392# 1302#
1393# Partition Types 1303# Partition Types
@@ -1465,8 +1375,10 @@ CONFIG_DEBUG_FS=y
1465CONFIG_DEBUG_KERNEL=y 1375CONFIG_DEBUG_KERNEL=y
1466# CONFIG_DEBUG_SHIRQ is not set 1376# CONFIG_DEBUG_SHIRQ is not set
1467CONFIG_DETECT_SOFTLOCKUP=y 1377CONFIG_DETECT_SOFTLOCKUP=y
1378# CONFIG_SCHED_DEBUG is not set
1468CONFIG_SCHEDSTATS=y 1379CONFIG_SCHEDSTATS=y
1469# CONFIG_TIMER_STATS is not set 1380# CONFIG_TIMER_STATS is not set
1381# CONFIG_SLUB_DEBUG_ON is not set
1470# CONFIG_DEBUG_RT_MUTEXES is not set 1382# CONFIG_DEBUG_RT_MUTEXES is not set
1471# CONFIG_RT_MUTEX_TESTER is not set 1383# CONFIG_RT_MUTEX_TESTER is not set
1472# CONFIG_DEBUG_SPINLOCK is not set 1384# CONFIG_DEBUG_SPINLOCK is not set
@@ -1496,10 +1408,10 @@ CONFIG_FORCED_INLINING=y
1496CONFIG_KEYS=y 1408CONFIG_KEYS=y
1497# CONFIG_KEYS_DEBUG_PROC_KEYS is not set 1409# CONFIG_KEYS_DEBUG_PROC_KEYS is not set
1498# CONFIG_SECURITY is not set 1410# CONFIG_SECURITY is not set
1499 1411CONFIG_XOR_BLOCKS=m
1500# 1412CONFIG_ASYNC_CORE=m
1501# Cryptographic options 1413CONFIG_ASYNC_MEMCPY=m
1502# 1414CONFIG_ASYNC_XOR=m
1503CONFIG_CRYPTO=y 1415CONFIG_CRYPTO=y
1504CONFIG_CRYPTO_ALGAPI=y 1416CONFIG_CRYPTO_ALGAPI=y
1505CONFIG_CRYPTO_BLKCIPHER=y 1417CONFIG_CRYPTO_BLKCIPHER=y
@@ -1539,10 +1451,7 @@ CONFIG_CRYPTO_MICHAEL_MIC=m
1539CONFIG_CRYPTO_CRC32C=m 1451CONFIG_CRYPTO_CRC32C=m
1540CONFIG_CRYPTO_CAMELLIA=m 1452CONFIG_CRYPTO_CAMELLIA=m
1541CONFIG_CRYPTO_TEST=m 1453CONFIG_CRYPTO_TEST=m
1542 1454CONFIG_CRYPTO_HW=y
1543#
1544# Hardware crypto devices
1545#
1546 1455
1547# 1456#
1548# Library routines 1457# Library routines
diff --git a/arch/sparc64/kernel/hvtramp.S b/arch/sparc64/kernel/hvtramp.S
index 76a090e2c2a8..a55c252e18cc 100644
--- a/arch/sparc64/kernel/hvtramp.S
+++ b/arch/sparc64/kernel/hvtramp.S
@@ -10,6 +10,7 @@
10#include <asm/hvtramp.h> 10#include <asm/hvtramp.h>
11#include <asm/pstate.h> 11#include <asm/pstate.h>
12#include <asm/ptrace.h> 12#include <asm/ptrace.h>
13#include <asm/head.h>
13#include <asm/asi.h> 14#include <asm/asi.h>
14 15
15 .text 16 .text
@@ -28,7 +29,7 @@
28 * First setup basic privileged cpu state. 29 * First setup basic privileged cpu state.
29 */ 30 */
30hv_cpu_startup: 31hv_cpu_startup:
31 wrpr %g0, 0, %gl 32 SET_GL(0)
32 wrpr %g0, 15, %pil 33 wrpr %g0, 15, %pil
33 wrpr %g0, 0, %canrestore 34 wrpr %g0, 0, %canrestore
34 wrpr %g0, 0, %otherwin 35 wrpr %g0, 0, %otherwin
diff --git a/arch/sparc64/kernel/signal.c b/arch/sparc64/kernel/signal.c
index 203e87301005..fb13775b3682 100644
--- a/arch/sparc64/kernel/signal.c
+++ b/arch/sparc64/kernel/signal.c
@@ -289,9 +289,7 @@ void do_rt_sigreturn(struct pt_regs *regs)
289 struct rt_signal_frame __user *sf; 289 struct rt_signal_frame __user *sf;
290 unsigned long tpc, tnpc, tstate; 290 unsigned long tpc, tnpc, tstate;
291 __siginfo_fpu_t __user *fpu_save; 291 __siginfo_fpu_t __user *fpu_save;
292 mm_segment_t old_fs;
293 sigset_t set; 292 sigset_t set;
294 stack_t st;
295 int err; 293 int err;
296 294
297 /* Always make any pending restarted system calls return -EINTR */ 295 /* Always make any pending restarted system calls return -EINTR */
@@ -327,20 +325,13 @@ void do_rt_sigreturn(struct pt_regs *regs)
327 err |= restore_fpu_state(regs, &sf->fpu_state); 325 err |= restore_fpu_state(regs, &sf->fpu_state);
328 326
329 err |= __copy_from_user(&set, &sf->mask, sizeof(sigset_t)); 327 err |= __copy_from_user(&set, &sf->mask, sizeof(sigset_t));
330 err |= __copy_from_user(&st, &sf->stack, sizeof(stack_t)); 328 err |= do_sigaltstack(&sf->stack, NULL, (unsigned long)sf);
331 329
332 if (err) 330 if (err)
333 goto segv; 331 goto segv;
334 332
335 regs->tpc = tpc; 333 regs->tpc = tpc;
336 regs->tnpc = tnpc; 334 regs->tnpc = tnpc;
337
338 /* It is more difficult to avoid calling this function than to
339 call it and ignore errors. */
340 old_fs = get_fs();
341 set_fs(KERNEL_DS);
342 do_sigaltstack((const stack_t __user *) &st, NULL, (unsigned long)sf);
343 set_fs(old_fs);
344 335
345 sigdelsetmask(&set, ~_BLOCKABLE); 336 sigdelsetmask(&set, ~_BLOCKABLE);
346 spin_lock_irq(&current->sighand->siglock); 337 spin_lock_irq(&current->sighand->siglock);
diff --git a/arch/sparc64/kernel/traps.c b/arch/sparc64/kernel/traps.c
index 00a9e3286c83..6ef2d299fb10 100644
--- a/arch/sparc64/kernel/traps.c
+++ b/arch/sparc64/kernel/traps.c
@@ -2225,6 +2225,7 @@ void die_if_kernel(char *str, struct pt_regs *regs)
2225 notify_die(DIE_OOPS, str, regs, 0, 255, SIGSEGV); 2225 notify_die(DIE_OOPS, str, regs, 0, 255, SIGSEGV);
2226 __asm__ __volatile__("flushw"); 2226 __asm__ __volatile__("flushw");
2227 __show_regs(regs); 2227 __show_regs(regs);
2228 add_taint(TAINT_DIE);
2228 if (regs->tstate & TSTATE_PRIV) { 2229 if (regs->tstate & TSTATE_PRIV) {
2229 struct reg_window *rw = (struct reg_window *) 2230 struct reg_window *rw = (struct reg_window *)
2230 (regs->u_regs[UREG_FP] + STACK_BIAS); 2231 (regs->u_regs[UREG_FP] + STACK_BIAS);
diff --git a/arch/um/drivers/pcap_user.c b/arch/um/drivers/pcap_user.c
index 483aa15222a4..1316456e2a28 100644
--- a/arch/um/drivers/pcap_user.c
+++ b/arch/um/drivers/pcap_user.c
@@ -53,7 +53,7 @@ static int pcap_open(void *data)
53 return -EIO; 53 return -EIO;
54 } 54 }
55 55
56 pri->compiled = um_kmalloc(sizeof(struct bpf_program)); 56 pri->compiled = kmalloc(sizeof(struct bpf_program), UM_GFP_KERNEL);
57 if(pri->compiled == NULL){ 57 if(pri->compiled == NULL){
58 printk(UM_KERN_ERR "pcap_open : kmalloc failed\n"); 58 printk(UM_KERN_ERR "pcap_open : kmalloc failed\n");
59 return -ENOMEM; 59 return -ENOMEM;
diff --git a/arch/um/kernel/ptrace.c b/arch/um/kernel/ptrace.c
index 627742d89434..6916c8888dba 100644
--- a/arch/um/kernel/ptrace.c
+++ b/arch/um/kernel/ptrace.c
@@ -52,17 +52,9 @@ long arch_ptrace(struct task_struct *child, long request, long addr, long data)
52 switch (request) { 52 switch (request) {
53 /* when I and D space are separate, these will need to be fixed. */ 53 /* when I and D space are separate, these will need to be fixed. */
54 case PTRACE_PEEKTEXT: /* read word at location addr. */ 54 case PTRACE_PEEKTEXT: /* read word at location addr. */
55 case PTRACE_PEEKDATA: { 55 case PTRACE_PEEKDATA:
56 unsigned long tmp; 56 ret = generic_ptrace_peekdata(child, addr, data);
57 int copied;
58
59 ret = -EIO;
60 copied = access_process_vm(child, addr, &tmp, sizeof(tmp), 0);
61 if (copied != sizeof(tmp))
62 break;
63 ret = put_user(tmp, p);
64 break; 57 break;
65 }
66 58
67 /* read the word at location addr in the USER area. */ 59 /* read the word at location addr in the USER area. */
68 case PTRACE_PEEKUSR: 60 case PTRACE_PEEKUSR:
@@ -72,11 +64,7 @@ long arch_ptrace(struct task_struct *child, long request, long addr, long data)
72 /* when I and D space are separate, this will have to be fixed. */ 64 /* when I and D space are separate, this will have to be fixed. */
73 case PTRACE_POKETEXT: /* write the word at location addr. */ 65 case PTRACE_POKETEXT: /* write the word at location addr. */
74 case PTRACE_POKEDATA: 66 case PTRACE_POKEDATA:
75 ret = -EIO; 67 ret = generic_ptrace_pokedata(child, addr, data);
76 if (access_process_vm(child, addr, &data, sizeof(data),
77 1) != sizeof(data))
78 break;
79 ret = 0;
80 break; 68 break;
81 69
82 case PTRACE_POKEUSR: /* write the word at location addr in the USER area */ 70 case PTRACE_POKEUSR: /* write the word at location addr in the USER area */
diff --git a/arch/v850/kernel/ptrace.c b/arch/v850/kernel/ptrace.c
index a9b09343097d..a458ac941b25 100644
--- a/arch/v850/kernel/ptrace.c
+++ b/arch/v850/kernel/ptrace.c
@@ -117,24 +117,16 @@ long arch_ptrace(struct task_struct *child, long request, long addr, long data)
117 int rval; 117 int rval;
118 118
119 switch (request) { 119 switch (request) {
120 unsigned long val, copied; 120 unsigned long val;
121 121
122 case PTRACE_PEEKTEXT: /* read word at location addr. */ 122 case PTRACE_PEEKTEXT: /* read word at location addr. */
123 case PTRACE_PEEKDATA: 123 case PTRACE_PEEKDATA:
124 copied = access_process_vm(child, addr, &val, sizeof(val), 0); 124 rval = generic_ptrace_peekdata(child, addr, data);
125 rval = -EIO;
126 if (copied != sizeof(val))
127 break;
128 rval = put_user(val, (unsigned long *)data);
129 goto out; 125 goto out;
130 126
131 case PTRACE_POKETEXT: /* write the word at location addr. */ 127 case PTRACE_POKETEXT: /* write the word at location addr. */
132 case PTRACE_POKEDATA: 128 case PTRACE_POKEDATA:
133 rval = 0; 129 rval = generic_ptrace_pokedata(child, addr, data);
134 if (access_process_vm(child, addr, &data, sizeof(data), 1)
135 == sizeof(data))
136 break;
137 rval = -EIO;
138 goto out; 130 goto out;
139 131
140 /* Read/write the word at location ADDR in the registers. */ 132 /* Read/write the word at location ADDR in the registers. */
diff --git a/arch/x86_64/Kconfig b/arch/x86_64/Kconfig
index 8bdd25ac1542..14bf8ce3ea23 100644
--- a/arch/x86_64/Kconfig
+++ b/arch/x86_64/Kconfig
@@ -774,8 +774,8 @@ menu "Instrumentation Support"
774source "arch/x86_64/oprofile/Kconfig" 774source "arch/x86_64/oprofile/Kconfig"
775 775
776config KPROBES 776config KPROBES
777 bool "Kprobes (EXPERIMENTAL)" 777 bool "Kprobes"
778 depends on KALLSYMS && EXPERIMENTAL && MODULES 778 depends on KALLSYMS && MODULES
779 help 779 help
780 Kprobes allows you to trap at almost any kernel address and 780 Kprobes allows you to trap at almost any kernel address and
781 execute a callback function. register_kprobe() establishes 781 execute a callback function. register_kprobe() establishes
diff --git a/arch/x86_64/kernel/nmi.c b/arch/x86_64/kernel/nmi.c
index 931c64bad5e6..edbbc59b7523 100644
--- a/arch/x86_64/kernel/nmi.c
+++ b/arch/x86_64/kernel/nmi.c
@@ -296,7 +296,7 @@ static DEFINE_PER_CPU(unsigned, last_irq_sum);
296static DEFINE_PER_CPU(local_t, alert_counter); 296static DEFINE_PER_CPU(local_t, alert_counter);
297static DEFINE_PER_CPU(int, nmi_touch); 297static DEFINE_PER_CPU(int, nmi_touch);
298 298
299void touch_nmi_watchdog (void) 299void touch_nmi_watchdog(void)
300{ 300{
301 if (nmi_watchdog > 0) { 301 if (nmi_watchdog > 0) {
302 unsigned cpu; 302 unsigned cpu;
@@ -306,8 +306,10 @@ void touch_nmi_watchdog (void)
306 * do it ourselves because the alert count increase is not 306 * do it ourselves because the alert count increase is not
307 * atomic. 307 * atomic.
308 */ 308 */
309 for_each_present_cpu (cpu) 309 for_each_present_cpu(cpu) {
310 per_cpu(nmi_touch, cpu) = 1; 310 if (per_cpu(nmi_touch, cpu) != 1)
311 per_cpu(nmi_touch, cpu) = 1;
312 }
311 } 313 }
312 314
313 touch_softlockup_watchdog(); 315 touch_softlockup_watchdog();
diff --git a/arch/x86_64/kernel/ptrace.c b/arch/x86_64/kernel/ptrace.c
index 9409117b9f19..fa6775ef729f 100644
--- a/arch/x86_64/kernel/ptrace.c
+++ b/arch/x86_64/kernel/ptrace.c
@@ -313,17 +313,9 @@ long arch_ptrace(struct task_struct *child, long request, long addr, long data)
313 switch (request) { 313 switch (request) {
314 /* when I and D space are separate, these will need to be fixed. */ 314 /* when I and D space are separate, these will need to be fixed. */
315 case PTRACE_PEEKTEXT: /* read word at location addr. */ 315 case PTRACE_PEEKTEXT: /* read word at location addr. */
316 case PTRACE_PEEKDATA: { 316 case PTRACE_PEEKDATA:
317 unsigned long tmp; 317 ret = generic_ptrace_peekdata(child, addr, data);
318 int copied;
319
320 copied = access_process_vm(child, addr, &tmp, sizeof(tmp), 0);
321 ret = -EIO;
322 if (copied != sizeof(tmp))
323 break;
324 ret = put_user(tmp,(unsigned long __user *) data);
325 break; 318 break;
326 }
327 319
328 /* read the word at location addr in the USER area. */ 320 /* read the word at location addr in the USER area. */
329 case PTRACE_PEEKUSR: { 321 case PTRACE_PEEKUSR: {
@@ -367,10 +359,7 @@ long arch_ptrace(struct task_struct *child, long request, long addr, long data)
367 /* when I and D space are separate, this will have to be fixed. */ 359 /* when I and D space are separate, this will have to be fixed. */
368 case PTRACE_POKETEXT: /* write the word at location addr. */ 360 case PTRACE_POKETEXT: /* write the word at location addr. */
369 case PTRACE_POKEDATA: 361 case PTRACE_POKEDATA:
370 ret = 0; 362 ret = generic_ptrace_pokedata(child, addr, data);
371 if (access_process_vm(child, addr, &data, sizeof(data), 1) == sizeof(data))
372 break;
373 ret = -EIO;
374 break; 363 break;
375 364
376 case PTRACE_POKEUSR: /* write the word at location addr in the USER area */ 365 case PTRACE_POKEUSR: /* write the word at location addr in the USER area */
diff --git a/arch/x86_64/kernel/smp.c b/arch/x86_64/kernel/smp.c
index 2ff468591625..0694940b2e73 100644
--- a/arch/x86_64/kernel/smp.c
+++ b/arch/x86_64/kernel/smp.c
@@ -357,7 +357,7 @@ __smp_call_function_single(int cpu, void (*func) (void *info), void *info,
357} 357}
358 358
359/* 359/*
360 * smp_call_function_single - Run a function on another CPU 360 * smp_call_function_single - Run a function on a specific CPU
361 * @func: The function to run. This must be fast and non-blocking. 361 * @func: The function to run. This must be fast and non-blocking.
362 * @info: An arbitrary pointer to pass to the function. 362 * @info: An arbitrary pointer to pass to the function.
363 * @nonatomic: Currently unused. 363 * @nonatomic: Currently unused.
@@ -374,14 +374,18 @@ int smp_call_function_single (int cpu, void (*func) (void *info), void *info,
374{ 374{
375 /* prevent preemption and reschedule on another processor */ 375 /* prevent preemption and reschedule on another processor */
376 int me = get_cpu(); 376 int me = get_cpu();
377
378 /* Can deadlock when called with interrupts disabled */
379 WARN_ON(irqs_disabled());
380
377 if (cpu == me) { 381 if (cpu == me) {
382 local_irq_disable();
383 func(info);
384 local_irq_enable();
378 put_cpu(); 385 put_cpu();
379 return 0; 386 return 0;
380 } 387 }
381 388
382 /* Can deadlock when called with interrupts disabled */
383 WARN_ON(irqs_disabled());
384
385 spin_lock_bh(&call_lock); 389 spin_lock_bh(&call_lock);
386 __smp_call_function_single(cpu, func, info, nonatomic, wait); 390 __smp_call_function_single(cpu, func, info, nonatomic, wait);
387 spin_unlock_bh(&call_lock); 391 spin_unlock_bh(&call_lock);
diff --git a/arch/x86_64/kernel/traps.c b/arch/x86_64/kernel/traps.c
index 7fa155c394d9..74cbeb2e99a6 100644
--- a/arch/x86_64/kernel/traps.c
+++ b/arch/x86_64/kernel/traps.c
@@ -330,6 +330,7 @@ static int print_trace_stack(void *data, char *name)
330 330
331static void print_trace_address(void *data, unsigned long addr) 331static void print_trace_address(void *data, unsigned long addr)
332{ 332{
333 touch_nmi_watchdog();
333 printk_address(addr); 334 printk_address(addr);
334} 335}
335 336
@@ -518,6 +519,7 @@ void __kprobes __die(const char * str, struct pt_regs * regs, long err)
518 printk("\n"); 519 printk("\n");
519 notify_die(DIE_OOPS, str, regs, err, current->thread.trap_no, SIGSEGV); 520 notify_die(DIE_OOPS, str, regs, err, current->thread.trap_no, SIGSEGV);
520 show_registers(regs); 521 show_registers(regs);
522 add_taint(TAINT_DIE);
521 /* Executive summary in case the oops scrolled away */ 523 /* Executive summary in case the oops scrolled away */
522 printk(KERN_ALERT "RIP "); 524 printk(KERN_ALERT "RIP ");
523 printk_address(regs->rip); 525 printk_address(regs->rip);
diff --git a/arch/xtensa/kernel/ptrace.c b/arch/xtensa/kernel/ptrace.c
index 14104ff63093..06a13d9b69db 100644
--- a/arch/xtensa/kernel/ptrace.c
+++ b/arch/xtensa/kernel/ptrace.c
@@ -50,18 +50,8 @@ long arch_ptrace(struct task_struct *child, long request, long addr, long data)
50 switch (request) { 50 switch (request) {
51 case PTRACE_PEEKTEXT: /* read word at location addr. */ 51 case PTRACE_PEEKTEXT: /* read word at location addr. */
52 case PTRACE_PEEKDATA: 52 case PTRACE_PEEKDATA:
53 { 53 ret = generic_ptrace_peekdata(child, addr, data);
54 unsigned long tmp;
55 int copied;
56
57 copied = access_process_vm(child, addr, &tmp, sizeof(tmp), 0);
58 ret = -EIO;
59 if (copied != sizeof(tmp))
60 break;
61 ret = put_user(tmp,(unsigned long *) data);
62
63 goto out; 54 goto out;
64 }
65 55
66 /* Read the word at location addr in the USER area. */ 56 /* Read the word at location addr in the USER area. */
67 57
@@ -138,10 +128,7 @@ long arch_ptrace(struct task_struct *child, long request, long addr, long data)
138 128
139 case PTRACE_POKETEXT: /* write the word at location addr. */ 129 case PTRACE_POKETEXT: /* write the word at location addr. */
140 case PTRACE_POKEDATA: 130 case PTRACE_POKEDATA:
141 if (access_process_vm(child, addr, &data, sizeof(data), 1) 131 ret = generic_ptrace_pokedata(child, addr, data);
142 == sizeof(data))
143 break;
144 ret = -EIO;
145 goto out; 132 goto out;
146 133
147 case PTRACE_POKEUSR: 134 case PTRACE_POKEUSR:
diff --git a/arch/xtensa/kernel/traps.c b/arch/xtensa/kernel/traps.c
index 693ab268485e..c5e62f9d9f50 100644
--- a/arch/xtensa/kernel/traps.c
+++ b/arch/xtensa/kernel/traps.c
@@ -482,6 +482,7 @@ void die(const char * str, struct pt_regs * regs, long err)
482 if (!user_mode(regs)) 482 if (!user_mode(regs))
483 show_stack(NULL, (unsigned long*)regs->areg[1]); 483 show_stack(NULL, (unsigned long*)regs->areg[1]);
484 484
485 add_taint(TAINT_DIE);
485 spin_unlock_irq(&die_lock); 486 spin_unlock_irq(&die_lock);
486 487
487 if (in_interrupt()) 488 if (in_interrupt())
diff --git a/block/as-iosched.c b/block/as-iosched.c
index 109e91b91ffa..3e316dd72529 100644
--- a/block/as-iosched.c
+++ b/block/as-iosched.c
@@ -1322,10 +1322,9 @@ static void *as_init_queue(request_queue_t *q)
1322{ 1322{
1323 struct as_data *ad; 1323 struct as_data *ad;
1324 1324
1325 ad = kmalloc_node(sizeof(*ad), GFP_KERNEL, q->node); 1325 ad = kmalloc_node(sizeof(*ad), GFP_KERNEL | __GFP_ZERO, q->node);
1326 if (!ad) 1326 if (!ad)
1327 return NULL; 1327 return NULL;
1328 memset(ad, 0, sizeof(*ad));
1329 1328
1330 ad->q = q; /* Identify what queue the data belongs to */ 1329 ad->q = q; /* Identify what queue the data belongs to */
1331 1330
diff --git a/block/cfq-iosched.c b/block/cfq-iosched.c
index e0aa4dad6742..9755a3cfad26 100644
--- a/block/cfq-iosched.c
+++ b/block/cfq-iosched.c
@@ -1251,9 +1251,9 @@ cfq_alloc_io_context(struct cfq_data *cfqd, gfp_t gfp_mask)
1251{ 1251{
1252 struct cfq_io_context *cic; 1252 struct cfq_io_context *cic;
1253 1253
1254 cic = kmem_cache_alloc_node(cfq_ioc_pool, gfp_mask, cfqd->queue->node); 1254 cic = kmem_cache_alloc_node(cfq_ioc_pool, gfp_mask | __GFP_ZERO,
1255 cfqd->queue->node);
1255 if (cic) { 1256 if (cic) {
1256 memset(cic, 0, sizeof(*cic));
1257 cic->last_end_request = jiffies; 1257 cic->last_end_request = jiffies;
1258 INIT_LIST_HEAD(&cic->queue_list); 1258 INIT_LIST_HEAD(&cic->queue_list);
1259 cic->dtor = cfq_free_io_context; 1259 cic->dtor = cfq_free_io_context;
@@ -1376,17 +1376,19 @@ retry:
1376 * free memory. 1376 * free memory.
1377 */ 1377 */
1378 spin_unlock_irq(cfqd->queue->queue_lock); 1378 spin_unlock_irq(cfqd->queue->queue_lock);
1379 new_cfqq = kmem_cache_alloc_node(cfq_pool, gfp_mask|__GFP_NOFAIL, cfqd->queue->node); 1379 new_cfqq = kmem_cache_alloc_node(cfq_pool,
1380 gfp_mask | __GFP_NOFAIL | __GFP_ZERO,
1381 cfqd->queue->node);
1380 spin_lock_irq(cfqd->queue->queue_lock); 1382 spin_lock_irq(cfqd->queue->queue_lock);
1381 goto retry; 1383 goto retry;
1382 } else { 1384 } else {
1383 cfqq = kmem_cache_alloc_node(cfq_pool, gfp_mask, cfqd->queue->node); 1385 cfqq = kmem_cache_alloc_node(cfq_pool,
1386 gfp_mask | __GFP_ZERO,
1387 cfqd->queue->node);
1384 if (!cfqq) 1388 if (!cfqq)
1385 goto out; 1389 goto out;
1386 } 1390 }
1387 1391
1388 memset(cfqq, 0, sizeof(*cfqq));
1389
1390 RB_CLEAR_NODE(&cfqq->rb_node); 1392 RB_CLEAR_NODE(&cfqq->rb_node);
1391 INIT_LIST_HEAD(&cfqq->fifo); 1393 INIT_LIST_HEAD(&cfqq->fifo);
1392 1394
@@ -2079,12 +2081,10 @@ static void *cfq_init_queue(request_queue_t *q)
2079{ 2081{
2080 struct cfq_data *cfqd; 2082 struct cfq_data *cfqd;
2081 2083
2082 cfqd = kmalloc_node(sizeof(*cfqd), GFP_KERNEL, q->node); 2084 cfqd = kmalloc_node(sizeof(*cfqd), GFP_KERNEL | __GFP_ZERO, q->node);
2083 if (!cfqd) 2085 if (!cfqd)
2084 return NULL; 2086 return NULL;
2085 2087
2086 memset(cfqd, 0, sizeof(*cfqd));
2087
2088 cfqd->service_tree = CFQ_RB_ROOT; 2088 cfqd->service_tree = CFQ_RB_ROOT;
2089 INIT_LIST_HEAD(&cfqd->cic_list); 2089 INIT_LIST_HEAD(&cfqd->cic_list);
2090 2090
diff --git a/block/deadline-iosched.c b/block/deadline-iosched.c
index 6d673e938d3e..87ca02ac84cb 100644
--- a/block/deadline-iosched.c
+++ b/block/deadline-iosched.c
@@ -360,10 +360,9 @@ static void *deadline_init_queue(request_queue_t *q)
360{ 360{
361 struct deadline_data *dd; 361 struct deadline_data *dd;
362 362
363 dd = kmalloc_node(sizeof(*dd), GFP_KERNEL, q->node); 363 dd = kmalloc_node(sizeof(*dd), GFP_KERNEL | __GFP_ZERO, q->node);
364 if (!dd) 364 if (!dd)
365 return NULL; 365 return NULL;
366 memset(dd, 0, sizeof(*dd));
367 366
368 INIT_LIST_HEAD(&dd->fifo_list[READ]); 367 INIT_LIST_HEAD(&dd->fifo_list[READ]);
369 INIT_LIST_HEAD(&dd->fifo_list[WRITE]); 368 INIT_LIST_HEAD(&dd->fifo_list[WRITE]);
diff --git a/block/elevator.c b/block/elevator.c
index 4769a25d7037..d265963d1ed3 100644
--- a/block/elevator.c
+++ b/block/elevator.c
@@ -177,11 +177,10 @@ static elevator_t *elevator_alloc(request_queue_t *q, struct elevator_type *e)
177 elevator_t *eq; 177 elevator_t *eq;
178 int i; 178 int i;
179 179
180 eq = kmalloc_node(sizeof(elevator_t), GFP_KERNEL, q->node); 180 eq = kmalloc_node(sizeof(elevator_t), GFP_KERNEL | __GFP_ZERO, q->node);
181 if (unlikely(!eq)) 181 if (unlikely(!eq))
182 goto err; 182 goto err;
183 183
184 memset(eq, 0, sizeof(*eq));
185 eq->ops = &e->ops; 184 eq->ops = &e->ops;
186 eq->elevator_type = e; 185 eq->elevator_type = e;
187 kobject_init(&eq->kobj); 186 kobject_init(&eq->kobj);
diff --git a/block/genhd.c b/block/genhd.c
index 863a8c0623ed..3af1e7a378d4 100644
--- a/block/genhd.c
+++ b/block/genhd.c
@@ -108,28 +108,24 @@ out:
108 108
109EXPORT_SYMBOL(register_blkdev); 109EXPORT_SYMBOL(register_blkdev);
110 110
111/* todo: make void - error printk here */ 111void unregister_blkdev(unsigned int major, const char *name)
112int unregister_blkdev(unsigned int major, const char *name)
113{ 112{
114 struct blk_major_name **n; 113 struct blk_major_name **n;
115 struct blk_major_name *p = NULL; 114 struct blk_major_name *p = NULL;
116 int index = major_to_index(major); 115 int index = major_to_index(major);
117 int ret = 0;
118 116
119 mutex_lock(&block_subsys_lock); 117 mutex_lock(&block_subsys_lock);
120 for (n = &major_names[index]; *n; n = &(*n)->next) 118 for (n = &major_names[index]; *n; n = &(*n)->next)
121 if ((*n)->major == major) 119 if ((*n)->major == major)
122 break; 120 break;
123 if (!*n || strcmp((*n)->name, name)) 121 if (!*n || strcmp((*n)->name, name)) {
124 ret = -EINVAL; 122 WARN_ON(1);
125 else { 123 } else {
126 p = *n; 124 p = *n;
127 *n = p->next; 125 *n = p->next;
128 } 126 }
129 mutex_unlock(&block_subsys_lock); 127 mutex_unlock(&block_subsys_lock);
130 kfree(p); 128 kfree(p);
131
132 return ret;
133} 129}
134 130
135EXPORT_SYMBOL(unregister_blkdev); 131EXPORT_SYMBOL(unregister_blkdev);
@@ -726,21 +722,21 @@ struct gendisk *alloc_disk_node(int minors, int node_id)
726{ 722{
727 struct gendisk *disk; 723 struct gendisk *disk;
728 724
729 disk = kmalloc_node(sizeof(struct gendisk), GFP_KERNEL, node_id); 725 disk = kmalloc_node(sizeof(struct gendisk),
726 GFP_KERNEL | __GFP_ZERO, node_id);
730 if (disk) { 727 if (disk) {
731 memset(disk, 0, sizeof(struct gendisk));
732 if (!init_disk_stats(disk)) { 728 if (!init_disk_stats(disk)) {
733 kfree(disk); 729 kfree(disk);
734 return NULL; 730 return NULL;
735 } 731 }
736 if (minors > 1) { 732 if (minors > 1) {
737 int size = (minors - 1) * sizeof(struct hd_struct *); 733 int size = (minors - 1) * sizeof(struct hd_struct *);
738 disk->part = kmalloc_node(size, GFP_KERNEL, node_id); 734 disk->part = kmalloc_node(size,
735 GFP_KERNEL | __GFP_ZERO, node_id);
739 if (!disk->part) { 736 if (!disk->part) {
740 kfree(disk); 737 kfree(disk);
741 return NULL; 738 return NULL;
742 } 739 }
743 memset(disk->part, 0, size);
744 } 740 }
745 disk->minors = minors; 741 disk->minors = minors;
746 kobj_set_kset_s(disk,block_subsys); 742 kobj_set_kset_s(disk,block_subsys);
diff --git a/block/ll_rw_blk.c b/block/ll_rw_blk.c
index 11e4235d0b0c..d7cadf304168 100644
--- a/block/ll_rw_blk.c
+++ b/block/ll_rw_blk.c
@@ -1829,11 +1829,11 @@ request_queue_t *blk_alloc_queue_node(gfp_t gfp_mask, int node_id)
1829{ 1829{
1830 request_queue_t *q; 1830 request_queue_t *q;
1831 1831
1832 q = kmem_cache_alloc_node(requestq_cachep, gfp_mask, node_id); 1832 q = kmem_cache_alloc_node(requestq_cachep,
1833 gfp_mask | __GFP_ZERO, node_id);
1833 if (!q) 1834 if (!q)
1834 return NULL; 1835 return NULL;
1835 1836
1836 memset(q, 0, sizeof(*q));
1837 init_timer(&q->unplug_timer); 1837 init_timer(&q->unplug_timer);
1838 1838
1839 snprintf(q->kobj.name, KOBJ_NAME_LEN, "%s", "queue"); 1839 snprintf(q->kobj.name, KOBJ_NAME_LEN, "%s", "queue");
diff --git a/drivers/ata/sata_mv.c b/drivers/ata/sata_mv.c
index 5d576435fccc..fb8a749423ca 100644
--- a/drivers/ata/sata_mv.c
+++ b/drivers/ata/sata_mv.c
@@ -2666,7 +2666,7 @@ static int mv_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
2666 mv_print_info(host); 2666 mv_print_info(host);
2667 2667
2668 pci_set_master(pdev); 2668 pci_set_master(pdev);
2669 pci_set_mwi(pdev); 2669 pci_try_set_mwi(pdev);
2670 return ata_host_activate(host, pdev->irq, mv_interrupt, IRQF_SHARED, 2670 return ata_host_activate(host, pdev->irq, mv_interrupt, IRQF_SHARED,
2671 IS_GEN_I(hpriv) ? &mv5_sht : &mv6_sht); 2671 IS_GEN_I(hpriv) ? &mv5_sht : &mv6_sht);
2672} 2672}
diff --git a/drivers/atm/ambassador.c b/drivers/atm/ambassador.c
index 59651abfa4f8..b34b3829f6a9 100644
--- a/drivers/atm/ambassador.c
+++ b/drivers/atm/ambassador.c
@@ -1040,7 +1040,7 @@ static int amb_open (struct atm_vcc * atm_vcc)
1040 struct atm_qos * qos; 1040 struct atm_qos * qos;
1041 struct atm_trafprm * txtp; 1041 struct atm_trafprm * txtp;
1042 struct atm_trafprm * rxtp; 1042 struct atm_trafprm * rxtp;
1043 u16 tx_rate_bits; 1043 u16 tx_rate_bits = -1; // hush gcc
1044 u16 tx_vc_bits = -1; // hush gcc 1044 u16 tx_vc_bits = -1; // hush gcc
1045 u16 tx_frame_bits = -1; // hush gcc 1045 u16 tx_frame_bits = -1; // hush gcc
1046 1046
@@ -1096,6 +1096,8 @@ static int amb_open (struct atm_vcc * atm_vcc)
1096 r = round_up; 1096 r = round_up;
1097 } 1097 }
1098 error = make_rate (pcr, r, &tx_rate_bits, NULL); 1098 error = make_rate (pcr, r, &tx_rate_bits, NULL);
1099 if (error)
1100 return error;
1099 tx_vc_bits = TX_UBR_CAPPED; 1101 tx_vc_bits = TX_UBR_CAPPED;
1100 tx_frame_bits = TX_FRAME_CAPPED; 1102 tx_frame_bits = TX_FRAME_CAPPED;
1101 } 1103 }
diff --git a/drivers/atm/zatm.c b/drivers/atm/zatm.c
index 020a87a476c8..58583c6ac5be 100644
--- a/drivers/atm/zatm.c
+++ b/drivers/atm/zatm.c
@@ -915,7 +915,7 @@ static int open_tx_first(struct atm_vcc *vcc)
915 unsigned long flags; 915 unsigned long flags;
916 u32 *loop; 916 u32 *loop;
917 unsigned short chan; 917 unsigned short chan;
918 int pcr,unlimited; 918 int unlimited;
919 919
920 DPRINTK("open_tx_first\n"); 920 DPRINTK("open_tx_first\n");
921 zatm_dev = ZATM_DEV(vcc->dev); 921 zatm_dev = ZATM_DEV(vcc->dev);
@@ -936,6 +936,8 @@ static int open_tx_first(struct atm_vcc *vcc)
936 vcc->qos.txtp.max_pcr >= ATM_OC3_PCR); 936 vcc->qos.txtp.max_pcr >= ATM_OC3_PCR);
937 if (unlimited && zatm_dev->ubr != -1) zatm_vcc->shaper = zatm_dev->ubr; 937 if (unlimited && zatm_dev->ubr != -1) zatm_vcc->shaper = zatm_dev->ubr;
938 else { 938 else {
939 int uninitialized_var(pcr);
940
939 if (unlimited) vcc->qos.txtp.max_sdu = ATM_MAX_AAL5_PDU; 941 if (unlimited) vcc->qos.txtp.max_sdu = ATM_MAX_AAL5_PDU;
940 if ((zatm_vcc->shaper = alloc_shaper(vcc->dev,&pcr, 942 if ((zatm_vcc->shaper = alloc_shaper(vcc->dev,&pcr,
941 vcc->qos.txtp.min_pcr,vcc->qos.txtp.max_pcr,unlimited)) 943 vcc->qos.txtp.min_pcr,vcc->qos.txtp.max_pcr,unlimited))
diff --git a/drivers/block/Kconfig b/drivers/block/Kconfig
index c5a61571a076..8f65b88cf711 100644
--- a/drivers/block/Kconfig
+++ b/drivers/block/Kconfig
@@ -421,4 +421,10 @@ config SUNVDC
421 421
422source "drivers/s390/block/Kconfig" 422source "drivers/s390/block/Kconfig"
423 423
424config XILINX_SYSACE
425 tristate "Xilinx SystemACE support"
426 depends on 4xx
427 help
428 Include support for the Xilinx SystemACE CompactFlash interface
429
424endif # BLK_DEV 430endif # BLK_DEV
diff --git a/drivers/block/Makefile b/drivers/block/Makefile
index 7926be8c9fb7..9ee08ab4ffa8 100644
--- a/drivers/block/Makefile
+++ b/drivers/block/Makefile
@@ -17,6 +17,7 @@ obj-$(CONFIG_BLK_DEV_XD) += xd.o
17obj-$(CONFIG_BLK_CPQ_DA) += cpqarray.o 17obj-$(CONFIG_BLK_CPQ_DA) += cpqarray.o
18obj-$(CONFIG_BLK_CPQ_CISS_DA) += cciss.o 18obj-$(CONFIG_BLK_CPQ_CISS_DA) += cciss.o
19obj-$(CONFIG_BLK_DEV_DAC960) += DAC960.o 19obj-$(CONFIG_BLK_DEV_DAC960) += DAC960.o
20obj-$(CONFIG_XILINX_SYSACE) += xsysace.o
20obj-$(CONFIG_CDROM_PKTCDVD) += pktcdvd.o 21obj-$(CONFIG_CDROM_PKTCDVD) += pktcdvd.o
21obj-$(CONFIG_SUNVDC) += sunvdc.o 22obj-$(CONFIG_SUNVDC) += sunvdc.o
22 23
diff --git a/drivers/block/loop.c b/drivers/block/loop.c
index 4503290da407..e425daa1eac3 100644
--- a/drivers/block/loop.c
+++ b/drivers/block/loop.c
@@ -68,6 +68,7 @@
68#include <linux/loop.h> 68#include <linux/loop.h>
69#include <linux/compat.h> 69#include <linux/compat.h>
70#include <linux/suspend.h> 70#include <linux/suspend.h>
71#include <linux/freezer.h>
71#include <linux/writeback.h> 72#include <linux/writeback.h>
72#include <linux/buffer_head.h> /* for invalidate_bdev() */ 73#include <linux/buffer_head.h> /* for invalidate_bdev() */
73#include <linux/completion.h> 74#include <linux/completion.h>
@@ -600,13 +601,6 @@ static int loop_thread(void *data)
600 struct loop_device *lo = data; 601 struct loop_device *lo = data;
601 struct bio *bio; 602 struct bio *bio;
602 603
603 /*
604 * loop can be used in an encrypted device,
605 * hence, it mustn't be stopped at all
606 * because it could be indirectly used during suspension
607 */
608 current->flags |= PF_NOFREEZE;
609
610 set_user_nice(current, -20); 604 set_user_nice(current, -20);
611 605
612 while (!kthread_should_stop() || lo->lo_bio) { 606 while (!kthread_should_stop() || lo->lo_bio) {
@@ -1574,8 +1568,7 @@ static void __exit loop_exit(void)
1574 loop_del_one(lo); 1568 loop_del_one(lo);
1575 1569
1576 blk_unregister_region(MKDEV(LOOP_MAJOR, 0), range); 1570 blk_unregister_region(MKDEV(LOOP_MAJOR, 0), range);
1577 if (unregister_blkdev(LOOP_MAJOR, "loop")) 1571 unregister_blkdev(LOOP_MAJOR, "loop");
1578 printk(KERN_WARNING "loop: cannot unregister blkdev\n");
1579} 1572}
1580 1573
1581module_init(loop_init); 1574module_init(loop_init);
diff --git a/drivers/block/pktcdvd.c b/drivers/block/pktcdvd.c
index 7c294a40002e..31be33e4f119 100644
--- a/drivers/block/pktcdvd.c
+++ b/drivers/block/pktcdvd.c
@@ -1593,6 +1593,7 @@ static int kcdrwd(void *foobar)
1593 long min_sleep_time, residue; 1593 long min_sleep_time, residue;
1594 1594
1595 set_user_nice(current, -20); 1595 set_user_nice(current, -20);
1596 set_freezable();
1596 1597
1597 for (;;) { 1598 for (;;) {
1598 DECLARE_WAITQUEUE(wait, current); 1599 DECLARE_WAITQUEUE(wait, current);
diff --git a/drivers/block/xsysace.c b/drivers/block/xsysace.c
new file mode 100644
index 000000000000..732ec63b6e9c
--- /dev/null
+++ b/drivers/block/xsysace.c
@@ -0,0 +1,1164 @@
1/*
2 * Xilinx SystemACE device driver
3 *
4 * Copyright 2007 Secret Lab Technologies Ltd.
5 *
6 * This program is free software; you can redistribute it and/or modify it
7 * under the terms of the GNU General Public License version 2 as published
8 * by the Free Software Foundation.
9 */
10
11/*
12 * The SystemACE chip is designed to configure FPGAs by loading an FPGA
13 * bitstream from a file on a CF card and squirting it into FPGAs connected
14 * to the SystemACE JTAG chain. It also has the advantage of providing an
15 * MPU interface which can be used to control the FPGA configuration process
16 * and to use the attached CF card for general purpose storage.
17 *
18 * This driver is a block device driver for the SystemACE.
19 *
20 * Initialization:
21 * The driver registers itself as a platform_device driver at module
22 * load time. The platform bus will take care of calling the
23 * ace_probe() method for all SystemACE instances in the system. Any
24 * number of SystemACE instances are supported. ace_probe() calls
 25 * ace_setup() which initializes all data structures, reads the CF
26 * id structure and registers the device.
27 *
28 * Processing:
29 * Just about all of the heavy lifting in this driver is performed by
30 * a Finite State Machine (FSM). The driver needs to wait on a number
31 * of events; some raised by interrupts, some which need to be polled
32 * for. Describing all of the behaviour in a FSM seems to be the
33 * easiest way to keep the complexity low and make it easy to
34 * understand what the driver is doing. If the block ops or the
35 * request function need to interact with the hardware, then they
 36 * simply need to flag the request and kick off FSM processing.
37 *
38 * The FSM itself is atomic-safe code which can be run from any
39 * context. The general process flow is:
40 * 1. obtain the ace->lock spinlock.
41 * 2. loop on ace_fsm_dostate() until the ace->fsm_continue flag is
42 * cleared.
43 * 3. release the lock.
44 *
45 * Individual states do not sleep in any way. If a condition needs to
 46 * be waited for then the state must clear the fsm_continue flag and
47 * either schedule the FSM to be run again at a later time, or expect
48 * an interrupt to call the FSM when the desired condition is met.
49 *
50 * In normal operation, the FSM is processed at interrupt context
51 * either when the driver's tasklet is scheduled, or when an irq is
52 * raised by the hardware. The tasklet can be scheduled at any time.
53 * The request method in particular schedules the tasklet when a new
54 * request has been indicated by the block layer. Once started, the
55 * FSM proceeds as far as it can processing the request until it
 56 * needs to wait on a hardware event. At this point, it must yield execution.
57 *
58 * A state has two options when yielding execution:
59 * 1. ace_fsm_yield()
60 * - Call if need to poll for event.
61 * - clears the fsm_continue flag to exit the processing loop
62 * - reschedules the tasklet to run again as soon as possible
63 * 2. ace_fsm_yieldirq()
64 * - Call if an irq is expected from the HW
65 * - clears the fsm_continue flag to exit the processing loop
66 * - does not reschedule the tasklet so the FSM will not be processed
67 * again until an irq is received.
68 * After calling a yield function, the state must return control back
69 * to the FSM main loop.
70 *
71 * Additionally, the driver maintains a kernel timer which can process
72 * the FSM. If the FSM gets stalled, typically due to a missed
73 * interrupt, then the kernel timer will expire and the driver can
74 * continue where it left off.
75 *
76 * To Do:
77 * - Add FPGA configuration control interface.
78 * - Request major number from lanana
79 */
80
81#undef DEBUG
82
83#include <linux/module.h>
84#include <linux/ctype.h>
85#include <linux/init.h>
86#include <linux/interrupt.h>
87#include <linux/errno.h>
88#include <linux/kernel.h>
89#include <linux/delay.h>
90#include <linux/slab.h>
91#include <linux/blkdev.h>
92#include <linux/hdreg.h>
93#include <linux/platform_device.h>
94
95MODULE_AUTHOR("Grant Likely <grant.likely@secretlab.ca>");
96MODULE_DESCRIPTION("Xilinx SystemACE device driver");
97MODULE_LICENSE("GPL");
98
99/* SystemACE register definitions */
100#define ACE_BUSMODE (0x00)
101
102#define ACE_STATUS (0x04)
103#define ACE_STATUS_CFGLOCK (0x00000001)
104#define ACE_STATUS_MPULOCK (0x00000002)
105#define ACE_STATUS_CFGERROR (0x00000004) /* config controller error */
106#define ACE_STATUS_CFCERROR (0x00000008) /* CF controller error */
107#define ACE_STATUS_CFDETECT (0x00000010)
108#define ACE_STATUS_DATABUFRDY (0x00000020)
109#define ACE_STATUS_DATABUFMODE (0x00000040)
110#define ACE_STATUS_CFGDONE (0x00000080)
111#define ACE_STATUS_RDYFORCFCMD (0x00000100)
112#define ACE_STATUS_CFGMODEPIN (0x00000200)
113#define ACE_STATUS_CFGADDR_MASK (0x0000e000)
114#define ACE_STATUS_CFBSY (0x00020000)
115#define ACE_STATUS_CFRDY (0x00040000)
116#define ACE_STATUS_CFDWF (0x00080000)
117#define ACE_STATUS_CFDSC (0x00100000)
118#define ACE_STATUS_CFDRQ (0x00200000)
119#define ACE_STATUS_CFCORR (0x00400000)
120#define ACE_STATUS_CFERR (0x00800000)
121
122#define ACE_ERROR (0x08)
123#define ACE_CFGLBA (0x0c)
124#define ACE_MPULBA (0x10)
125
126#define ACE_SECCNTCMD (0x14)
127#define ACE_SECCNTCMD_RESET (0x0100)
128#define ACE_SECCNTCMD_IDENTIFY (0x0200)
129#define ACE_SECCNTCMD_READ_DATA (0x0300)
130#define ACE_SECCNTCMD_WRITE_DATA (0x0400)
131#define ACE_SECCNTCMD_ABORT (0x0600)
132
133#define ACE_VERSION (0x16)
134#define ACE_VERSION_REVISION_MASK (0x00FF)
135#define ACE_VERSION_MINOR_MASK (0x0F00)
136#define ACE_VERSION_MAJOR_MASK (0xF000)
137
138#define ACE_CTRL (0x18)
139#define ACE_CTRL_FORCELOCKREQ (0x0001)
140#define ACE_CTRL_LOCKREQ (0x0002)
141#define ACE_CTRL_FORCECFGADDR (0x0004)
142#define ACE_CTRL_FORCECFGMODE (0x0008)
143#define ACE_CTRL_CFGMODE (0x0010)
144#define ACE_CTRL_CFGSTART (0x0020)
145#define ACE_CTRL_CFGSEL (0x0040)
146#define ACE_CTRL_CFGRESET (0x0080)
147#define ACE_CTRL_DATABUFRDYIRQ (0x0100)
148#define ACE_CTRL_ERRORIRQ (0x0200)
149#define ACE_CTRL_CFGDONEIRQ (0x0400)
150#define ACE_CTRL_RESETIRQ (0x0800)
151#define ACE_CTRL_CFGPROG (0x1000)
152#define ACE_CTRL_CFGADDR_MASK (0xe000)
153
154#define ACE_FATSTAT (0x1c)
155
156#define ACE_NUM_MINORS 16
157#define ACE_SECTOR_SIZE (512)
158#define ACE_FIFO_SIZE (32)
159#define ACE_BUF_PER_SECTOR (ACE_SECTOR_SIZE / ACE_FIFO_SIZE)
160
161struct ace_reg_ops;
162
163struct ace_device {
164 /* driver state data */
165 int id;
166 int media_change;
167 int users;
168 struct list_head list;
169
170 /* finite state machine data */
171 struct tasklet_struct fsm_tasklet;
172 uint fsm_task; /* Current activity (ACE_TASK_*) */
173 uint fsm_state; /* Current state (ACE_FSM_STATE_*) */
174 uint fsm_continue_flag; /* cleared to exit FSM mainloop */
175 uint fsm_iter_num;
176 struct timer_list stall_timer;
177
178 /* Transfer state/result, use for both id and block request */
179 struct request *req; /* request being processed */
180 void *data_ptr; /* pointer to I/O buffer */
181 int data_count; /* number of buffers remaining */
182 int data_result; /* Result of transfer; 0 := success */
183
184 int id_req_count; /* count of id requests */
185 int id_result;
186 struct completion id_completion; /* used when id req finishes */
187 int in_irq;
188
189 /* Details of hardware device */
190 unsigned long physaddr;
191 void *baseaddr;
192 int irq;
193 int bus_width; /* 0 := 8 bit; 1 := 16 bit */
194 struct ace_reg_ops *reg_ops;
195 int lock_count;
196
197 /* Block device data structures */
198 spinlock_t lock;
199 struct device *dev;
200 struct request_queue *queue;
201 struct gendisk *gd;
202
203 /* Inserted CF card parameters */
204 struct hd_driveid cf_id;
205};
206
207static int ace_major;
208
209/* ---------------------------------------------------------------------
210 * Low level register access
211 */
212
213struct ace_reg_ops {
214 u16(*in) (struct ace_device * ace, int reg);
215 void (*out) (struct ace_device * ace, int reg, u16 val);
216 void (*datain) (struct ace_device * ace);
217 void (*dataout) (struct ace_device * ace);
218};
219
220/* 8 Bit bus width */
221static u16 ace_in_8(struct ace_device *ace, int reg)
222{
223 void *r = ace->baseaddr + reg;
224 return in_8(r) | (in_8(r + 1) << 8);
225}
226
227static void ace_out_8(struct ace_device *ace, int reg, u16 val)
228{
229 void *r = ace->baseaddr + reg;
230 out_8(r, val);
231 out_8(r + 1, val >> 8);
232}
233
234static void ace_datain_8(struct ace_device *ace)
235{
236 void *r = ace->baseaddr + 0x40;
237 u8 *dst = ace->data_ptr;
238 int i = ACE_FIFO_SIZE;
239 while (i--)
240 *dst++ = in_8(r++);
241 ace->data_ptr = dst;
242}
243
244static void ace_dataout_8(struct ace_device *ace)
245{
246 void *r = ace->baseaddr + 0x40;
247 u8 *src = ace->data_ptr;
248 int i = ACE_FIFO_SIZE;
249 while (i--)
250 out_8(r++, *src++);
251 ace->data_ptr = src;
252}
253
254static struct ace_reg_ops ace_reg_8_ops = {
255 .in = ace_in_8,
256 .out = ace_out_8,
257 .datain = ace_datain_8,
258 .dataout = ace_dataout_8,
259};
260
261/* 16 bit big endian bus attachment */
262static u16 ace_in_be16(struct ace_device *ace, int reg)
263{
264 return in_be16(ace->baseaddr + reg);
265}
266
267static void ace_out_be16(struct ace_device *ace, int reg, u16 val)
268{
269 out_be16(ace->baseaddr + reg, val);
270}
271
272static void ace_datain_be16(struct ace_device *ace)
273{
274 int i = ACE_FIFO_SIZE / 2;
275 u16 *dst = ace->data_ptr;
276 while (i--)
277 *dst++ = in_le16(ace->baseaddr + 0x40);
278 ace->data_ptr = dst;
279}
280
281static void ace_dataout_be16(struct ace_device *ace)
282{
283 int i = ACE_FIFO_SIZE / 2;
284 u16 *src = ace->data_ptr;
285 while (i--)
286 out_le16(ace->baseaddr + 0x40, *src++);
287 ace->data_ptr = src;
288}
289
290/* 16 bit little endian bus attachment */
291static u16 ace_in_le16(struct ace_device *ace, int reg)
292{
293 return in_le16(ace->baseaddr + reg);
294}
295
296static void ace_out_le16(struct ace_device *ace, int reg, u16 val)
297{
298 out_le16(ace->baseaddr + reg, val);
299}
300
301static void ace_datain_le16(struct ace_device *ace)
302{
303 int i = ACE_FIFO_SIZE / 2;
304 u16 *dst = ace->data_ptr;
305 while (i--)
306 *dst++ = in_be16(ace->baseaddr + 0x40);
307 ace->data_ptr = dst;
308}
309
310static void ace_dataout_le16(struct ace_device *ace)
311{
312 int i = ACE_FIFO_SIZE / 2;
313 u16 *src = ace->data_ptr;
314 while (i--)
315 out_be16(ace->baseaddr + 0x40, *src++);
316 ace->data_ptr = src;
317}
318
319static struct ace_reg_ops ace_reg_be16_ops = {
320 .in = ace_in_be16,
321 .out = ace_out_be16,
322 .datain = ace_datain_be16,
323 .dataout = ace_dataout_be16,
324};
325
326static struct ace_reg_ops ace_reg_le16_ops = {
327 .in = ace_in_le16,
328 .out = ace_out_le16,
329 .datain = ace_datain_le16,
330 .dataout = ace_dataout_le16,
331};
332
333static inline u16 ace_in(struct ace_device *ace, int reg)
334{
335 return ace->reg_ops->in(ace, reg);
336}
337
338static inline u32 ace_in32(struct ace_device *ace, int reg)
339{
340 return ace_in(ace, reg) | (ace_in(ace, reg + 2) << 16);
341}
342
343static inline void ace_out(struct ace_device *ace, int reg, u16 val)
344{
345 ace->reg_ops->out(ace, reg, val);
346}
347
348static inline void ace_out32(struct ace_device *ace, int reg, u32 val)
349{
350 ace_out(ace, reg, val);
351 ace_out(ace, reg + 2, val >> 16);
352}
353
354/* ---------------------------------------------------------------------
355 * Debug support functions
356 */
357
358#if defined(DEBUG)
359static void ace_dump_mem(void *base, int len)
360{
361 const char *ptr = base;
362 int i, j;
363
364 for (i = 0; i < len; i += 16) {
365 printk(KERN_INFO "%.8x:", i);
366 for (j = 0; j < 16; j++) {
367 if (!(j % 4))
368 printk(" ");
369 printk("%.2x", ptr[i + j]);
370 }
371 printk(" ");
372 for (j = 0; j < 16; j++)
373 printk("%c", isprint(ptr[i + j]) ? ptr[i + j] : '.');
374 printk("\n");
375 }
376}
377#else
378static inline void ace_dump_mem(void *base, int len)
379{
380}
381#endif
382
383static void ace_dump_regs(struct ace_device *ace)
384{
385 dev_info(ace->dev, " ctrl: %.8x seccnt/cmd: %.4x ver:%.4x\n"
386 " status:%.8x mpu_lba:%.8x busmode:%4x\n"
387 " error: %.8x cfg_lba:%.8x fatstat:%.4x\n",
388 ace_in32(ace, ACE_CTRL),
389 ace_in(ace, ACE_SECCNTCMD),
390 ace_in(ace, ACE_VERSION),
391 ace_in32(ace, ACE_STATUS),
392 ace_in32(ace, ACE_MPULBA),
393 ace_in(ace, ACE_BUSMODE),
394 ace_in32(ace, ACE_ERROR),
395 ace_in32(ace, ACE_CFGLBA), ace_in(ace, ACE_FATSTAT));
396}
397
398void ace_fix_driveid(struct hd_driveid *id)
399{
400#if defined(__BIG_ENDIAN)
401 u16 *buf = (void *)id;
402 int i;
403
404 /* All half words have wrong byte order; swap the bytes */
405 for (i = 0; i < sizeof(struct hd_driveid); i += 2, buf++)
406 *buf = le16_to_cpu(*buf);
407
408 /* Some of the data values are 32bit; swap the half words */
409 id->lba_capacity = ((id->lba_capacity >> 16) & 0x0000FFFF) |
410 ((id->lba_capacity << 16) & 0xFFFF0000);
411 id->spg = ((id->spg >> 16) & 0x0000FFFF) |
412 ((id->spg << 16) & 0xFFFF0000);
413#endif
414}
415
416/* ---------------------------------------------------------------------
417 * Finite State Machine (FSM) implementation
418 */
419
420/* FSM tasks; used to direct state transitions */
421#define ACE_TASK_IDLE 0
422#define ACE_TASK_IDENTIFY 1
423#define ACE_TASK_READ 2
424#define ACE_TASK_WRITE 3
425#define ACE_FSM_NUM_TASKS 4
426
427/* FSM state definitions */
428#define ACE_FSM_STATE_IDLE 0
429#define ACE_FSM_STATE_REQ_LOCK 1
430#define ACE_FSM_STATE_WAIT_LOCK 2
431#define ACE_FSM_STATE_WAIT_CFREADY 3
432#define ACE_FSM_STATE_IDENTIFY_PREPARE 4
433#define ACE_FSM_STATE_IDENTIFY_TRANSFER 5
434#define ACE_FSM_STATE_IDENTIFY_COMPLETE 6
435#define ACE_FSM_STATE_REQ_PREPARE 7
436#define ACE_FSM_STATE_REQ_TRANSFER 8
437#define ACE_FSM_STATE_REQ_COMPLETE 9
438#define ACE_FSM_STATE_ERROR 10
439#define ACE_FSM_NUM_STATES 11
440
441/* Set flag to exit FSM loop and reschedule tasklet */
442static inline void ace_fsm_yield(struct ace_device *ace)
443{
444 dev_dbg(ace->dev, "ace_fsm_yield()\n");
445 tasklet_schedule(&ace->fsm_tasklet);
446 ace->fsm_continue_flag = 0;
447}
448
449/* Set flag to exit FSM loop and wait for IRQ to reschedule tasklet */
450static inline void ace_fsm_yieldirq(struct ace_device *ace)
451{
452 dev_dbg(ace->dev, "ace_fsm_yieldirq()\n");
453
454 if (ace->irq == NO_IRQ)
455 /* No IRQ assigned, so need to poll */
456 tasklet_schedule(&ace->fsm_tasklet);
457 ace->fsm_continue_flag = 0;
458}
459
460/* Get the next read/write request; ending requests that we don't handle */
461struct request *ace_get_next_request(request_queue_t * q)
462{
463 struct request *req;
464
465 while ((req = elv_next_request(q)) != NULL) {
466 if (blk_fs_request(req))
467 break;
468 end_request(req, 0);
469 }
470 return req;
471}
472
473static void ace_fsm_dostate(struct ace_device *ace)
474{
475 struct request *req;
476 u32 status;
477 u16 val;
478 int count;
479 int i;
480
481#if defined(DEBUG)
482 dev_dbg(ace->dev, "fsm_state=%i, id_req_count=%i\n",
483 ace->fsm_state, ace->id_req_count);
484#endif
485
486 switch (ace->fsm_state) {
487 case ACE_FSM_STATE_IDLE:
488 /* See if there is anything to do */
489 if (ace->id_req_count || ace_get_next_request(ace->queue)) {
490 ace->fsm_iter_num++;
491 ace->fsm_state = ACE_FSM_STATE_REQ_LOCK;
492 mod_timer(&ace->stall_timer, jiffies + HZ);
493 if (!timer_pending(&ace->stall_timer))
494 add_timer(&ace->stall_timer);
495 break;
496 }
497 del_timer(&ace->stall_timer);
498 ace->fsm_continue_flag = 0;
499 break;
500
501 case ACE_FSM_STATE_REQ_LOCK:
502 if (ace_in(ace, ACE_STATUS) & ACE_STATUS_MPULOCK) {
503 /* Already have the lock, jump to next state */
504 ace->fsm_state = ACE_FSM_STATE_WAIT_CFREADY;
505 break;
506 }
507
508 /* Request the lock */
509 val = ace_in(ace, ACE_CTRL);
510 ace_out(ace, ACE_CTRL, val | ACE_CTRL_LOCKREQ);
511 ace->fsm_state = ACE_FSM_STATE_WAIT_LOCK;
512 break;
513
514 case ACE_FSM_STATE_WAIT_LOCK:
515 if (ace_in(ace, ACE_STATUS) & ACE_STATUS_MPULOCK) {
516 /* got the lock; move to next state */
517 ace->fsm_state = ACE_FSM_STATE_WAIT_CFREADY;
518 break;
519 }
520
521 /* wait a bit for the lock */
522 ace_fsm_yield(ace);
523 break;
524
525 case ACE_FSM_STATE_WAIT_CFREADY:
526 status = ace_in32(ace, ACE_STATUS);
527 if (!(status & ACE_STATUS_RDYFORCFCMD) ||
528 (status & ACE_STATUS_CFBSY)) {
529 /* CF card isn't ready; it needs to be polled */
530 ace_fsm_yield(ace);
531 break;
532 }
533
534 /* Device is ready for command; determine what to do next */
535 if (ace->id_req_count)
536 ace->fsm_state = ACE_FSM_STATE_IDENTIFY_PREPARE;
537 else
538 ace->fsm_state = ACE_FSM_STATE_REQ_PREPARE;
539 break;
540
541 case ACE_FSM_STATE_IDENTIFY_PREPARE:
542 /* Send identify command */
543 ace->fsm_task = ACE_TASK_IDENTIFY;
544 ace->data_ptr = &ace->cf_id;
545 ace->data_count = ACE_BUF_PER_SECTOR;
546 ace_out(ace, ACE_SECCNTCMD, ACE_SECCNTCMD_IDENTIFY);
547
548 /* As per datasheet, put config controller in reset */
549 val = ace_in(ace, ACE_CTRL);
550 ace_out(ace, ACE_CTRL, val | ACE_CTRL_CFGRESET);
551
552 /* irq handler takes over from this point; wait for the
553 * transfer to complete */
554 ace->fsm_state = ACE_FSM_STATE_IDENTIFY_TRANSFER;
555 ace_fsm_yieldirq(ace);
556 break;
557
558 case ACE_FSM_STATE_IDENTIFY_TRANSFER:
559 /* Check that the sysace is ready to receive data */
560 status = ace_in32(ace, ACE_STATUS);
561 if (status & ACE_STATUS_CFBSY) {
562 dev_dbg(ace->dev, "CFBSY set; t=%i iter=%i dc=%i\n",
563 ace->fsm_task, ace->fsm_iter_num,
564 ace->data_count);
565 ace_fsm_yield(ace);
566 break;
567 }
568 if (!(status & ACE_STATUS_DATABUFRDY)) {
569 ace_fsm_yield(ace);
570 break;
571 }
572
573 /* Transfer the next buffer */
574 ace->reg_ops->datain(ace);
575 ace->data_count--;
576
577 /* If there are still buffers to be transfers; jump out here */
578 if (ace->data_count != 0) {
579 ace_fsm_yieldirq(ace);
580 break;
581 }
582
583 /* transfer finished; kick state machine */
584 dev_dbg(ace->dev, "identify finished\n");
585 ace->fsm_state = ACE_FSM_STATE_IDENTIFY_COMPLETE;
586 break;
587
588 case ACE_FSM_STATE_IDENTIFY_COMPLETE:
589 ace_fix_driveid(&ace->cf_id);
590 ace_dump_mem(&ace->cf_id, 512); /* Debug: Dump out disk ID */
591
592 if (ace->data_result) {
593 /* Error occured, disable the disk */
594 ace->media_change = 1;
595 set_capacity(ace->gd, 0);
596 dev_err(ace->dev, "error fetching CF id (%i)\n",
597 ace->data_result);
598 } else {
599 ace->media_change = 0;
600
601 /* Record disk parameters */
602 set_capacity(ace->gd, ace->cf_id.lba_capacity);
603 dev_info(ace->dev, "capacity: %i sectors\n",
604 ace->cf_id.lba_capacity);
605 }
606
607 /* We're done, drop to IDLE state and notify waiters */
608 ace->fsm_state = ACE_FSM_STATE_IDLE;
609 ace->id_result = ace->data_result;
610 while (ace->id_req_count) {
611 complete(&ace->id_completion);
612 ace->id_req_count--;
613 }
614 break;
615
616 case ACE_FSM_STATE_REQ_PREPARE:
617 req = ace_get_next_request(ace->queue);
618 if (!req) {
619 ace->fsm_state = ACE_FSM_STATE_IDLE;
620 break;
621 }
622
623 /* Okay, it's a data request, set it up for transfer */
624 dev_dbg(ace->dev,
625 "request: sec=%lx hcnt=%lx, ccnt=%x, dir=%i\n",
626 req->sector, req->hard_nr_sectors,
627 req->current_nr_sectors, rq_data_dir(req));
628
629 ace->req = req;
630 ace->data_ptr = req->buffer;
631 ace->data_count = req->current_nr_sectors * ACE_BUF_PER_SECTOR;
632 ace_out32(ace, ACE_MPULBA, req->sector & 0x0FFFFFFF);
633
634 count = req->hard_nr_sectors;
635 if (rq_data_dir(req)) {
636 /* Kick off write request */
637 dev_dbg(ace->dev, "write data\n");
638 ace->fsm_task = ACE_TASK_WRITE;
639 ace_out(ace, ACE_SECCNTCMD,
640 count | ACE_SECCNTCMD_WRITE_DATA);
641 } else {
642 /* Kick off read request */
643 dev_dbg(ace->dev, "read data\n");
644 ace->fsm_task = ACE_TASK_READ;
645 ace_out(ace, ACE_SECCNTCMD,
646 count | ACE_SECCNTCMD_READ_DATA);
647 }
648
649 /* As per datasheet, put config controller in reset */
650 val = ace_in(ace, ACE_CTRL);
651 ace_out(ace, ACE_CTRL, val | ACE_CTRL_CFGRESET);
652
653 /* Move to the transfer state. The systemace will raise
654 * an interrupt once there is something to do
655 */
656 ace->fsm_state = ACE_FSM_STATE_REQ_TRANSFER;
657 if (ace->fsm_task == ACE_TASK_READ)
658 ace_fsm_yieldirq(ace); /* wait for data ready */
659 break;
660
661 case ACE_FSM_STATE_REQ_TRANSFER:
662 /* Check that the sysace is ready to receive data */
663 status = ace_in32(ace, ACE_STATUS);
664 if (status & ACE_STATUS_CFBSY) {
665 dev_dbg(ace->dev,
666 "CFBSY set; t=%i iter=%i c=%i dc=%i irq=%i\n",
667 ace->fsm_task, ace->fsm_iter_num,
668 ace->req->current_nr_sectors * 16,
669 ace->data_count, ace->in_irq);
670 ace_fsm_yield(ace); /* need to poll CFBSY bit */
671 break;
672 }
673 if (!(status & ACE_STATUS_DATABUFRDY)) {
674 dev_dbg(ace->dev,
675 "DATABUF not set; t=%i iter=%i c=%i dc=%i irq=%i\n",
676 ace->fsm_task, ace->fsm_iter_num,
677 ace->req->current_nr_sectors * 16,
678 ace->data_count, ace->in_irq);
679 ace_fsm_yieldirq(ace);
680 break;
681 }
682
683 /* Transfer the next buffer */
684 i = 16;
685 if (ace->fsm_task == ACE_TASK_WRITE)
686 ace->reg_ops->dataout(ace);
687 else
688 ace->reg_ops->datain(ace);
689 ace->data_count--;
690
691 /* If there are still buffers to be transfers; jump out here */
692 if (ace->data_count != 0) {
693 ace_fsm_yieldirq(ace);
694 break;
695 }
696
697 /* bio finished; is there another one? */
698 i = ace->req->current_nr_sectors;
699 if (end_that_request_first(ace->req, 1, i)) {
700 /* dev_dbg(ace->dev, "next block; h=%li c=%i\n",
701 * ace->req->hard_nr_sectors,
702 * ace->req->current_nr_sectors);
703 */
704 ace->data_ptr = ace->req->buffer;
705 ace->data_count = ace->req->current_nr_sectors * 16;
706 ace_fsm_yieldirq(ace);
707 break;
708 }
709
710 ace->fsm_state = ACE_FSM_STATE_REQ_COMPLETE;
711 break;
712
713 case ACE_FSM_STATE_REQ_COMPLETE:
714 /* Complete the block request */
715 blkdev_dequeue_request(ace->req);
716 end_that_request_last(ace->req, 1);
717 ace->req = NULL;
718
719 /* Finished request; go to idle state */
720 ace->fsm_state = ACE_FSM_STATE_IDLE;
721 break;
722
723 default:
724 ace->fsm_state = ACE_FSM_STATE_IDLE;
725 break;
726 }
727}
728
729static void ace_fsm_tasklet(unsigned long data)
730{
731 struct ace_device *ace = (void *)data;
732 unsigned long flags;
733
734 spin_lock_irqsave(&ace->lock, flags);
735
736 /* Loop over state machine until told to stop */
737 ace->fsm_continue_flag = 1;
738 while (ace->fsm_continue_flag)
739 ace_fsm_dostate(ace);
740
741 spin_unlock_irqrestore(&ace->lock, flags);
742}
743
744static void ace_stall_timer(unsigned long data)
745{
746 struct ace_device *ace = (void *)data;
747 unsigned long flags;
748
749 dev_warn(ace->dev,
750 "kicking stalled fsm; state=%i task=%i iter=%i dc=%i\n",
751 ace->fsm_state, ace->fsm_task, ace->fsm_iter_num,
752 ace->data_count);
753 spin_lock_irqsave(&ace->lock, flags);
754
755 /* Rearm the stall timer *before* entering FSM (which may then
756 * delete the timer) */
757 mod_timer(&ace->stall_timer, jiffies + HZ);
758
759 /* Loop over state machine until told to stop */
760 ace->fsm_continue_flag = 1;
761 while (ace->fsm_continue_flag)
762 ace_fsm_dostate(ace);
763
764 spin_unlock_irqrestore(&ace->lock, flags);
765}
766
767/* ---------------------------------------------------------------------
768 * Interrupt handling routines
769 */
770static int ace_interrupt_checkstate(struct ace_device *ace)
771{
772 u32 sreg = ace_in32(ace, ACE_STATUS);
773 u16 creg = ace_in(ace, ACE_CTRL);
774
775 /* Check for error occurance */
776 if ((sreg & (ACE_STATUS_CFGERROR | ACE_STATUS_CFCERROR)) &&
777 (creg & ACE_CTRL_ERRORIRQ)) {
778 dev_err(ace->dev, "transfer failure\n");
779 ace_dump_regs(ace);
780 return -EIO;
781 }
782
783 return 0;
784}
785
786static irqreturn_t ace_interrupt(int irq, void *dev_id)
787{
788 u16 creg;
789 struct ace_device *ace = dev_id;
790
791 /* be safe and get the lock */
792 spin_lock(&ace->lock);
793 ace->in_irq = 1;
794
795 /* clear the interrupt */
796 creg = ace_in(ace, ACE_CTRL);
797 ace_out(ace, ACE_CTRL, creg | ACE_CTRL_RESETIRQ);
798 ace_out(ace, ACE_CTRL, creg);
799
800 /* check for IO failures */
801 if (ace_interrupt_checkstate(ace))
802 ace->data_result = -EIO;
803
804 if (ace->fsm_task == 0) {
805 dev_err(ace->dev,
806 "spurious irq; stat=%.8x ctrl=%.8x cmd=%.4x\n",
807 ace_in32(ace, ACE_STATUS), ace_in32(ace, ACE_CTRL),
808 ace_in(ace, ACE_SECCNTCMD));
809 dev_err(ace->dev, "fsm_task=%i fsm_state=%i data_count=%i\n",
810 ace->fsm_task, ace->fsm_state, ace->data_count);
811 }
812
813 /* Loop over state machine until told to stop */
814 ace->fsm_continue_flag = 1;
815 while (ace->fsm_continue_flag)
816 ace_fsm_dostate(ace);
817
818 /* done with interrupt; drop the lock */
819 ace->in_irq = 0;
820 spin_unlock(&ace->lock);
821
822 return IRQ_HANDLED;
823}
824
825/* ---------------------------------------------------------------------
826 * Block ops
827 */
828static void ace_request(request_queue_t * q)
829{
830 struct request *req;
831 struct ace_device *ace;
832
833 req = ace_get_next_request(q);
834
835 if (req) {
836 ace = req->rq_disk->private_data;
837 tasklet_schedule(&ace->fsm_tasklet);
838 }
839}
840
841static int ace_media_changed(struct gendisk *gd)
842{
843 struct ace_device *ace = gd->private_data;
844 dev_dbg(ace->dev, "ace_media_changed(): %i\n", ace->media_change);
845
846 return ace->media_change;
847}
848
/*
 * Revalidate the disk after a (possible) media change: request an
 * IDENTIFY through the FSM tasklet and block until it completes.
 * Returns ace->id_result (0 on success, negative errno from the last
 * identify on failure).
 */
static int ace_revalidate_disk(struct gendisk *gd)
{
	struct ace_device *ace = gd->private_data;
	unsigned long flags;

	dev_dbg(ace->dev, "ace_revalidate_disk()\n");

	if (ace->media_change) {
		dev_dbg(ace->dev, "requesting cf id and scheduling tasklet\n");

		/* id_req_count is read by the FSM under ace->lock; a
		 * non-zero count makes it run the IDENTIFY task */
		spin_lock_irqsave(&ace->lock, flags);
		ace->id_req_count++;
		spin_unlock_irqrestore(&ace->lock, flags);

		/* the FSM signals id_completion once per pending request */
		tasklet_schedule(&ace->fsm_tasklet);
		wait_for_completion(&ace->id_completion);
	}

	dev_dbg(ace->dev, "revalidate complete\n");
	return ace->id_result;
}
870
871static int ace_open(struct inode *inode, struct file *filp)
872{
873 struct ace_device *ace = inode->i_bdev->bd_disk->private_data;
874 unsigned long flags;
875
876 dev_dbg(ace->dev, "ace_open() users=%i\n", ace->users + 1);
877
878 filp->private_data = ace;
879 spin_lock_irqsave(&ace->lock, flags);
880 ace->users++;
881 spin_unlock_irqrestore(&ace->lock, flags);
882
883 check_disk_change(inode->i_bdev);
884 return 0;
885}
886
887static int ace_release(struct inode *inode, struct file *filp)
888{
889 struct ace_device *ace = inode->i_bdev->bd_disk->private_data;
890 unsigned long flags;
891 u16 val;
892
893 dev_dbg(ace->dev, "ace_release() users=%i\n", ace->users - 1);
894
895 spin_lock_irqsave(&ace->lock, flags);
896 ace->users--;
897 if (ace->users == 0) {
898 val = ace_in(ace, ACE_CTRL);
899 ace_out(ace, ACE_CTRL, val & ~ACE_CTRL_LOCKREQ);
900 }
901 spin_unlock_irqrestore(&ace->lock, flags);
902 return 0;
903}
904
905static int ace_ioctl(struct inode *inode, struct file *filp,
906 unsigned int cmd, unsigned long arg)
907{
908 struct ace_device *ace = inode->i_bdev->bd_disk->private_data;
909 struct hd_geometry __user *geo = (struct hd_geometry __user *)arg;
910 struct hd_geometry g;
911 dev_dbg(ace->dev, "ace_ioctl()\n");
912
913 switch (cmd) {
914 case HDIO_GETGEO:
915 g.heads = ace->cf_id.heads;
916 g.sectors = ace->cf_id.sectors;
917 g.cylinders = ace->cf_id.cyls;
918 g.start = 0;
919 return copy_to_user(geo, &g, sizeof(g)) ? -EFAULT : 0;
920
921 default:
922 return -ENOTTY;
923 }
924 return -ENOTTY;
925}
926
/* Operations exported to the block layer for the xs* disks */
static struct block_device_operations ace_fops = {
	.owner = THIS_MODULE,
	.open = ace_open,
	.release = ace_release,
	.media_changed = ace_media_changed,
	.revalidate_disk = ace_revalidate_disk,
	.ioctl = ace_ioctl,
};
935
936/* --------------------------------------------------------------------
937 * SystemACE device setup/teardown code
938 */
/*
 * Bus-independent device bring-up: map the register window, hook the
 * irq (falling back to polled mode if request_irq fails), initialize
 * the FSM tasklet/stall timer, create the request queue and gendisk,
 * probe the bus width and endianness, sanity-check the version
 * register, enable interrupts, run an initial IDENTIFY and finally
 * publish the disk with add_disk().
 *
 * Returns 0 on success.  NOTE(review): every failure path returns
 * -ENOMEM, even when the real cause was an ioremap or a bad version
 * register read.
 */
static int __devinit ace_setup(struct ace_device *ace)
{
	u16 version;
	u16 val;

	int rc;

	spin_lock_init(&ace->lock);
	init_completion(&ace->id_completion);

	/*
	 * Map the device
	 */
	ace->baseaddr = ioremap(ace->physaddr, 0x80);
	if (!ace->baseaddr)
		goto err_ioremap;

	if (ace->irq != NO_IRQ) {
		rc = request_irq(ace->irq, ace_interrupt, 0, "systemace", ace);
		if (rc) {
			/* Failure - fall back to polled mode */
			dev_err(ace->dev, "request_irq failed\n");
			ace->irq = NO_IRQ;
		}
	}

	/*
	 * Initialize the state machine tasklet and stall timer
	 */
	tasklet_init(&ace->fsm_tasklet, ace_fsm_tasklet, (unsigned long)ace);
	setup_timer(&ace->stall_timer, ace_stall_timer, (unsigned long)ace);

	/*
	 * Initialize the request queue
	 */
	ace->queue = blk_init_queue(ace_request, &ace->lock);
	if (ace->queue == NULL)
		goto err_blk_initq;
	blk_queue_hardsect_size(ace->queue, 512);

	/*
	 * Allocate and initialize GD structure
	 */
	ace->gd = alloc_disk(ACE_NUM_MINORS);
	if (!ace->gd)
		goto err_alloc_disk;

	ace->gd->major = ace_major;
	ace->gd->first_minor = ace->id * ACE_NUM_MINORS;
	ace->gd->fops = &ace_fops;
	ace->gd->queue = ace->queue;
	ace->gd->private_data = ace;
	/* disks are named xsa, xsb, ... by platform device id */
	snprintf(ace->gd->disk_name, 32, "xs%c", ace->id + 'a');

	/* set bus width */
	if (ace->bus_width == 1) {
		/* 0x0101 should work regardless of endianess */
		ace_out_le16(ace, ACE_BUSMODE, 0x0101);

		/* read it back to determine endianess */
		if (ace_in_le16(ace, ACE_BUSMODE) == 0x0001)
			ace->reg_ops = &ace_reg_le16_ops;
		else
			ace->reg_ops = &ace_reg_be16_ops;
	} else {
		ace_out_8(ace, ACE_BUSMODE, 0x00);
		ace->reg_ops = &ace_reg_8_ops;
	}

	/* Make sure version register is sane; all-0s/all-1s suggests a
	 * missing or misbehaving device */
	version = ace_in(ace, ACE_VERSION);
	if ((version == 0) || (version == 0xFFFF))
		goto err_read;

	/* Put sysace in a sane state by clearing most control reg bits */
	ace_out(ace, ACE_CTRL, ACE_CTRL_FORCECFGMODE |
		ACE_CTRL_DATABUFRDYIRQ | ACE_CTRL_ERRORIRQ);

	/* Enable interrupts */
	val = ace_in(ace, ACE_CTRL);
	val |= ACE_CTRL_DATABUFRDYIRQ | ACE_CTRL_ERRORIRQ;
	ace_out(ace, ACE_CTRL, val);

	/* Print the identification */
	dev_info(ace->dev, "Xilinx SystemACE revision %i.%i.%i\n",
		 (version >> 12) & 0xf, (version >> 8) & 0x0f, version & 0xff);
	dev_dbg(ace->dev, "physaddr 0x%lx, mapped to 0x%p, irq=%i\n",
		ace->physaddr, ace->baseaddr, ace->irq);

	/* Force an IDENTIFY so capacity is known before the disk goes live */
	ace->media_change = 1;
	ace_revalidate_disk(ace->gd);

	/* Make the sysace device 'live' */
	add_disk(ace->gd);

	return 0;

	/* error unwind: each label undoes everything acquired before the
	 * corresponding goto (free_irq is skipped on the err_ioremap path
	 * because the irq is only requested after a successful ioremap) */
      err_read:
	put_disk(ace->gd);
      err_alloc_disk:
	blk_cleanup_queue(ace->queue);
      err_blk_initq:
	iounmap(ace->baseaddr);
	if (ace->irq != NO_IRQ)
		free_irq(ace->irq, ace);
      err_ioremap:
	printk(KERN_INFO "xsysace: error initializing device at 0x%lx\n",
	       ace->physaddr);
	return -ENOMEM;
}
1049
/*
 * Undo ace_setup(): unpublish the disk, tear down the request queue,
 * kill the FSM tasklet, release the irq (if one was hooked) and unmap
 * the register window.  Tolerates a partially-initialized device
 * (gd/queue may be NULL).
 */
static void __devexit ace_teardown(struct ace_device *ace)
{
	if (ace->gd) {
		del_gendisk(ace->gd);
		put_disk(ace->gd);
	}

	if (ace->queue)
		blk_cleanup_queue(ace->queue);

	/* ensure no further FSM pass can run before we unmap/free */
	tasklet_kill(&ace->fsm_tasklet);

	if (ace->irq != NO_IRQ)
		free_irq(ace->irq, ace);

	iounmap(ace->baseaddr);
}
1067
1068/* ---------------------------------------------------------------------
1069 * Platform Bus Support
1070 */
1071
1072static int __devinit ace_probe(struct device *device)
1073{
1074 struct platform_device *dev = to_platform_device(device);
1075 struct ace_device *ace;
1076 int i;
1077
1078 dev_dbg(device, "ace_probe(%p)\n", device);
1079
1080 /*
1081 * Allocate the ace device structure
1082 */
1083 ace = kzalloc(sizeof(struct ace_device), GFP_KERNEL);
1084 if (!ace)
1085 goto err_alloc;
1086
1087 ace->dev = device;
1088 ace->id = dev->id;
1089 ace->irq = NO_IRQ;
1090
1091 for (i = 0; i < dev->num_resources; i++) {
1092 if (dev->resource[i].flags & IORESOURCE_MEM)
1093 ace->physaddr = dev->resource[i].start;
1094 if (dev->resource[i].flags & IORESOURCE_IRQ)
1095 ace->irq = dev->resource[i].start;
1096 }
1097
1098 /* FIXME: Should get bus_width from the platform_device struct */
1099 ace->bus_width = 1;
1100
1101 dev_set_drvdata(&dev->dev, ace);
1102
1103 /* Call the bus-independant setup code */
1104 if (ace_setup(ace) != 0)
1105 goto err_setup;
1106
1107 return 0;
1108
1109 err_setup:
1110 dev_set_drvdata(&dev->dev, NULL);
1111 kfree(ace);
1112 err_alloc:
1113 printk(KERN_ERR "xsysace: could not initialize device\n");
1114 return -ENOMEM;
1115}
1116
1117/*
1118 * Platform bus remove() method
1119 */
1120static int __devexit ace_remove(struct device *device)
1121{
1122 struct ace_device *ace = dev_get_drvdata(device);
1123
1124 dev_dbg(device, "ace_remove(%p)\n", device);
1125
1126 if (ace) {
1127 ace_teardown(ace);
1128 kfree(ace);
1129 }
1130
1131 return 0;
1132}
1133
/* Platform bus glue: binds platform devices named "xsysace" */
static struct device_driver ace_driver = {
	.name = "xsysace",
	.bus = &platform_bus_type,
	.probe = ace_probe,
	.remove = __devexit_p(ace_remove),
};
1140
1141/* ---------------------------------------------------------------------
1142 * Module init/exit routines
1143 */
1144static int __init ace_init(void)
1145{
1146 ace_major = register_blkdev(ace_major, "xsysace");
1147 if (ace_major <= 0) {
1148 printk(KERN_WARNING "xsysace: register_blkdev() failed\n");
1149 return ace_major;
1150 }
1151
1152 pr_debug("Registering Xilinx SystemACE driver, major=%i\n", ace_major);
1153 return driver_register(&ace_driver);
1154}
1155
/*
 * Module unload: unregister the driver first so no new probes can
 * occur, then release the block major.
 */
static void __exit ace_exit(void)
{
	pr_debug("Unregistering Xilinx SystemACE driver\n");
	driver_unregister(&ace_driver);
	unregister_blkdev(ace_major, "xsysace");
}
1162
/* Module entry/exit points */
module_init(ace_init);
module_exit(ace_exit);
diff --git a/drivers/block/z2ram.c b/drivers/block/z2ram.c
index 2abf94cc3137..e40fa98842e5 100644
--- a/drivers/block/z2ram.c
+++ b/drivers/block/z2ram.c
@@ -371,9 +371,7 @@ static void __exit z2_exit(void)
371{ 371{
372 int i, j; 372 int i, j;
373 blk_unregister_region(MKDEV(Z2RAM_MAJOR, 0), 256); 373 blk_unregister_region(MKDEV(Z2RAM_MAJOR, 0), 256);
374 if ( unregister_blkdev( Z2RAM_MAJOR, DEVICE_NAME ) != 0 ) 374 unregister_blkdev(Z2RAM_MAJOR, DEVICE_NAME);
375 printk( KERN_ERR DEVICE_NAME ": unregister of device failed\n");
376
377 del_gendisk(z2ram_gendisk); 375 del_gendisk(z2ram_gendisk);
378 put_disk(z2ram_gendisk); 376 put_disk(z2ram_gendisk);
379 blk_cleanup_queue(z2_queue); 377 blk_cleanup_queue(z2_queue);
diff --git a/drivers/char/Kconfig b/drivers/char/Kconfig
index ec9dc3d53f18..d8d7125529c4 100644
--- a/drivers/char/Kconfig
+++ b/drivers/char/Kconfig
@@ -114,7 +114,7 @@ config COMPUTONE
114 114
115config ROCKETPORT 115config ROCKETPORT
116 tristate "Comtrol RocketPort support" 116 tristate "Comtrol RocketPort support"
117 depends on SERIAL_NONSTANDARD 117 depends on SERIAL_NONSTANDARD && (ISA || EISA || PCI)
118 help 118 help
119 This driver supports Comtrol RocketPort and RocketModem PCI boards. 119 This driver supports Comtrol RocketPort and RocketModem PCI boards.
120 These boards provide 2, 4, 8, 16, or 32 high-speed serial ports or 120 These boards provide 2, 4, 8, 16, or 32 high-speed serial ports or
@@ -157,7 +157,7 @@ config CYZ_INTR
157 157
158config DIGIEPCA 158config DIGIEPCA
159 tristate "Digiboard Intelligent Async Support" 159 tristate "Digiboard Intelligent Async Support"
160 depends on SERIAL_NONSTANDARD 160 depends on SERIAL_NONSTANDARD && (ISA || EISA || PCI)
161 ---help--- 161 ---help---
162 This is a driver for Digi International's Xx, Xeve, and Xem series 162 This is a driver for Digi International's Xx, Xeve, and Xem series
163 of cards which provide multiple serial ports. You would need 163 of cards which provide multiple serial ports. You would need
@@ -213,8 +213,6 @@ config MOXA_SMARTIO_NEW
213 This is upgraded (1.9.1) driver from original Moxa drivers with 213 This is upgraded (1.9.1) driver from original Moxa drivers with
214 changes finally resulting in PCI probing. 214 changes finally resulting in PCI probing.
215 215
216 Use at your own risk.
217
218 This driver can also be built as a module. The module will be called 216 This driver can also be built as a module. The module will be called
219 mxser_new. If you want to do that, say M here. 217 mxser_new. If you want to do that, say M here.
220 218
@@ -354,7 +352,7 @@ config STALDRV
354 352
355config STALLION 353config STALLION
356 tristate "Stallion EasyIO or EC8/32 support" 354 tristate "Stallion EasyIO or EC8/32 support"
357 depends on STALDRV && BROKEN_ON_SMP 355 depends on STALDRV && BROKEN_ON_SMP && (ISA || EISA || PCI)
358 help 356 help
359 If you have an EasyIO or EasyConnection 8/32 multiport Stallion 357 If you have an EasyIO or EasyConnection 8/32 multiport Stallion
360 card, then this is for you; say Y. Make sure to read 358 card, then this is for you; say Y. Make sure to read
@@ -365,7 +363,7 @@ config STALLION
365 363
366config ISTALLION 364config ISTALLION
367 tristate "Stallion EC8/64, ONboard, Brumby support" 365 tristate "Stallion EC8/64, ONboard, Brumby support"
368 depends on STALDRV && BROKEN_ON_SMP 366 depends on STALDRV && BROKEN_ON_SMP && (ISA || EISA || PCI)
369 help 367 help
370 If you have an EasyConnection 8/64, ONboard, Brumby or Stallion 368 If you have an EasyConnection 8/64, ONboard, Brumby or Stallion
371 serial multiport card, say Y here. Make sure to read 369 serial multiport card, say Y here. Make sure to read
diff --git a/drivers/char/apm-emulation.c b/drivers/char/apm-emulation.c
index 179c7a3b6e75..ec116df919d9 100644
--- a/drivers/char/apm-emulation.c
+++ b/drivers/char/apm-emulation.c
@@ -20,6 +20,7 @@
20#include <linux/sched.h> 20#include <linux/sched.h>
21#include <linux/pm.h> 21#include <linux/pm.h>
22#include <linux/apm-emulation.h> 22#include <linux/apm-emulation.h>
23#include <linux/freezer.h>
23#include <linux/device.h> 24#include <linux/device.h>
24#include <linux/kernel.h> 25#include <linux/kernel.h>
25#include <linux/list.h> 26#include <linux/list.h>
@@ -329,13 +330,8 @@ apm_ioctl(struct inode * inode, struct file *filp, u_int cmd, u_long arg)
329 /* 330 /*
330 * Wait for the suspend/resume to complete. If there 331 * Wait for the suspend/resume to complete. If there
331 * are pending acknowledges, we wait here for them. 332 * are pending acknowledges, we wait here for them.
332 *
333 * Note: we need to ensure that the PM subsystem does
334 * not kick us out of the wait when it suspends the
335 * threads.
336 */ 333 */
337 flags = current->flags; 334 flags = current->flags;
338 current->flags |= PF_NOFREEZE;
339 335
340 wait_event(apm_suspend_waitqueue, 336 wait_event(apm_suspend_waitqueue,
341 as->suspend_state == SUSPEND_DONE); 337 as->suspend_state == SUSPEND_DONE);
@@ -365,13 +361,8 @@ apm_ioctl(struct inode * inode, struct file *filp, u_int cmd, u_long arg)
365 /* 361 /*
366 * Wait for the suspend/resume to complete. If there 362 * Wait for the suspend/resume to complete. If there
367 * are pending acknowledges, we wait here for them. 363 * are pending acknowledges, we wait here for them.
368 *
369 * Note: we need to ensure that the PM subsystem does
370 * not kick us out of the wait when it suspends the
371 * threads.
372 */ 364 */
373 flags = current->flags; 365 flags = current->flags;
374 current->flags |= PF_NOFREEZE;
375 366
376 wait_event_interruptible(apm_suspend_waitqueue, 367 wait_event_interruptible(apm_suspend_waitqueue,
377 as->suspend_state == SUSPEND_DONE); 368 as->suspend_state == SUSPEND_DONE);
@@ -598,7 +589,6 @@ static int __init apm_init(void)
598 kapmd_tsk = NULL; 589 kapmd_tsk = NULL;
599 return ret; 590 return ret;
600 } 591 }
601 kapmd_tsk->flags |= PF_NOFREEZE;
602 wake_up_process(kapmd_tsk); 592 wake_up_process(kapmd_tsk);
603 593
604#ifdef CONFIG_PROC_FS 594#ifdef CONFIG_PROC_FS
diff --git a/drivers/char/cyclades.c b/drivers/char/cyclades.c
index e04005b5f8a6..9e0adfe27c12 100644
--- a/drivers/char/cyclades.c
+++ b/drivers/char/cyclades.c
@@ -646,6 +646,7 @@
646#include <linux/delay.h> 646#include <linux/delay.h>
647#include <linux/spinlock.h> 647#include <linux/spinlock.h>
648#include <linux/bitops.h> 648#include <linux/bitops.h>
649#include <linux/firmware.h>
649 650
650#include <asm/system.h> 651#include <asm/system.h>
651#include <asm/io.h> 652#include <asm/io.h>
@@ -680,6 +681,44 @@ static void cy_send_xchar(struct tty_struct *tty, char ch);
680 681
681#define STD_COM_FLAGS (0) 682#define STD_COM_FLAGS (0)
682 683
684/* firmware stuff */
685#define ZL_MAX_BLOCKS 16
686#define DRIVER_VERSION 0x02010203
687#define RAM_SIZE 0x80000
688
689#define Z_FPGA_LOADED(X) ((readl(&(X)->init_ctrl) & (1<<17)) != 0)
690
691enum zblock_type {
692 ZBLOCK_PRG = 0,
693 ZBLOCK_FPGA = 1
694};
695
696struct zfile_header {
697 char name[64];
698 char date[32];
699 char aux[32];
700 u32 n_config;
701 u32 config_offset;
702 u32 n_blocks;
703 u32 block_offset;
704 u32 reserved[9];
705} __attribute__ ((packed));
706
707struct zfile_config {
708 char name[64];
709 u32 mailbox;
710 u32 function;
711 u32 n_blocks;
712 u32 block_list[ZL_MAX_BLOCKS];
713} __attribute__ ((packed));
714
715struct zfile_block {
716 u32 type;
717 u32 file_offset;
718 u32 ram_offset;
719 u32 size;
720} __attribute__ ((packed));
721
683static struct tty_driver *cy_serial_driver; 722static struct tty_driver *cy_serial_driver;
684 723
685#ifdef CONFIG_ISA 724#ifdef CONFIG_ISA
@@ -1851,11 +1890,11 @@ static void cyz_poll(unsigned long arg)
1851 struct cyclades_card *cinfo; 1890 struct cyclades_card *cinfo;
1852 struct cyclades_port *info; 1891 struct cyclades_port *info;
1853 struct tty_struct *tty; 1892 struct tty_struct *tty;
1854 static struct FIRM_ID *firm_id; 1893 struct FIRM_ID __iomem *firm_id;
1855 static struct ZFW_CTRL *zfw_ctrl; 1894 struct ZFW_CTRL __iomem *zfw_ctrl;
1856 static struct BOARD_CTRL *board_ctrl; 1895 struct BOARD_CTRL __iomem *board_ctrl;
1857 static struct CH_CTRL *ch_ctrl; 1896 struct CH_CTRL __iomem *ch_ctrl;
1858 static struct BUF_CTRL *buf_ctrl; 1897 struct BUF_CTRL __iomem *buf_ctrl;
1859 unsigned long expires = jiffies + HZ; 1898 unsigned long expires = jiffies + HZ;
1860 int card, port; 1899 int card, port;
1861 1900
@@ -1999,7 +2038,6 @@ static int startup(struct cyclades_port *info)
1999 struct ZFW_CTRL __iomem *zfw_ctrl; 2038 struct ZFW_CTRL __iomem *zfw_ctrl;
2000 struct BOARD_CTRL __iomem *board_ctrl; 2039 struct BOARD_CTRL __iomem *board_ctrl;
2001 struct CH_CTRL __iomem *ch_ctrl; 2040 struct CH_CTRL __iomem *ch_ctrl;
2002 int retval;
2003 2041
2004 base_addr = card->base_addr; 2042 base_addr = card->base_addr;
2005 2043
@@ -2371,7 +2409,6 @@ block_til_ready(struct tty_struct *tty, struct file *filp,
2371 struct ZFW_CTRL __iomem *zfw_ctrl; 2409 struct ZFW_CTRL __iomem *zfw_ctrl;
2372 struct BOARD_CTRL __iomem *board_ctrl; 2410 struct BOARD_CTRL __iomem *board_ctrl;
2373 struct CH_CTRL __iomem *ch_ctrl; 2411 struct CH_CTRL __iomem *ch_ctrl;
2374 int retval;
2375 2412
2376 base_addr = cinfo->base_addr; 2413 base_addr = cinfo->base_addr;
2377 firm_id = base_addr + ID_ADDRESS; 2414 firm_id = base_addr + ID_ADDRESS;
@@ -4429,10 +4466,10 @@ static void cy_hangup(struct tty_struct *tty)
4429static int __devinit cy_init_card(struct cyclades_card *cinfo) 4466static int __devinit cy_init_card(struct cyclades_card *cinfo)
4430{ 4467{
4431 struct cyclades_port *info; 4468 struct cyclades_port *info;
4432 u32 mailbox; 4469 u32 uninitialized_var(mailbox);
4433 unsigned int nports; 4470 unsigned int nports;
4434 unsigned short chip_number; 4471 unsigned short chip_number;
4435 int index, port; 4472 int uninitialized_var(index), port;
4436 4473
4437 spin_lock_init(&cinfo->card_lock); 4474 spin_lock_init(&cinfo->card_lock);
4438 4475
@@ -4735,17 +4772,295 @@ static int __init cy_detect_isa(void)
4735} /* cy_detect_isa */ 4772} /* cy_detect_isa */
4736 4773
4737#ifdef CONFIG_PCI 4774#ifdef CONFIG_PCI
4738static void __devinit plx_init(void __iomem * addr, __u32 initctl) 4775static inline int __devinit cyc_isfwstr(const char *str, unsigned int size)
4776{
4777 unsigned int a;
4778
4779 for (a = 0; a < size && *str; a++, str++)
4780 if (*str & 0x80)
4781 return -EINVAL;
4782
4783 for (; a < size; a++, str++)
4784 if (*str)
4785 return -EINVAL;
4786
4787 return 0;
4788}
4789
4790static inline void __devinit cyz_fpga_copy(void __iomem *fpga, u8 *data,
4791 unsigned int size)
4792{
4793 for (; size > 0; size--) {
4794 cy_writel(fpga, *data++);
4795 udelay(10);
4796 }
4797}
4798
4799static void __devinit plx_init(struct pci_dev *pdev, int irq,
4800 struct RUNTIME_9060 __iomem *addr)
4739{ 4801{
4740 /* Reset PLX */ 4802 /* Reset PLX */
4741 cy_writel(addr + initctl, readl(addr + initctl) | 0x40000000); 4803 cy_writel(&addr->init_ctrl, readl(&addr->init_ctrl) | 0x40000000);
4742 udelay(100L); 4804 udelay(100L);
4743 cy_writel(addr + initctl, readl(addr + initctl) & ~0x40000000); 4805 cy_writel(&addr->init_ctrl, readl(&addr->init_ctrl) & ~0x40000000);
4744 4806
4745 /* Reload Config. Registers from EEPROM */ 4807 /* Reload Config. Registers from EEPROM */
4746 cy_writel(addr + initctl, readl(addr + initctl) | 0x20000000); 4808 cy_writel(&addr->init_ctrl, readl(&addr->init_ctrl) | 0x20000000);
4747 udelay(100L); 4809 udelay(100L);
4748 cy_writel(addr + initctl, readl(addr + initctl) & ~0x20000000); 4810 cy_writel(&addr->init_ctrl, readl(&addr->init_ctrl) & ~0x20000000);
4811
4812 /* For some yet unknown reason, once the PLX9060 reloads the EEPROM,
4813 * the IRQ is lost and, thus, we have to re-write it to the PCI config.
4814 * registers. This will remain here until we find a permanent fix.
4815 */
4816 pci_write_config_byte(pdev, PCI_INTERRUPT_LINE, irq);
4817}
4818
4819static int __devinit __cyz_load_fw(const struct firmware *fw,
4820 const char *name, const u32 mailbox, void __iomem *base,
4821 void __iomem *fpga)
4822{
4823 void *ptr = fw->data;
4824 struct zfile_header *h = ptr;
4825 struct zfile_config *c, *cs;
4826 struct zfile_block *b, *bs;
4827 unsigned int a, tmp, len = fw->size;
4828#define BAD_FW KERN_ERR "Bad firmware: "
4829 if (len < sizeof(*h)) {
4830 printk(BAD_FW "too short: %u<%zu\n", len, sizeof(*h));
4831 return -EINVAL;
4832 }
4833
4834 cs = ptr + h->config_offset;
4835 bs = ptr + h->block_offset;
4836
4837 if ((void *)(cs + h->n_config) > ptr + len ||
4838 (void *)(bs + h->n_blocks) > ptr + len) {
4839 printk(BAD_FW "too short");
4840 return -EINVAL;
4841 }
4842
4843 if (cyc_isfwstr(h->name, sizeof(h->name)) ||
4844 cyc_isfwstr(h->date, sizeof(h->date))) {
4845 printk(BAD_FW "bad formatted header string\n");
4846 return -EINVAL;
4847 }
4848
4849 if (strncmp(name, h->name, sizeof(h->name))) {
4850 printk(BAD_FW "bad name '%s' (expected '%s')\n", h->name, name);
4851 return -EINVAL;
4852 }
4853
4854 tmp = 0;
4855 for (c = cs; c < cs + h->n_config; c++) {
4856 for (a = 0; a < c->n_blocks; a++)
4857 if (c->block_list[a] > h->n_blocks) {
4858 printk(BAD_FW "bad block ref number in cfgs\n");
4859 return -EINVAL;
4860 }
4861 if (c->mailbox == mailbox && c->function == 0) /* 0 is normal */
4862 tmp++;
4863 }
4864 if (!tmp) {
4865 printk(BAD_FW "nothing appropriate\n");
4866 return -EINVAL;
4867 }
4868
4869 for (b = bs; b < bs + h->n_blocks; b++)
4870 if (b->file_offset + b->size > len) {
4871 printk(BAD_FW "bad block data offset\n");
4872 return -EINVAL;
4873 }
4874
4875 /* everything is OK, let's seek'n'load it */
4876 for (c = cs; c < cs + h->n_config; c++)
4877 if (c->mailbox == mailbox && c->function == 0)
4878 break;
4879
4880 for (a = 0; a < c->n_blocks; a++) {
4881 b = &bs[c->block_list[a]];
4882 if (b->type == ZBLOCK_FPGA) {
4883 if (fpga != NULL)
4884 cyz_fpga_copy(fpga, ptr + b->file_offset,
4885 b->size);
4886 } else {
4887 if (base != NULL)
4888 memcpy_toio(base + b->ram_offset,
4889 ptr + b->file_offset, b->size);
4890 }
4891 }
4892#undef BAD_FW
4893 return 0;
4894}
4895
4896static int __devinit cyz_load_fw(struct pci_dev *pdev, void __iomem *base_addr,
4897 struct RUNTIME_9060 __iomem *ctl_addr, int irq)
4898{
4899 const struct firmware *fw;
4900 struct FIRM_ID __iomem *fid = base_addr + ID_ADDRESS;
4901 struct CUSTOM_REG __iomem *cust = base_addr;
4902 struct ZFW_CTRL __iomem *pt_zfwctrl;
4903 void __iomem *tmp;
4904 u32 mailbox, status;
4905 unsigned int i;
4906 int retval;
4907
4908 retval = request_firmware(&fw, "cyzfirm.bin", &pdev->dev);
4909 if (retval) {
4910 dev_err(&pdev->dev, "can't get firmware\n");
4911 goto err;
4912 }
4913
4914 /* Check whether the firmware is already loaded and running. If
4915 positive, skip this board */
4916 if (Z_FPGA_LOADED(ctl_addr) && readl(&fid->signature) == ZFIRM_ID) {
4917 u32 cntval = readl(base_addr + 0x190);
4918
4919 udelay(100);
4920 if (cntval != readl(base_addr + 0x190)) {
4921 /* FW counter is working, FW is running */
4922 dev_dbg(&pdev->dev, "Cyclades-Z FW already loaded. "
4923 "Skipping board.\n");
4924 retval = 0;
4925 goto err_rel;
4926 }
4927 }
4928
4929 /* start boot */
4930 cy_writel(&ctl_addr->intr_ctrl_stat, readl(&ctl_addr->intr_ctrl_stat) &
4931 ~0x00030800UL);
4932
4933 mailbox = readl(&ctl_addr->mail_box_0);
4934
4935 if (mailbox == 0 || Z_FPGA_LOADED(ctl_addr)) {
4936 /* stops CPU and set window to beginning of RAM */
4937 cy_writel(&ctl_addr->loc_addr_base, WIN_CREG);
4938 cy_writel(&cust->cpu_stop, 0);
4939 cy_writel(&ctl_addr->loc_addr_base, WIN_RAM);
4940 udelay(100);
4941 }
4942
4943 plx_init(pdev, irq, ctl_addr);
4944
4945 if (mailbox != 0) {
4946 /* load FPGA */
4947 retval = __cyz_load_fw(fw, "Cyclom-Z", mailbox, NULL,
4948 base_addr);
4949 if (retval)
4950 goto err_rel;
4951 if (!Z_FPGA_LOADED(ctl_addr)) {
4952 dev_err(&pdev->dev, "fw upload successful, but fw is "
4953 "not loaded\n");
4954 goto err_rel;
4955 }
4956 }
4957
4958 /* stops CPU and set window to beginning of RAM */
4959 cy_writel(&ctl_addr->loc_addr_base, WIN_CREG);
4960 cy_writel(&cust->cpu_stop, 0);
4961 cy_writel(&ctl_addr->loc_addr_base, WIN_RAM);
4962 udelay(100);
4963
4964 /* clear memory */
4965 for (tmp = base_addr; tmp < base_addr + RAM_SIZE; tmp++)
4966 cy_writeb(tmp, 255);
4967 if (mailbox != 0) {
4968 /* set window to last 512K of RAM */
4969 cy_writel(&ctl_addr->loc_addr_base, WIN_RAM + RAM_SIZE);
4970 //sleep(1);
4971 for (tmp = base_addr; tmp < base_addr + RAM_SIZE; tmp++)
4972 cy_writeb(tmp, 255);
4973 /* set window to beginning of RAM */
4974 cy_writel(&ctl_addr->loc_addr_base, WIN_RAM);
4975 //sleep(1);
4976 }
4977
4978 retval = __cyz_load_fw(fw, "Cyclom-Z", mailbox, base_addr, NULL);
4979 release_firmware(fw);
4980 if (retval)
4981 goto err;
4982
4983 /* finish boot and start boards */
4984 cy_writel(&ctl_addr->loc_addr_base, WIN_CREG);
4985 cy_writel(&cust->cpu_start, 0);
4986 cy_writel(&ctl_addr->loc_addr_base, WIN_RAM);
4987 i = 0;
4988 while ((status = readl(&fid->signature)) != ZFIRM_ID && i++ < 40)
4989 msleep(100);
4990 if (status != ZFIRM_ID) {
4991 if (status == ZFIRM_HLT) {
4992 dev_err(&pdev->dev, "you need an external power supply "
4993 "for this number of ports. Firmware halted and "
4994 "board reset.\n");
4995 retval = -EIO;
4996 goto err;
4997 }
4998 dev_warn(&pdev->dev, "fid->signature = 0x%x... Waiting "
4999 "some more time\n", status);
5000 while ((status = readl(&fid->signature)) != ZFIRM_ID &&
5001 i++ < 200)
5002 msleep(100);
5003 if (status != ZFIRM_ID) {
5004 dev_err(&pdev->dev, "Board not started in 20 seconds! "
5005 "Giving up. (fid->signature = 0x%x)\n",
5006 status);
5007 dev_info(&pdev->dev, "*** Warning ***: if you are "
5008 "upgrading the FW, please power cycle the "
5009 "system before loading the new FW to the "
5010 "Cyclades-Z.\n");
5011
5012 if (Z_FPGA_LOADED(ctl_addr))
5013 plx_init(pdev, irq, ctl_addr);
5014
5015 retval = -EIO;
5016 goto err;
5017 }
5018 dev_dbg(&pdev->dev, "Firmware started after %d seconds.\n",
5019 i / 10);
5020 }
5021 pt_zfwctrl = base_addr + readl(&fid->zfwctrl_addr);
5022
5023 dev_dbg(&pdev->dev, "fid=> %p, zfwctrl_addr=> %x, npt_zfwctrl=> %p\n",
5024 base_addr + ID_ADDRESS, readl(&fid->zfwctrl_addr),
5025 base_addr + readl(&fid->zfwctrl_addr));
5026
5027 dev_info(&pdev->dev, "Cyclades-Z FW loaded: version = %x, ports = %u\n",
5028 readl(&pt_zfwctrl->board_ctrl.fw_version),
5029 readl(&pt_zfwctrl->board_ctrl.n_channel));
5030
5031 if (readl(&pt_zfwctrl->board_ctrl.n_channel) == 0) {
5032 dev_warn(&pdev->dev, "no Cyclades-Z ports were found. Please "
5033 "check the connection between the Z host card and the "
5034 "serial expanders.\n");
5035
5036 if (Z_FPGA_LOADED(ctl_addr))
5037 plx_init(pdev, irq, ctl_addr);
5038
5039 dev_info(&pdev->dev, "Null number of ports detected. Board "
5040 "reset.\n");
5041 retval = 0;
5042 goto err;
5043 }
5044
5045 cy_writel(&pt_zfwctrl->board_ctrl.op_system, C_OS_LINUX);
5046 cy_writel(&pt_zfwctrl->board_ctrl.dr_version, DRIVER_VERSION);
5047
5048 /*
5049 Early firmware failed to start looking for commands.
5050 This enables firmware interrupts for those commands.
5051 */
5052 cy_writel(&ctl_addr->intr_ctrl_stat, readl(&ctl_addr->intr_ctrl_stat) |
5053 (1 << 17));
5054 cy_writel(&ctl_addr->intr_ctrl_stat, readl(&ctl_addr->intr_ctrl_stat) |
5055 0x00030800UL);
5056
5057 plx_init(pdev, irq, ctl_addr);
5058
5059 return 0;
5060err_rel:
5061 release_firmware(fw);
5062err:
5063 return retval;
4749} 5064}
4750 5065
4751static int __devinit cy_pci_probe(struct pci_dev *pdev, 5066static int __devinit cy_pci_probe(struct pci_dev *pdev,
@@ -4827,16 +5142,9 @@ static int __devinit cy_pci_probe(struct pci_dev *pdev,
4827 } 5142 }
4828 5143
4829 /* Disable interrupts on the PLX before resetting it */ 5144 /* Disable interrupts on the PLX before resetting it */
4830 cy_writew(addr0 + 0x68, 5145 cy_writew(addr0 + 0x68, readw(addr0 + 0x68) & ~0x0900);
4831 readw(addr0 + 0x68) & ~0x0900);
4832 5146
4833 plx_init(addr0, 0x6c); 5147 plx_init(pdev, irq, addr0);
4834 /* For some yet unknown reason, once the PLX9060 reloads
4835 the EEPROM, the IRQ is lost and, thus, we have to
4836 re-write it to the PCI config. registers.
4837 This will remain here until we find a permanent
4838 fix. */
4839 pci_write_config_byte(pdev, PCI_INTERRUPT_LINE, irq);
4840 5148
4841 mailbox = (u32)readl(&ctl_addr->mail_box_0); 5149 mailbox = (u32)readl(&ctl_addr->mail_box_0);
4842 5150
@@ -4877,6 +5185,9 @@ static int __devinit cy_pci_probe(struct pci_dev *pdev,
4877 if ((mailbox == ZO_V1) || (mailbox == ZO_V2)) 5185 if ((mailbox == ZO_V1) || (mailbox == ZO_V2))
4878 cy_writel(addr2 + ID_ADDRESS, 0L); 5186 cy_writel(addr2 + ID_ADDRESS, 0L);
4879 5187
5188 retval = cyz_load_fw(pdev, addr2, addr0, irq);
5189 if (retval)
5190 goto err_unmap;
4880 /* This must be a Cyclades-8Zo/PCI. The extendable 5191 /* This must be a Cyclades-8Zo/PCI. The extendable
4881 version will have a different device_id and will 5192 version will have a different device_id and will
4882 be allocated its maximum number of ports. */ 5193 be allocated its maximum number of ports. */
@@ -4953,15 +5264,7 @@ static int __devinit cy_pci_probe(struct pci_dev *pdev,
4953 case PLX_9060: 5264 case PLX_9060:
4954 case PLX_9080: 5265 case PLX_9080:
4955 default: /* Old boards, use PLX_9060 */ 5266 default: /* Old boards, use PLX_9060 */
4956 5267 plx_init(pdev, irq, addr0);
4957 plx_init(addr0, 0x6c);
4958 /* For some yet unknown reason, once the PLX9060 reloads
4959 the EEPROM, the IRQ is lost and, thus, we have to
4960 re-write it to the PCI config. registers.
4961 This will remain here until we find a permanent
4962 fix. */
4963 pci_write_config_byte(pdev, PCI_INTERRUPT_LINE, irq);
4964
4965 cy_writew(addr0 + 0x68, readw(addr0 + 0x68) | 0x0900); 5268 cy_writew(addr0 + 0x68, readw(addr0 + 0x68) | 0x0900);
4966 break; 5269 break;
4967 } 5270 }
diff --git a/drivers/char/drm/drm_stub.c b/drivers/char/drm/drm_stub.c
index 9138b49e676e..ee83ff9efed6 100644
--- a/drivers/char/drm/drm_stub.c
+++ b/drivers/char/drm/drm_stub.c
@@ -72,6 +72,8 @@ static int drm_fill_in_dev(struct drm_device * dev, struct pci_dev *pdev,
72 mutex_init(&dev->struct_mutex); 72 mutex_init(&dev->struct_mutex);
73 mutex_init(&dev->ctxlist_mutex); 73 mutex_init(&dev->ctxlist_mutex);
74 74
75 idr_init(&dev->drw_idr);
76
75 dev->pdev = pdev; 77 dev->pdev = pdev;
76 dev->pci_device = pdev->device; 78 dev->pci_device = pdev->device;
77 dev->pci_vendor = pdev->vendor; 79 dev->pci_vendor = pdev->vendor;
diff --git a/drivers/char/drm/sis_mm.c b/drivers/char/drm/sis_mm.c
index 0580fa33cb77..441bbdbf1510 100644
--- a/drivers/char/drm/sis_mm.c
+++ b/drivers/char/drm/sis_mm.c
@@ -94,7 +94,7 @@ static int sis_fb_init(DRM_IOCTL_ARGS)
94 mutex_lock(&dev->struct_mutex); 94 mutex_lock(&dev->struct_mutex);
95#if defined(CONFIG_FB_SIS) 95#if defined(CONFIG_FB_SIS)
96 { 96 {
97 drm_sman_mm_t sman_mm; 97 struct drm_sman_mm sman_mm;
98 sman_mm.private = (void *)0xFFFFFFFF; 98 sman_mm.private = (void *)0xFFFFFFFF;
99 sman_mm.allocate = sis_sman_mm_allocate; 99 sman_mm.allocate = sis_sman_mm_allocate;
100 sman_mm.free = sis_sman_mm_free; 100 sman_mm.free = sis_sman_mm_free;
diff --git a/drivers/char/hvc_console.c b/drivers/char/hvc_console.c
index b3ab42e0dd4a..83c1151ec7a2 100644
--- a/drivers/char/hvc_console.c
+++ b/drivers/char/hvc_console.c
@@ -679,6 +679,7 @@ static int khvcd(void *unused)
679 int poll_mask; 679 int poll_mask;
680 struct hvc_struct *hp; 680 struct hvc_struct *hp;
681 681
682 set_freezable();
682 __set_current_state(TASK_RUNNING); 683 __set_current_state(TASK_RUNNING);
683 do { 684 do {
684 poll_mask = 0; 685 poll_mask = 0;
diff --git a/drivers/char/isicom.c b/drivers/char/isicom.c
index 761f77740d67..77a7a4a06620 100644
--- a/drivers/char/isicom.c
+++ b/drivers/char/isicom.c
@@ -171,9 +171,6 @@ static struct pci_driver isicom_driver = {
171static int prev_card = 3; /* start servicing isi_card[0] */ 171static int prev_card = 3; /* start servicing isi_card[0] */
172static struct tty_driver *isicom_normal; 172static struct tty_driver *isicom_normal;
173 173
174static DECLARE_COMPLETION(isi_timerdone);
175static char re_schedule = 1;
176
177static void isicom_tx(unsigned long _data); 174static void isicom_tx(unsigned long _data);
178static void isicom_start(struct tty_struct *tty); 175static void isicom_start(struct tty_struct *tty);
179 176
@@ -187,7 +184,7 @@ static signed char linuxb_to_isib[] = {
187 184
188struct isi_board { 185struct isi_board {
189 unsigned long base; 186 unsigned long base;
190 unsigned char irq; 187 int irq;
191 unsigned char port_count; 188 unsigned char port_count;
192 unsigned short status; 189 unsigned short status;
193 unsigned short port_status; /* each bit for each port */ 190 unsigned short port_status; /* each bit for each port */
@@ -227,7 +224,7 @@ static struct isi_port isi_ports[PORT_COUNT];
227 * it wants to talk. 224 * it wants to talk.
228 */ 225 */
229 226
230static inline int WaitTillCardIsFree(u16 base) 227static inline int WaitTillCardIsFree(unsigned long base)
231{ 228{
232 unsigned int count = 0; 229 unsigned int count = 0;
233 unsigned int a = in_atomic(); /* do we run under spinlock? */ 230 unsigned int a = in_atomic(); /* do we run under spinlock? */
@@ -243,17 +240,18 @@ static inline int WaitTillCardIsFree(u16 base)
243 240
244static int lock_card(struct isi_board *card) 241static int lock_card(struct isi_board *card)
245{ 242{
246 char retries;
247 unsigned long base = card->base; 243 unsigned long base = card->base;
244 unsigned int retries, a;
248 245
249 for (retries = 0; retries < 100; retries++) { 246 for (retries = 0; retries < 10; retries++) {
250 spin_lock_irqsave(&card->card_lock, card->flags); 247 spin_lock_irqsave(&card->card_lock, card->flags);
251 if (inw(base + 0xe) & 0x1) { 248 for (a = 0; a < 10; a++) {
252 return 1; 249 if (inw(base + 0xe) & 0x1)
253 } else { 250 return 1;
254 spin_unlock_irqrestore(&card->card_lock, card->flags); 251 udelay(10);
255 udelay(1000); /* 1ms */
256 } 252 }
253 spin_unlock_irqrestore(&card->card_lock, card->flags);
254 msleep(10);
257 } 255 }
258 printk(KERN_WARNING "ISICOM: Failed to lock Card (0x%lx)\n", 256 printk(KERN_WARNING "ISICOM: Failed to lock Card (0x%lx)\n",
259 card->base); 257 card->base);
@@ -261,23 +259,6 @@ static int lock_card(struct isi_board *card)
261 return 0; /* Failed to acquire the card! */ 259 return 0; /* Failed to acquire the card! */
262} 260}
263 261
264static int lock_card_at_interrupt(struct isi_board *card)
265{
266 unsigned char retries;
267 unsigned long base = card->base;
268
269 for (retries = 0; retries < 200; retries++) {
270 spin_lock_irqsave(&card->card_lock, card->flags);
271
272 if (inw(base + 0xe) & 0x1)
273 return 1;
274 else
275 spin_unlock_irqrestore(&card->card_lock, card->flags);
276 }
277 /* Failing in interrupt is an acceptable event */
278 return 0; /* Failed to acquire the card! */
279}
280
281static void unlock_card(struct isi_board *card) 262static void unlock_card(struct isi_board *card)
282{ 263{
283 spin_unlock_irqrestore(&card->card_lock, card->flags); 264 spin_unlock_irqrestore(&card->card_lock, card->flags);
@@ -415,7 +396,9 @@ static inline int __isicom_paranoia_check(struct isi_port const *port,
415 396
416static void isicom_tx(unsigned long _data) 397static void isicom_tx(unsigned long _data)
417{ 398{
418 short count = (BOARD_COUNT-1), card, base; 399 unsigned long flags, base;
400 unsigned int retries;
401 short count = (BOARD_COUNT-1), card;
419 short txcount, wrd, residue, word_count, cnt; 402 short txcount, wrd, residue, word_count, cnt;
420 struct isi_port *port; 403 struct isi_port *port;
421 struct tty_struct *tty; 404 struct tty_struct *tty;
@@ -435,32 +418,34 @@ static void isicom_tx(unsigned long _data)
435 count = isi_card[card].port_count; 418 count = isi_card[card].port_count;
436 port = isi_card[card].ports; 419 port = isi_card[card].ports;
437 base = isi_card[card].base; 420 base = isi_card[card].base;
421
422 spin_lock_irqsave(&isi_card[card].card_lock, flags);
423 for (retries = 0; retries < 100; retries++) {
424 if (inw(base + 0xe) & 0x1)
425 break;
426 udelay(2);
427 }
428 if (retries >= 100)
429 goto unlock;
430
438 for (;count > 0;count--, port++) { 431 for (;count > 0;count--, port++) {
439 if (!lock_card_at_interrupt(&isi_card[card]))
440 continue;
441 /* port not active or tx disabled to force flow control */ 432 /* port not active or tx disabled to force flow control */
442 if (!(port->flags & ASYNC_INITIALIZED) || 433 if (!(port->flags & ASYNC_INITIALIZED) ||
443 !(port->status & ISI_TXOK)) 434 !(port->status & ISI_TXOK))
444 unlock_card(&isi_card[card]);
445 continue; 435 continue;
446 436
447 tty = port->tty; 437 tty = port->tty;
448 438
449 439 if (tty == NULL)
450 if (tty == NULL) {
451 unlock_card(&isi_card[card]);
452 continue; 440 continue;
453 }
454 441
455 txcount = min_t(short, TX_SIZE, port->xmit_cnt); 442 txcount = min_t(short, TX_SIZE, port->xmit_cnt);
456 if (txcount <= 0 || tty->stopped || tty->hw_stopped) { 443 if (txcount <= 0 || tty->stopped || tty->hw_stopped)
457 unlock_card(&isi_card[card]);
458 continue; 444 continue;
459 } 445
460 if (!(inw(base + 0x02) & (1 << port->channel))) { 446 if (!(inw(base + 0x02) & (1 << port->channel)))
461 unlock_card(&isi_card[card]);
462 continue; 447 continue;
463 } 448
464 pr_dbg("txing %d bytes, port%d.\n", txcount, 449 pr_dbg("txing %d bytes, port%d.\n", txcount,
465 port->channel + 1); 450 port->channel + 1);
466 outw((port->channel << isi_card[card].shift_count) | txcount, 451 outw((port->channel << isi_card[card].shift_count) | txcount,
@@ -508,16 +493,12 @@ static void isicom_tx(unsigned long _data)
508 port->status &= ~ISI_TXOK; 493 port->status &= ~ISI_TXOK;
509 if (port->xmit_cnt <= WAKEUP_CHARS) 494 if (port->xmit_cnt <= WAKEUP_CHARS)
510 tty_wakeup(tty); 495 tty_wakeup(tty);
511 unlock_card(&isi_card[card]);
512 } 496 }
513 497
498unlock:
499 spin_unlock_irqrestore(&isi_card[card].card_lock, flags);
514 /* schedule another tx for hopefully in about 10ms */ 500 /* schedule another tx for hopefully in about 10ms */
515sched_again: 501sched_again:
516 if (!re_schedule) {
517 complete(&isi_timerdone);
518 return;
519 }
520
521 mod_timer(&tx, jiffies + msecs_to_jiffies(10)); 502 mod_timer(&tx, jiffies + msecs_to_jiffies(10));
522} 503}
523 504
@@ -1749,17 +1730,13 @@ static unsigned int card_count;
1749static int __devinit isicom_probe(struct pci_dev *pdev, 1730static int __devinit isicom_probe(struct pci_dev *pdev,
1750 const struct pci_device_id *ent) 1731 const struct pci_device_id *ent)
1751{ 1732{
1752 unsigned int ioaddr, signature, index; 1733 unsigned int signature, index;
1753 int retval = -EPERM; 1734 int retval = -EPERM;
1754 u8 pciirq;
1755 struct isi_board *board = NULL; 1735 struct isi_board *board = NULL;
1756 1736
1757 if (card_count >= BOARD_COUNT) 1737 if (card_count >= BOARD_COUNT)
1758 goto err; 1738 goto err;
1759 1739
1760 ioaddr = pci_resource_start(pdev, 3);
1761 /* i.e at offset 0x1c in the PCI configuration register space. */
1762 pciirq = pdev->irq;
1763 dev_info(&pdev->dev, "ISI PCI Card(Device ID 0x%x)\n", ent->device); 1740 dev_info(&pdev->dev, "ISI PCI Card(Device ID 0x%x)\n", ent->device);
1764 1741
1765 /* allot the first empty slot in the array */ 1742 /* allot the first empty slot in the array */
@@ -1770,8 +1747,8 @@ static int __devinit isicom_probe(struct pci_dev *pdev,
1770 } 1747 }
1771 1748
1772 board->index = index; 1749 board->index = index;
1773 board->base = ioaddr; 1750 board->base = pci_resource_start(pdev, 3);
1774 board->irq = pciirq; 1751 board->irq = pdev->irq;
1775 card_count++; 1752 card_count++;
1776 1753
1777 pci_set_drvdata(pdev, board); 1754 pci_set_drvdata(pdev, board);
@@ -1901,9 +1878,7 @@ error:
1901 1878
1902static void __exit isicom_exit(void) 1879static void __exit isicom_exit(void)
1903{ 1880{
1904 re_schedule = 0; 1881 del_timer_sync(&tx);
1905
1906 wait_for_completion_timeout(&isi_timerdone, HZ);
1907 1882
1908 pci_unregister_driver(&isicom_driver); 1883 pci_unregister_driver(&isicom_driver);
1909 tty_unregister_driver(isicom_normal); 1884 tty_unregister_driver(isicom_normal);
diff --git a/drivers/char/istallion.c b/drivers/char/istallion.c
index 809409922996..3c66f402f9d7 100644
--- a/drivers/char/istallion.c
+++ b/drivers/char/istallion.c
@@ -2163,14 +2163,10 @@ static void __stli_sendcmd(struct stlibrd *brdp, struct stliport *portp, unsigne
2163 cdkhdr_t __iomem *hdrp; 2163 cdkhdr_t __iomem *hdrp;
2164 cdkctrl_t __iomem *cp; 2164 cdkctrl_t __iomem *cp;
2165 unsigned char __iomem *bits; 2165 unsigned char __iomem *bits;
2166 unsigned long flags;
2167
2168 spin_lock_irqsave(&brd_lock, flags);
2169 2166
2170 if (test_bit(ST_CMDING, &portp->state)) { 2167 if (test_bit(ST_CMDING, &portp->state)) {
2171 printk(KERN_ERR "STALLION: command already busy, cmd=%x!\n", 2168 printk(KERN_ERR "STALLION: command already busy, cmd=%x!\n",
2172 (int) cmd); 2169 (int) cmd);
2173 spin_unlock_irqrestore(&brd_lock, flags);
2174 return; 2170 return;
2175 } 2171 }
2176 2172
@@ -2191,7 +2187,6 @@ static void __stli_sendcmd(struct stlibrd *brdp, struct stliport *portp, unsigne
2191 writeb(readb(bits) | portp->portbit, bits); 2187 writeb(readb(bits) | portp->portbit, bits);
2192 set_bit(ST_CMDING, &portp->state); 2188 set_bit(ST_CMDING, &portp->state);
2193 EBRDDISABLE(brdp); 2189 EBRDDISABLE(brdp);
2194 spin_unlock_irqrestore(&brd_lock, flags);
2195} 2190}
2196 2191
2197static void stli_sendcmd(struct stlibrd *brdp, struct stliport *portp, unsigned long cmd, void *arg, int size, int copyback) 2192static void stli_sendcmd(struct stlibrd *brdp, struct stliport *portp, unsigned long cmd, void *arg, int size, int copyback)
@@ -3215,13 +3210,13 @@ static int stli_initecp(struct stlibrd *brdp)
3215 goto err; 3210 goto err;
3216 } 3211 }
3217 3212
3213 brdp->iosize = ECP_IOSIZE;
3214
3218 if (!request_region(brdp->iobase, brdp->iosize, "istallion")) { 3215 if (!request_region(brdp->iobase, brdp->iosize, "istallion")) {
3219 retval = -EIO; 3216 retval = -EIO;
3220 goto err; 3217 goto err;
3221 } 3218 }
3222 3219
3223 brdp->iosize = ECP_IOSIZE;
3224
3225/* 3220/*
3226 * Based on the specific board type setup the common vars to access 3221 * Based on the specific board type setup the common vars to access
3227 * and enable shared memory. Set all board specific information now 3222 * and enable shared memory. Set all board specific information now
diff --git a/drivers/char/moxa.c b/drivers/char/moxa.c
index e0d35c20c04f..ed76f0a127fd 100644
--- a/drivers/char/moxa.c
+++ b/drivers/char/moxa.c
@@ -1405,7 +1405,6 @@ static int moxaCard;
1405static struct mon_str moxaLog; 1405static struct mon_str moxaLog;
1406static int moxaFuncTout = HZ / 2; 1406static int moxaFuncTout = HZ / 2;
1407 1407
1408static void moxadelay(int);
1409static void moxafunc(void __iomem *, int, ushort); 1408static void moxafunc(void __iomem *, int, ushort);
1410static void wait_finish(void __iomem *); 1409static void wait_finish(void __iomem *);
1411static void low_water_check(void __iomem *); 1410static void low_water_check(void __iomem *);
@@ -2404,10 +2403,10 @@ void MoxaPortSendBreak(int port, int ms100)
2404 ofsAddr = moxa_ports[port].tableAddr; 2403 ofsAddr = moxa_ports[port].tableAddr;
2405 if (ms100) { 2404 if (ms100) {
2406 moxafunc(ofsAddr, FC_SendBreak, Magic_code); 2405 moxafunc(ofsAddr, FC_SendBreak, Magic_code);
2407 moxadelay(ms100 * (HZ / 10)); 2406 msleep(ms100 * 10);
2408 } else { 2407 } else {
2409 moxafunc(ofsAddr, FC_SendBreak, Magic_code); 2408 moxafunc(ofsAddr, FC_SendBreak, Magic_code);
2410 moxadelay(HZ / 4); /* 250 ms */ 2409 msleep(250);
2411 } 2410 }
2412 moxafunc(ofsAddr, FC_StopBreak, Magic_code); 2411 moxafunc(ofsAddr, FC_StopBreak, Magic_code);
2413} 2412}
@@ -2476,18 +2475,6 @@ static int moxa_set_serial_info(struct moxa_port *info,
2476/***************************************************************************** 2475/*****************************************************************************
2477 * Static local functions: * 2476 * Static local functions: *
2478 *****************************************************************************/ 2477 *****************************************************************************/
2479/*
2480 * moxadelay - delays a specified number ticks
2481 */
2482static void moxadelay(int tick)
2483{
2484 unsigned long st, et;
2485
2486 st = jiffies;
2487 et = st + tick;
2488 while (time_before(jiffies, et));
2489}
2490
2491static void moxafunc(void __iomem *ofsAddr, int cmd, ushort arg) 2478static void moxafunc(void __iomem *ofsAddr, int cmd, ushort arg)
2492{ 2479{
2493 2480
@@ -2535,7 +2522,7 @@ static int moxaloadbios(int cardno, unsigned char __user *tmp, int len)
2535 return -EFAULT; 2522 return -EFAULT;
2536 baseAddr = moxa_boards[cardno].basemem; 2523 baseAddr = moxa_boards[cardno].basemem;
2537 writeb(HW_reset, baseAddr + Control_reg); /* reset */ 2524 writeb(HW_reset, baseAddr + Control_reg); /* reset */
2538 moxadelay(1); /* delay 10 ms */ 2525 msleep(10);
2539 for (i = 0; i < 4096; i++) 2526 for (i = 0; i < 4096; i++)
2540 writeb(0, baseAddr + i); /* clear fix page */ 2527 writeb(0, baseAddr + i); /* clear fix page */
2541 for (i = 0; i < len; i++) 2528 for (i = 0; i < len; i++)
@@ -2713,7 +2700,7 @@ static int moxaloadc218(int cardno, void __iomem *baseAddr, int len)
2713 for (i = 0; i < 100; i++) { 2700 for (i = 0; i < 100; i++) {
2714 if (readw(baseAddr + C218_key) == keycode) 2701 if (readw(baseAddr + C218_key) == keycode)
2715 break; 2702 break;
2716 moxadelay(1); /* delay 10 ms */ 2703 msleep(10);
2717 } 2704 }
2718 if (readw(baseAddr + C218_key) != keycode) { 2705 if (readw(baseAddr + C218_key) != keycode) {
2719 return (-1); 2706 return (-1);
@@ -2725,7 +2712,7 @@ static int moxaloadc218(int cardno, void __iomem *baseAddr, int len)
2725 for (i = 0; i < 100; i++) { 2712 for (i = 0; i < 100; i++) {
2726 if (readw(baseAddr + C218_key) == keycode) 2713 if (readw(baseAddr + C218_key) == keycode)
2727 break; 2714 break;
2728 moxadelay(1); /* delay 10 ms */ 2715 msleep(10);
2729 } 2716 }
2730 retry++; 2717 retry++;
2731 } while ((readb(baseAddr + C218chksum_ok) != 1) && (retry < 3)); 2718 } while ((readb(baseAddr + C218chksum_ok) != 1) && (retry < 3));
@@ -2736,7 +2723,7 @@ static int moxaloadc218(int cardno, void __iomem *baseAddr, int len)
2736 for (i = 0; i < 100; i++) { 2723 for (i = 0; i < 100; i++) {
2737 if (readw(baseAddr + Magic_no) == Magic_code) 2724 if (readw(baseAddr + Magic_no) == Magic_code)
2738 break; 2725 break;
2739 moxadelay(1); /* delay 10 ms */ 2726 msleep(10);
2740 } 2727 }
2741 if (readw(baseAddr + Magic_no) != Magic_code) { 2728 if (readw(baseAddr + Magic_no) != Magic_code) {
2742 return (-1); 2729 return (-1);
@@ -2746,7 +2733,7 @@ static int moxaloadc218(int cardno, void __iomem *baseAddr, int len)
2746 for (i = 0; i < 100; i++) { 2733 for (i = 0; i < 100; i++) {
2747 if (readw(baseAddr + Magic_no) == Magic_code) 2734 if (readw(baseAddr + Magic_no) == Magic_code)
2748 break; 2735 break;
2749 moxadelay(1); /* delay 10 ms */ 2736 msleep(10);
2750 } 2737 }
2751 if (readw(baseAddr + Magic_no) != Magic_code) { 2738 if (readw(baseAddr + Magic_no) != Magic_code) {
2752 return (-1); 2739 return (-1);
@@ -2788,7 +2775,7 @@ static int moxaloadc320(int cardno, void __iomem *baseAddr, int len, int *numPor
2788 for (i = 0; i < 10; i++) { 2775 for (i = 0; i < 10; i++) {
2789 if (readw(baseAddr + C320_key) == C320_KeyCode) 2776 if (readw(baseAddr + C320_key) == C320_KeyCode)
2790 break; 2777 break;
2791 moxadelay(1); 2778 msleep(10);
2792 } 2779 }
2793 if (readw(baseAddr + C320_key) != C320_KeyCode) 2780 if (readw(baseAddr + C320_key) != C320_KeyCode)
2794 return (-1); 2781 return (-1);
@@ -2799,7 +2786,7 @@ static int moxaloadc320(int cardno, void __iomem *baseAddr, int len, int *numPor
2799 for (i = 0; i < 10; i++) { 2786 for (i = 0; i < 10; i++) {
2800 if (readw(baseAddr + C320_key) == C320_KeyCode) 2787 if (readw(baseAddr + C320_key) == C320_KeyCode)
2801 break; 2788 break;
2802 moxadelay(1); 2789 msleep(10);
2803 } 2790 }
2804 retry++; 2791 retry++;
2805 } while ((readb(baseAddr + C320chksum_ok) != 1) && (retry < 3)); 2792 } while ((readb(baseAddr + C320chksum_ok) != 1) && (retry < 3));
@@ -2809,7 +2796,7 @@ static int moxaloadc320(int cardno, void __iomem *baseAddr, int len, int *numPor
2809 for (i = 0; i < 600; i++) { 2796 for (i = 0; i < 600; i++) {
2810 if (readw(baseAddr + Magic_no) == Magic_code) 2797 if (readw(baseAddr + Magic_no) == Magic_code)
2811 break; 2798 break;
2812 moxadelay(1); 2799 msleep(10);
2813 } 2800 }
2814 if (readw(baseAddr + Magic_no) != Magic_code) 2801 if (readw(baseAddr + Magic_no) != Magic_code)
2815 return (-100); 2802 return (-100);
@@ -2828,7 +2815,7 @@ static int moxaloadc320(int cardno, void __iomem *baseAddr, int len, int *numPor
2828 for (i = 0; i < 500; i++) { 2815 for (i = 0; i < 500; i++) {
2829 if (readw(baseAddr + Magic_no) == Magic_code) 2816 if (readw(baseAddr + Magic_no) == Magic_code)
2830 break; 2817 break;
2831 moxadelay(1); 2818 msleep(10);
2832 } 2819 }
2833 if (readw(baseAddr + Magic_no) != Magic_code) 2820 if (readw(baseAddr + Magic_no) != Magic_code)
2834 return (-102); 2821 return (-102);
@@ -2842,7 +2829,7 @@ static int moxaloadc320(int cardno, void __iomem *baseAddr, int len, int *numPor
2842 for (i = 0; i < 600; i++) { 2829 for (i = 0; i < 600; i++) {
2843 if (readw(baseAddr + Magic_no) == Magic_code) 2830 if (readw(baseAddr + Magic_no) == Magic_code)
2844 break; 2831 break;
2845 moxadelay(1); 2832 msleep(10);
2846 } 2833 }
2847 if (readw(baseAddr + Magic_no) != Magic_code) 2834 if (readw(baseAddr + Magic_no) != Magic_code)
2848 return (-102); 2835 return (-102);
diff --git a/drivers/char/riscom8.c b/drivers/char/riscom8.c
index 3494e3fc44bf..b37e626f4faa 100644
--- a/drivers/char/riscom8.c
+++ b/drivers/char/riscom8.c
@@ -213,14 +213,6 @@ static inline void rc_release_io_range(struct riscom_board * const bp)
213 release_region(RC_TO_ISA(rc_ioport[i]) + bp->base, 1); 213 release_region(RC_TO_ISA(rc_ioport[i]) + bp->base, 1);
214} 214}
215 215
216/* Must be called with enabled interrupts */
217static inline void rc_long_delay(unsigned long delay)
218{
219 unsigned long i;
220
221 for (i = jiffies + delay; time_after(i,jiffies); ) ;
222}
223
224/* Reset and setup CD180 chip */ 216/* Reset and setup CD180 chip */
225static void __init rc_init_CD180(struct riscom_board const * bp) 217static void __init rc_init_CD180(struct riscom_board const * bp)
226{ 218{
@@ -231,7 +223,7 @@ static void __init rc_init_CD180(struct riscom_board const * bp)
231 rc_wait_CCR(bp); /* Wait for CCR ready */ 223 rc_wait_CCR(bp); /* Wait for CCR ready */
232 rc_out(bp, CD180_CCR, CCR_HARDRESET); /* Reset CD180 chip */ 224 rc_out(bp, CD180_CCR, CCR_HARDRESET); /* Reset CD180 chip */
233 sti(); 225 sti();
234 rc_long_delay(HZ/20); /* Delay 0.05 sec */ 226 msleep(50); /* Delay 0.05 sec */
235 cli(); 227 cli();
236 rc_out(bp, CD180_GIVR, RC_ID); /* Set ID for this chip */ 228 rc_out(bp, CD180_GIVR, RC_ID); /* Set ID for this chip */
237 rc_out(bp, CD180_GICR, 0); /* Clear all bits */ 229 rc_out(bp, CD180_GICR, 0); /* Clear all bits */
@@ -280,7 +272,7 @@ static int __init rc_probe(struct riscom_board *bp)
280 rc_wait_CCR(bp); 272 rc_wait_CCR(bp);
281 rc_out(bp, CD180_CCR, CCR_TXEN); /* Enable transmitter */ 273 rc_out(bp, CD180_CCR, CCR_TXEN); /* Enable transmitter */
282 rc_out(bp, CD180_IER, IER_TXRDY); /* Enable tx empty intr */ 274 rc_out(bp, CD180_IER, IER_TXRDY); /* Enable tx empty intr */
283 rc_long_delay(HZ/20); 275 msleep(50);
284 irqs = probe_irq_off(irqs); 276 irqs = probe_irq_off(irqs);
285 val1 = rc_in(bp, RC_BSR); /* Get Board Status reg */ 277 val1 = rc_in(bp, RC_BSR); /* Get Board Status reg */
286 val2 = rc_in(bp, RC_ACK_TINT); /* ACK interrupt */ 278 val2 = rc_in(bp, RC_ACK_TINT); /* ACK interrupt */
diff --git a/drivers/char/specialix.c b/drivers/char/specialix.c
index baf7234b6e66..455855631aef 100644
--- a/drivers/char/specialix.c
+++ b/drivers/char/specialix.c
@@ -345,18 +345,6 @@ static inline void sx_release_io_range(struct specialix_board * bp)
345} 345}
346 346
347 347
348/* Must be called with enabled interrupts */
349/* Ugly. Very ugly. Don't use this for anything else than initialization
350 code */
351static inline void sx_long_delay(unsigned long delay)
352{
353 unsigned long i;
354
355 for (i = jiffies + delay; time_after(i, jiffies); ) ;
356}
357
358
359
360/* Set the IRQ using the RTS lines that run to the PAL on the board.... */ 348/* Set the IRQ using the RTS lines that run to the PAL on the board.... */
361static int sx_set_irq ( struct specialix_board *bp) 349static int sx_set_irq ( struct specialix_board *bp)
362{ 350{
@@ -397,7 +385,7 @@ static int sx_init_CD186x(struct specialix_board * bp)
397 spin_lock_irqsave(&bp->lock, flags); 385 spin_lock_irqsave(&bp->lock, flags);
398 sx_out_off(bp, CD186x_CCR, CCR_HARDRESET); /* Reset CD186x chip */ 386 sx_out_off(bp, CD186x_CCR, CCR_HARDRESET); /* Reset CD186x chip */
399 spin_unlock_irqrestore(&bp->lock, flags); 387 spin_unlock_irqrestore(&bp->lock, flags);
400 sx_long_delay(HZ/20); /* Delay 0.05 sec */ 388 msleep(50); /* Delay 0.05 sec */
401 spin_lock_irqsave(&bp->lock, flags); 389 spin_lock_irqsave(&bp->lock, flags);
402 sx_out_off(bp, CD186x_GIVR, SX_ID); /* Set ID for this chip */ 390 sx_out_off(bp, CD186x_GIVR, SX_ID); /* Set ID for this chip */
403 sx_out_off(bp, CD186x_GICR, 0); /* Clear all bits */ 391 sx_out_off(bp, CD186x_GICR, 0); /* Clear all bits */
@@ -533,7 +521,7 @@ static int sx_probe(struct specialix_board *bp)
533 sx_wait_CCR(bp); 521 sx_wait_CCR(bp);
534 sx_out(bp, CD186x_CCR, CCR_TXEN); /* Enable transmitter */ 522 sx_out(bp, CD186x_CCR, CCR_TXEN); /* Enable transmitter */
535 sx_out(bp, CD186x_IER, IER_TXRDY); /* Enable tx empty intr */ 523 sx_out(bp, CD186x_IER, IER_TXRDY); /* Enable tx empty intr */
536 sx_long_delay(HZ/20); 524 msleep(50);
537 irqs = probe_irq_off(irqs); 525 irqs = probe_irq_off(irqs);
538 526
539 dprintk (SX_DEBUG_INIT, "SRSR = %02x, ", sx_in(bp, CD186x_SRSR)); 527 dprintk (SX_DEBUG_INIT, "SRSR = %02x, ", sx_in(bp, CD186x_SRSR));
diff --git a/drivers/char/stallion.c b/drivers/char/stallion.c
index 8c73ccb8830f..93d0bb8b4c0f 100644
--- a/drivers/char/stallion.c
+++ b/drivers/char/stallion.c
@@ -1788,7 +1788,6 @@ static void stl_offintr(struct work_struct *work)
1788 if (tty == NULL) 1788 if (tty == NULL)
1789 return; 1789 return;
1790 1790
1791 lock_kernel();
1792 if (test_bit(ASYI_TXLOW, &portp->istate)) 1791 if (test_bit(ASYI_TXLOW, &portp->istate))
1793 tty_wakeup(tty); 1792 tty_wakeup(tty);
1794 1793
@@ -1802,7 +1801,6 @@ static void stl_offintr(struct work_struct *work)
1802 if (portp->flags & ASYNC_CHECK_CD) 1801 if (portp->flags & ASYNC_CHECK_CD)
1803 tty_hangup(tty); /* FIXME: module removal race here - AKPM */ 1802 tty_hangup(tty); /* FIXME: module removal race here - AKPM */
1804 } 1803 }
1805 unlock_kernel();
1806} 1804}
1807 1805
1808/*****************************************************************************/ 1806/*****************************************************************************/
@@ -2357,9 +2355,6 @@ static int __devinit stl_pciprobe(struct pci_dev *pdev,
2357 if ((pdev->class >> 8) == PCI_CLASS_STORAGE_IDE) 2355 if ((pdev->class >> 8) == PCI_CLASS_STORAGE_IDE)
2358 goto err; 2356 goto err;
2359 2357
2360 dev_info(&pdev->dev, "please, report this to LKML: %x/%x/%x\n",
2361 pdev->vendor, pdev->device, pdev->class);
2362
2363 retval = pci_enable_device(pdev); 2358 retval = pci_enable_device(pdev);
2364 if (retval) 2359 if (retval)
2365 goto err; 2360 goto err;
diff --git a/drivers/char/vt.c b/drivers/char/vt.c
index 6650ae1c088f..edb7002a3216 100644
--- a/drivers/char/vt.c
+++ b/drivers/char/vt.c
@@ -729,10 +729,9 @@ int vc_allocate(unsigned int currcons) /* return 0 on success */
729 /* although the numbers above are not valid since long ago, the 729 /* although the numbers above are not valid since long ago, the
730 point is still up-to-date and the comment still has its value 730 point is still up-to-date and the comment still has its value
731 even if only as a historical artifact. --mj, July 1998 */ 731 even if only as a historical artifact. --mj, July 1998 */
732 vc = kmalloc(sizeof(struct vc_data), GFP_KERNEL); 732 vc = kzalloc(sizeof(struct vc_data), GFP_KERNEL);
733 if (!vc) 733 if (!vc)
734 return -ENOMEM; 734 return -ENOMEM;
735 memset(vc, 0, sizeof(*vc));
736 vc_cons[currcons].d = vc; 735 vc_cons[currcons].d = vc;
737 INIT_WORK(&vc_cons[currcons].SAK_work, vc_SAK); 736 INIT_WORK(&vc_cons[currcons].SAK_work, vc_SAK);
738 visual_init(vc, currcons, 1); 737 visual_init(vc, currcons, 1);
@@ -1991,8 +1990,7 @@ static int is_double_width(uint32_t ucs)
1991 { 0xFE10, 0xFE19 }, { 0xFE30, 0xFE6F }, { 0xFF00, 0xFF60 }, 1990 { 0xFE10, 0xFE19 }, { 0xFE30, 0xFE6F }, { 0xFF00, 0xFF60 },
1992 { 0xFFE0, 0xFFE6 }, { 0x20000, 0x2FFFD }, { 0x30000, 0x3FFFD } 1991 { 0xFFE0, 0xFFE6 }, { 0x20000, 0x2FFFD }, { 0x30000, 0x3FFFD }
1993 }; 1992 };
1994 return bisearch(ucs, double_width, 1993 return bisearch(ucs, double_width, ARRAY_SIZE(double_width) - 1);
1995 sizeof(double_width) / sizeof(*double_width) - 1);
1996} 1994}
1997 1995
1998/* acquires console_sem */ 1996/* acquires console_sem */
@@ -2989,8 +2987,24 @@ static int con_is_graphics(const struct consw *csw, int first, int last)
2989 return retval; 2987 return retval;
2990} 2988}
2991 2989
2992static int unbind_con_driver(const struct consw *csw, int first, int last, 2990/**
2993 int deflt) 2991 * unbind_con_driver - unbind a console driver
2992 * @csw: pointer to console driver to unregister
2993 * @first: first in range of consoles that @csw should be unbound from
2994 * @last: last in range of consoles that @csw should be unbound from
2995 * @deflt: should next bound console driver be default after @csw is unbound?
2996 *
2997 * To unbind a driver from all possible consoles, pass 0 as @first and
2998 * %MAX_NR_CONSOLES as @last.
2999 *
3000 * @deflt controls whether the console that ends up replacing @csw should be
3001 * the default console.
3002 *
3003 * RETURNS:
3004 * -ENODEV if @csw isn't a registered console driver or can't be unregistered
3005 * or 0 on success.
3006 */
3007int unbind_con_driver(const struct consw *csw, int first, int last, int deflt)
2994{ 3008{
2995 struct module *owner = csw->owner; 3009 struct module *owner = csw->owner;
2996 const struct consw *defcsw = NULL; 3010 const struct consw *defcsw = NULL;
@@ -3075,6 +3089,7 @@ err:
3075 return retval; 3089 return retval;
3076 3090
3077} 3091}
3092EXPORT_SYMBOL(unbind_con_driver);
3078 3093
3079static int vt_bind(struct con_driver *con) 3094static int vt_bind(struct con_driver *con)
3080{ 3095{
@@ -3491,9 +3506,6 @@ void do_blank_screen(int entering_gfx)
3491 } 3506 }
3492 return; 3507 return;
3493 } 3508 }
3494 if (blank_state != blank_normal_wait)
3495 return;
3496 blank_state = blank_off;
3497 3509
3498 /* entering graphics mode? */ 3510 /* entering graphics mode? */
3499 if (entering_gfx) { 3511 if (entering_gfx) {
@@ -3501,10 +3513,15 @@ void do_blank_screen(int entering_gfx)
3501 save_screen(vc); 3513 save_screen(vc);
3502 vc->vc_sw->con_blank(vc, -1, 1); 3514 vc->vc_sw->con_blank(vc, -1, 1);
3503 console_blanked = fg_console + 1; 3515 console_blanked = fg_console + 1;
3516 blank_state = blank_off;
3504 set_origin(vc); 3517 set_origin(vc);
3505 return; 3518 return;
3506 } 3519 }
3507 3520
3521 if (blank_state != blank_normal_wait)
3522 return;
3523 blank_state = blank_off;
3524
3508 /* don't blank graphics */ 3525 /* don't blank graphics */
3509 if (vc->vc_mode != KD_TEXT) { 3526 if (vc->vc_mode != KD_TEXT) {
3510 console_blanked = fg_console + 1; 3527 console_blanked = fg_console + 1;
diff --git a/drivers/edac/edac_mc.c b/drivers/edac/edac_mc.c
index 7b622300d0e5..804875de5801 100644
--- a/drivers/edac/edac_mc.c
+++ b/drivers/edac/edac_mc.c
@@ -1906,6 +1906,7 @@ static void do_edac_check(void)
1906 1906
1907static int edac_kernel_thread(void *arg) 1907static int edac_kernel_thread(void *arg)
1908{ 1908{
1909 set_freezable();
1909 while (!kthread_should_stop()) { 1910 while (!kthread_should_stop()) {
1910 do_edac_check(); 1911 do_edac_check();
1911 1912
diff --git a/drivers/hwmon/lm70.c b/drivers/hwmon/lm70.c
index 7eaae3834e15..275d392eca61 100644
--- a/drivers/hwmon/lm70.c
+++ b/drivers/hwmon/lm70.c
@@ -96,6 +96,10 @@ static int __devinit lm70_probe(struct spi_device *spi)
96 struct lm70 *p_lm70; 96 struct lm70 *p_lm70;
97 int status; 97 int status;
98 98
99 /* signaling is SPI_MODE_0 on a 3-wire link (shared SI/SO) */
100 if ((spi->mode & (SPI_CPOL|SPI_CPHA)) || !(spi->mode & SPI_3WIRE))
101 return -EINVAL;
102
99 p_lm70 = kzalloc(sizeof *p_lm70, GFP_KERNEL); 103 p_lm70 = kzalloc(sizeof *p_lm70, GFP_KERNEL);
100 if (!p_lm70) 104 if (!p_lm70)
101 return -ENOMEM; 105 return -ENOMEM;
diff --git a/drivers/i2c/busses/i2c-pmcmsp.c b/drivers/i2c/busses/i2c-pmcmsp.c
index 03188d277af1..17cecf1ea797 100644
--- a/drivers/i2c/busses/i2c-pmcmsp.c
+++ b/drivers/i2c/busses/i2c-pmcmsp.c
@@ -630,7 +630,7 @@ static struct i2c_adapter pmcmsptwi_adapter = {
630static struct platform_driver pmcmsptwi_driver = { 630static struct platform_driver pmcmsptwi_driver = {
631 .probe = pmcmsptwi_probe, 631 .probe = pmcmsptwi_probe,
632 .remove = __devexit_p(pmcmsptwi_remove), 632 .remove = __devexit_p(pmcmsptwi_remove),
633 .driver { 633 .driver = {
634 .name = DRV_NAME, 634 .name = DRV_NAME,
635 .owner = THIS_MODULE, 635 .owner = THIS_MODULE,
636 }, 636 },
diff --git a/drivers/i2c/chips/Kconfig b/drivers/i2c/chips/Kconfig
index 3944e889cb21..2e1c24f671cf 100644
--- a/drivers/i2c/chips/Kconfig
+++ b/drivers/i2c/chips/Kconfig
@@ -153,4 +153,14 @@ config SENSORS_TSL2550
153 This driver can also be built as a module. If so, the module 153 This driver can also be built as a module. If so, the module
154 will be called tsl2550. 154 will be called tsl2550.
155 155
156config MENELAUS
157 bool "TWL92330/Menelaus PM chip"
158 depends on I2C=y && ARCH_OMAP24XX
159 help
160 If you say yes here you get support for the Texas Instruments
161 TWL92330/Menelaus Power Management chip. This include voltage
162 regulators, Dual slot memory card tranceivers, real-time clock
163 and other features that are often used in portable devices like
164 cell phones and PDAs.
165
156endmenu 166endmenu
diff --git a/drivers/i2c/chips/Makefile b/drivers/i2c/chips/Makefile
index d8cbeb3f4b63..ca924e105959 100644
--- a/drivers/i2c/chips/Makefile
+++ b/drivers/i2c/chips/Makefile
@@ -13,6 +13,7 @@ obj-$(CONFIG_SENSORS_PCF8574) += pcf8574.o
13obj-$(CONFIG_SENSORS_PCF8591) += pcf8591.o 13obj-$(CONFIG_SENSORS_PCF8591) += pcf8591.o
14obj-$(CONFIG_ISP1301_OMAP) += isp1301_omap.o 14obj-$(CONFIG_ISP1301_OMAP) += isp1301_omap.o
15obj-$(CONFIG_TPS65010) += tps65010.o 15obj-$(CONFIG_TPS65010) += tps65010.o
16obj-$(CONFIG_MENELAUS) += menelaus.o
16obj-$(CONFIG_SENSORS_TSL2550) += tsl2550.o 17obj-$(CONFIG_SENSORS_TSL2550) += tsl2550.o
17 18
18ifeq ($(CONFIG_I2C_DEBUG_CHIP),y) 19ifeq ($(CONFIG_I2C_DEBUG_CHIP),y)
diff --git a/drivers/i2c/chips/menelaus.c b/drivers/i2c/chips/menelaus.c
new file mode 100644
index 000000000000..48a7e2f0bdd3
--- /dev/null
+++ b/drivers/i2c/chips/menelaus.c
@@ -0,0 +1,1281 @@
1#define DEBUG
2/*
3 * Copyright (C) 2004 Texas Instruments, Inc.
4 *
5 * Some parts based tps65010.c:
6 * Copyright (C) 2004 Texas Instruments and
7 * Copyright (C) 2004-2005 David Brownell
8 *
9 * Some parts based on tlv320aic24.c:
10 * Copyright (C) by Kai Svahn <kai.svahn@nokia.com>
11 *
12 * Changes for interrupt handling and clean-up by
13 * Tony Lindgren <tony@atomide.com> and Imre Deak <imre.deak@nokia.com>
14 * Cleanup and generalized support for voltage setting by
15 * Juha Yrjola
16 * Added support for controlling VCORE and regulator sleep states,
17 * Amit Kucheria <amit.kucheria@nokia.com>
18 * Copyright (C) 2005, 2006 Nokia Corporation
19 *
20 * This program is free software; you can redistribute it and/or modify
21 * it under the terms of the GNU General Public License as published by
22 * the Free Software Foundation; either version 2 of the License, or
23 * (at your option) any later version.
24 *
25 * This program is distributed in the hope that it will be useful,
26 * but WITHOUT ANY WARRANTY; without even the implied warranty of
27 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
28 * GNU General Public License for more details.
29 *
30 * You should have received a copy of the GNU General Public License
31 * along with this program; if not, write to the Free Software
32 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
33 */
34
35#include <linux/module.h>
36#include <linux/i2c.h>
37#include <linux/interrupt.h>
38#include <linux/sched.h>
39#include <linux/mutex.h>
40#include <linux/workqueue.h>
41#include <linux/delay.h>
42#include <linux/rtc.h>
43#include <linux/bcd.h>
44
45#include <asm/mach-types.h>
46#include <asm/mach/irq.h>
47
48#include <asm/arch/gpio.h>
49#include <asm/arch/menelaus.h>
50
51#define DRIVER_NAME "menelaus"
52
53#define pr_err(fmt, arg...) printk(KERN_ERR DRIVER_NAME ": ", ## arg);
54
55#define MENELAUS_I2C_ADDRESS 0x72
56
57#define MENELAUS_REV 0x01
58#define MENELAUS_VCORE_CTRL1 0x02
59#define MENELAUS_VCORE_CTRL2 0x03
60#define MENELAUS_VCORE_CTRL3 0x04
61#define MENELAUS_VCORE_CTRL4 0x05
62#define MENELAUS_VCORE_CTRL5 0x06
63#define MENELAUS_DCDC_CTRL1 0x07
64#define MENELAUS_DCDC_CTRL2 0x08
65#define MENELAUS_DCDC_CTRL3 0x09
66#define MENELAUS_LDO_CTRL1 0x0A
67#define MENELAUS_LDO_CTRL2 0x0B
68#define MENELAUS_LDO_CTRL3 0x0C
69#define MENELAUS_LDO_CTRL4 0x0D
70#define MENELAUS_LDO_CTRL5 0x0E
71#define MENELAUS_LDO_CTRL6 0x0F
72#define MENELAUS_LDO_CTRL7 0x10
73#define MENELAUS_LDO_CTRL8 0x11
74#define MENELAUS_SLEEP_CTRL1 0x12
75#define MENELAUS_SLEEP_CTRL2 0x13
76#define MENELAUS_DEVICE_OFF 0x14
77#define MENELAUS_OSC_CTRL 0x15
78#define MENELAUS_DETECT_CTRL 0x16
79#define MENELAUS_INT_MASK1 0x17
80#define MENELAUS_INT_MASK2 0x18
81#define MENELAUS_INT_STATUS1 0x19
82#define MENELAUS_INT_STATUS2 0x1A
83#define MENELAUS_INT_ACK1 0x1B
84#define MENELAUS_INT_ACK2 0x1C
85#define MENELAUS_GPIO_CTRL 0x1D
86#define MENELAUS_GPIO_IN 0x1E
87#define MENELAUS_GPIO_OUT 0x1F
88#define MENELAUS_BBSMS 0x20
89#define MENELAUS_RTC_CTRL 0x21
90#define MENELAUS_RTC_UPDATE 0x22
91#define MENELAUS_RTC_SEC 0x23
92#define MENELAUS_RTC_MIN 0x24
93#define MENELAUS_RTC_HR 0x25
94#define MENELAUS_RTC_DAY 0x26
95#define MENELAUS_RTC_MON 0x27
96#define MENELAUS_RTC_YR 0x28
97#define MENELAUS_RTC_WKDAY 0x29
98#define MENELAUS_RTC_AL_SEC 0x2A
99#define MENELAUS_RTC_AL_MIN 0x2B
100#define MENELAUS_RTC_AL_HR 0x2C
101#define MENELAUS_RTC_AL_DAY 0x2D
102#define MENELAUS_RTC_AL_MON 0x2E
103#define MENELAUS_RTC_AL_YR 0x2F
104#define MENELAUS_RTC_COMP_MSB 0x30
105#define MENELAUS_RTC_COMP_LSB 0x31
106#define MENELAUS_S1_PULL_EN 0x32
107#define MENELAUS_S1_PULL_DIR 0x33
108#define MENELAUS_S2_PULL_EN 0x34
109#define MENELAUS_S2_PULL_DIR 0x35
110#define MENELAUS_MCT_CTRL1 0x36
111#define MENELAUS_MCT_CTRL2 0x37
112#define MENELAUS_MCT_CTRL3 0x38
113#define MENELAUS_MCT_PIN_ST 0x39
114#define MENELAUS_DEBOUNCE1 0x3A
115
116#define IH_MENELAUS_IRQS 12
117#define MENELAUS_MMC_S1CD_IRQ 0 /* MMC slot 1 card change */
118#define MENELAUS_MMC_S2CD_IRQ 1 /* MMC slot 2 card change */
119#define MENELAUS_MMC_S1D1_IRQ 2 /* MMC DAT1 low in slot 1 */
120#define MENELAUS_MMC_S2D1_IRQ 3 /* MMC DAT1 low in slot 2 */
121#define MENELAUS_LOWBAT_IRQ 4 /* Low battery */
122#define MENELAUS_HOTDIE_IRQ 5 /* Hot die detect */
123#define MENELAUS_UVLO_IRQ 6 /* UVLO detect */
124#define MENELAUS_TSHUT_IRQ 7 /* Thermal shutdown */
125#define MENELAUS_RTCTMR_IRQ 8 /* RTC timer */
126#define MENELAUS_RTCALM_IRQ 9 /* RTC alarm */
127#define MENELAUS_RTCERR_IRQ 10 /* RTC error */
128#define MENELAUS_PSHBTN_IRQ 11 /* Push button */
129#define MENELAUS_RESERVED12_IRQ 12 /* Reserved */
130#define MENELAUS_RESERVED13_IRQ 13 /* Reserved */
131#define MENELAUS_RESERVED14_IRQ 14 /* Reserved */
132#define MENELAUS_RESERVED15_IRQ 15 /* Reserved */
133
134static void menelaus_work(struct work_struct *_menelaus);
135
136struct menelaus_chip {
137 struct mutex lock;
138 struct i2c_client *client;
139 struct work_struct work;
140#ifdef CONFIG_RTC_DRV_TWL92330
141 struct rtc_device *rtc;
142 u8 rtc_control;
143 unsigned uie:1;
144#endif
145 unsigned vcore_hw_mode:1;
146 u8 mask1, mask2;
147 void (*handlers[16])(struct menelaus_chip *);
148 void (*mmc_callback)(void *data, u8 mask);
149 void *mmc_callback_data;
150};
151
152static struct menelaus_chip *the_menelaus;
153
154static int menelaus_write_reg(int reg, u8 value)
155{
156 int val = i2c_smbus_write_byte_data(the_menelaus->client, reg, value);
157
158 if (val < 0) {
159 pr_err("write error");
160 return val;
161 }
162
163 return 0;
164}
165
166static int menelaus_read_reg(int reg)
167{
168 int val = i2c_smbus_read_byte_data(the_menelaus->client, reg);
169
170 if (val < 0)
171 pr_err("read error");
172
173 return val;
174}
175
176static int menelaus_enable_irq(int irq)
177{
178 if (irq > 7) {
179 irq -= 8;
180 the_menelaus->mask2 &= ~(1 << irq);
181 return menelaus_write_reg(MENELAUS_INT_MASK2,
182 the_menelaus->mask2);
183 } else {
184 the_menelaus->mask1 &= ~(1 << irq);
185 return menelaus_write_reg(MENELAUS_INT_MASK1,
186 the_menelaus->mask1);
187 }
188}
189
190static int menelaus_disable_irq(int irq)
191{
192 if (irq > 7) {
193 irq -= 8;
194 the_menelaus->mask2 |= (1 << irq);
195 return menelaus_write_reg(MENELAUS_INT_MASK2,
196 the_menelaus->mask2);
197 } else {
198 the_menelaus->mask1 |= (1 << irq);
199 return menelaus_write_reg(MENELAUS_INT_MASK1,
200 the_menelaus->mask1);
201 }
202}
203
204static int menelaus_ack_irq(int irq)
205{
206 if (irq > 7)
207 return menelaus_write_reg(MENELAUS_INT_ACK2, 1 << (irq - 8));
208 else
209 return menelaus_write_reg(MENELAUS_INT_ACK1, 1 << irq);
210}
211
212/* Adds a handler for an interrupt. Does not run in interrupt context */
213static int menelaus_add_irq_work(int irq,
214 void (*handler)(struct menelaus_chip *))
215{
216 int ret = 0;
217
218 mutex_lock(&the_menelaus->lock);
219 the_menelaus->handlers[irq] = handler;
220 ret = menelaus_enable_irq(irq);
221 mutex_unlock(&the_menelaus->lock);
222
223 return ret;
224}
225
226/* Removes handler for an interrupt */
227static int menelaus_remove_irq_work(int irq)
228{
229 int ret = 0;
230
231 mutex_lock(&the_menelaus->lock);
232 ret = menelaus_disable_irq(irq);
233 the_menelaus->handlers[irq] = NULL;
234 mutex_unlock(&the_menelaus->lock);
235
236 return ret;
237}
238
239/*
240 * Gets scheduled when a card detect interrupt happens. Note that in some cases
241 * this line is wired to card cover switch rather than the card detect switch
242 * in each slot. In this case the cards are not seen by menelaus.
243 * FIXME: Add handling for D1 too
244 */
245static void menelaus_mmc_cd_work(struct menelaus_chip *menelaus_hw)
246{
247 int reg;
248 unsigned char card_mask = 0;
249
250 reg = menelaus_read_reg(MENELAUS_MCT_PIN_ST);
251 if (reg < 0)
252 return;
253
254 if (!(reg & 0x1))
255 card_mask |= (1 << 0);
256
257 if (!(reg & 0x2))
258 card_mask |= (1 << 1);
259
260 if (menelaus_hw->mmc_callback)
261 menelaus_hw->mmc_callback(menelaus_hw->mmc_callback_data,
262 card_mask);
263}
264
265/*
266 * Toggles the MMC slots between open-drain and push-pull mode.
267 */
268int menelaus_set_mmc_opendrain(int slot, int enable)
269{
270 int ret, val;
271
272 if (slot != 1 && slot != 2)
273 return -EINVAL;
274 mutex_lock(&the_menelaus->lock);
275 ret = menelaus_read_reg(MENELAUS_MCT_CTRL1);
276 if (ret < 0) {
277 mutex_unlock(&the_menelaus->lock);
278 return ret;
279 }
280 val = ret;
281 if (slot == 1) {
282 if (enable)
283 val |= 1 << 2;
284 else
285 val &= ~(1 << 2);
286 } else {
287 if (enable)
288 val |= 1 << 3;
289 else
290 val &= ~(1 << 3);
291 }
292 ret = menelaus_write_reg(MENELAUS_MCT_CTRL1, val);
293 mutex_unlock(&the_menelaus->lock);
294
295 return ret;
296}
297EXPORT_SYMBOL(menelaus_set_mmc_opendrain);
298
299int menelaus_set_slot_sel(int enable)
300{
301 int ret;
302
303 mutex_lock(&the_menelaus->lock);
304 ret = menelaus_read_reg(MENELAUS_GPIO_CTRL);
305 if (ret < 0)
306 goto out;
307 ret |= 0x02;
308 if (enable)
309 ret |= 1 << 5;
310 else
311 ret &= ~(1 << 5);
312 ret = menelaus_write_reg(MENELAUS_GPIO_CTRL, ret);
313out:
314 mutex_unlock(&the_menelaus->lock);
315 return ret;
316}
317EXPORT_SYMBOL(menelaus_set_slot_sel);
318
319int menelaus_set_mmc_slot(int slot, int enable, int power, int cd_en)
320{
321 int ret, val;
322
323 if (slot != 1 && slot != 2)
324 return -EINVAL;
325 if (power >= 3)
326 return -EINVAL;
327
328 mutex_lock(&the_menelaus->lock);
329
330 ret = menelaus_read_reg(MENELAUS_MCT_CTRL2);
331 if (ret < 0)
332 goto out;
333 val = ret;
334 if (slot == 1) {
335 if (cd_en)
336 val |= (1 << 4) | (1 << 6);
337 else
338 val &= ~((1 << 4) | (1 << 6));
339 } else {
340 if (cd_en)
341 val |= (1 << 5) | (1 << 7);
342 else
343 val &= ~((1 << 5) | (1 << 7));
344 }
345 ret = menelaus_write_reg(MENELAUS_MCT_CTRL2, val);
346 if (ret < 0)
347 goto out;
348
349 ret = menelaus_read_reg(MENELAUS_MCT_CTRL3);
350 if (ret < 0)
351 goto out;
352 val = ret;
353 if (slot == 1) {
354 if (enable)
355 val |= 1 << 0;
356 else
357 val &= ~(1 << 0);
358 } else {
359 int b;
360
361 if (enable)
362 ret |= 1 << 1;
363 else
364 ret &= ~(1 << 1);
365 b = menelaus_read_reg(MENELAUS_MCT_CTRL2);
366 b &= ~0x03;
367 b |= power;
368 ret = menelaus_write_reg(MENELAUS_MCT_CTRL2, b);
369 if (ret < 0)
370 goto out;
371 }
372 /* Disable autonomous shutdown */
373 val &= ~(0x03 << 2);
374 ret = menelaus_write_reg(MENELAUS_MCT_CTRL3, val);
375out:
376 mutex_unlock(&the_menelaus->lock);
377 return ret;
378}
379EXPORT_SYMBOL(menelaus_set_mmc_slot);
380
381int menelaus_register_mmc_callback(void (*callback)(void *data, u8 card_mask),
382 void *data)
383{
384 int ret = 0;
385
386 the_menelaus->mmc_callback_data = data;
387 the_menelaus->mmc_callback = callback;
388 ret = menelaus_add_irq_work(MENELAUS_MMC_S1CD_IRQ,
389 menelaus_mmc_cd_work);
390 if (ret < 0)
391 return ret;
392 ret = menelaus_add_irq_work(MENELAUS_MMC_S2CD_IRQ,
393 menelaus_mmc_cd_work);
394 if (ret < 0)
395 return ret;
396 ret = menelaus_add_irq_work(MENELAUS_MMC_S1D1_IRQ,
397 menelaus_mmc_cd_work);
398 if (ret < 0)
399 return ret;
400 ret = menelaus_add_irq_work(MENELAUS_MMC_S2D1_IRQ,
401 menelaus_mmc_cd_work);
402
403 return ret;
404}
405EXPORT_SYMBOL(menelaus_register_mmc_callback);
406
407void menelaus_unregister_mmc_callback(void)
408{
409 menelaus_remove_irq_work(MENELAUS_MMC_S1CD_IRQ);
410 menelaus_remove_irq_work(MENELAUS_MMC_S2CD_IRQ);
411 menelaus_remove_irq_work(MENELAUS_MMC_S1D1_IRQ);
412 menelaus_remove_irq_work(MENELAUS_MMC_S2D1_IRQ);
413
414 the_menelaus->mmc_callback = NULL;
415 the_menelaus->mmc_callback_data = 0;
416}
417EXPORT_SYMBOL(menelaus_unregister_mmc_callback);
418
419struct menelaus_vtg {
420 const char *name;
421 u8 vtg_reg;
422 u8 vtg_shift;
423 u8 vtg_bits;
424 u8 mode_reg;
425};
426
427struct menelaus_vtg_value {
428 u16 vtg;
429 u16 val;
430};
431
432static int menelaus_set_voltage(const struct menelaus_vtg *vtg, int mV,
433 int vtg_val, int mode)
434{
435 int val, ret;
436 struct i2c_client *c = the_menelaus->client;
437
438 mutex_lock(&the_menelaus->lock);
439 if (vtg == 0)
440 goto set_voltage;
441
442 ret = menelaus_read_reg(vtg->vtg_reg);
443 if (ret < 0)
444 goto out;
445 val = ret & ~(((1 << vtg->vtg_bits) - 1) << vtg->vtg_shift);
446 val |= vtg_val << vtg->vtg_shift;
447
448 dev_dbg(&c->dev, "Setting voltage '%s'"
449 "to %d mV (reg 0x%02x, val 0x%02x)\n",
450 vtg->name, mV, vtg->vtg_reg, val);
451
452 ret = menelaus_write_reg(vtg->vtg_reg, val);
453 if (ret < 0)
454 goto out;
455set_voltage:
456 ret = menelaus_write_reg(vtg->mode_reg, mode);
457out:
458 mutex_unlock(&the_menelaus->lock);
459 if (ret == 0) {
460 /* Wait for voltage to stabilize */
461 msleep(1);
462 }
463 return ret;
464}
465
466static int menelaus_get_vtg_value(int vtg, const struct menelaus_vtg_value *tbl,
467 int n)
468{
469 int i;
470
471 for (i = 0; i < n; i++, tbl++)
472 if (tbl->vtg == vtg)
473 return tbl->val;
474 return -EINVAL;
475}
476
477/*
478 * Vcore can be programmed in two ways:
479 * SW-controlled: Required voltage is programmed into VCORE_CTRL1
480 * HW-controlled: Required range (roof-floor) is programmed into VCORE_CTRL3
481 * and VCORE_CTRL4
482 *
483 * Call correct 'set' function accordingly
484 */
485
486static const struct menelaus_vtg_value vcore_values[] = {
487 { 1000, 0 },
488 { 1025, 1 },
489 { 1050, 2 },
490 { 1075, 3 },
491 { 1100, 4 },
492 { 1125, 5 },
493 { 1150, 6 },
494 { 1175, 7 },
495 { 1200, 8 },
496 { 1225, 9 },
497 { 1250, 10 },
498 { 1275, 11 },
499 { 1300, 12 },
500 { 1325, 13 },
501 { 1350, 14 },
502 { 1375, 15 },
503 { 1400, 16 },
504 { 1425, 17 },
505 { 1450, 18 },
506};
507
508int menelaus_set_vcore_sw(unsigned int mV)
509{
510 int val, ret;
511 struct i2c_client *c = the_menelaus->client;
512
513 val = menelaus_get_vtg_value(mV, vcore_values,
514 ARRAY_SIZE(vcore_values));
515 if (val < 0)
516 return -EINVAL;
517
518 dev_dbg(&c->dev, "Setting VCORE to %d mV (val 0x%02x)\n", mV, val);
519
520 /* Set SW mode and the voltage in one go. */
521 mutex_lock(&the_menelaus->lock);
522 ret = menelaus_write_reg(MENELAUS_VCORE_CTRL1, val);
523 if (ret == 0)
524 the_menelaus->vcore_hw_mode = 0;
525 mutex_unlock(&the_menelaus->lock);
526 msleep(1);
527
528 return ret;
529}
530
531int menelaus_set_vcore_hw(unsigned int roof_mV, unsigned int floor_mV)
532{
533 int fval, rval, val, ret;
534 struct i2c_client *c = the_menelaus->client;
535
536 rval = menelaus_get_vtg_value(roof_mV, vcore_values,
537 ARRAY_SIZE(vcore_values));
538 if (rval < 0)
539 return -EINVAL;
540 fval = menelaus_get_vtg_value(floor_mV, vcore_values,
541 ARRAY_SIZE(vcore_values));
542 if (fval < 0)
543 return -EINVAL;
544
545 dev_dbg(&c->dev, "Setting VCORE FLOOR to %d mV and ROOF to %d mV\n",
546 floor_mV, roof_mV);
547
548 mutex_lock(&the_menelaus->lock);
549 ret = menelaus_write_reg(MENELAUS_VCORE_CTRL3, fval);
550 if (ret < 0)
551 goto out;
552 ret = menelaus_write_reg(MENELAUS_VCORE_CTRL4, rval);
553 if (ret < 0)
554 goto out;
555 if (!the_menelaus->vcore_hw_mode) {
556 val = menelaus_read_reg(MENELAUS_VCORE_CTRL1);
557 /* HW mode, turn OFF byte comparator */
558 val |= ((1 << 7) | (1 << 5));
559 ret = menelaus_write_reg(MENELAUS_VCORE_CTRL1, val);
560 the_menelaus->vcore_hw_mode = 1;
561 }
562 msleep(1);
563out:
564 mutex_unlock(&the_menelaus->lock);
565 return ret;
566}
567
568static const struct menelaus_vtg vmem_vtg = {
569 .name = "VMEM",
570 .vtg_reg = MENELAUS_LDO_CTRL1,
571 .vtg_shift = 0,
572 .vtg_bits = 2,
573 .mode_reg = MENELAUS_LDO_CTRL3,
574};
575
576static const struct menelaus_vtg_value vmem_values[] = {
577 { 1500, 0 },
578 { 1800, 1 },
579 { 1900, 2 },
580 { 2500, 3 },
581};
582
583int menelaus_set_vmem(unsigned int mV)
584{
585 int val;
586
587 if (mV == 0)
588 return menelaus_set_voltage(&vmem_vtg, 0, 0, 0);
589
590 val = menelaus_get_vtg_value(mV, vmem_values, ARRAY_SIZE(vmem_values));
591 if (val < 0)
592 return -EINVAL;
593 return menelaus_set_voltage(&vmem_vtg, mV, val, 0x02);
594}
595EXPORT_SYMBOL(menelaus_set_vmem);
596
597static const struct menelaus_vtg vio_vtg = {
598 .name = "VIO",
599 .vtg_reg = MENELAUS_LDO_CTRL1,
600 .vtg_shift = 2,
601 .vtg_bits = 2,
602 .mode_reg = MENELAUS_LDO_CTRL4,
603};
604
605static const struct menelaus_vtg_value vio_values[] = {
606 { 1500, 0 },
607 { 1800, 1 },
608 { 2500, 2 },
609 { 2800, 3 },
610};
611
612int menelaus_set_vio(unsigned int mV)
613{
614 int val;
615
616 if (mV == 0)
617 return menelaus_set_voltage(&vio_vtg, 0, 0, 0);
618
619 val = menelaus_get_vtg_value(mV, vio_values, ARRAY_SIZE(vio_values));
620 if (val < 0)
621 return -EINVAL;
622 return menelaus_set_voltage(&vio_vtg, mV, val, 0x02);
623}
624EXPORT_SYMBOL(menelaus_set_vio);
625
626static const struct menelaus_vtg_value vdcdc_values[] = {
627 { 1500, 0 },
628 { 1800, 1 },
629 { 2000, 2 },
630 { 2200, 3 },
631 { 2400, 4 },
632 { 2800, 5 },
633 { 3000, 6 },
634 { 3300, 7 },
635};
636
637static const struct menelaus_vtg vdcdc2_vtg = {
638 .name = "VDCDC2",
639 .vtg_reg = MENELAUS_DCDC_CTRL1,
640 .vtg_shift = 0,
641 .vtg_bits = 3,
642 .mode_reg = MENELAUS_DCDC_CTRL2,
643};
644
645static const struct menelaus_vtg vdcdc3_vtg = {
646 .name = "VDCDC3",
647 .vtg_reg = MENELAUS_DCDC_CTRL1,
648 .vtg_shift = 3,
649 .vtg_bits = 3,
650 .mode_reg = MENELAUS_DCDC_CTRL3,
651};
652
653int menelaus_set_vdcdc(int dcdc, unsigned int mV)
654{
655 const struct menelaus_vtg *vtg;
656 int val;
657
658 if (dcdc != 2 && dcdc != 3)
659 return -EINVAL;
660 if (dcdc == 2)
661 vtg = &vdcdc2_vtg;
662 else
663 vtg = &vdcdc3_vtg;
664
665 if (mV == 0)
666 return menelaus_set_voltage(vtg, 0, 0, 0);
667
668 val = menelaus_get_vtg_value(mV, vdcdc_values,
669 ARRAY_SIZE(vdcdc_values));
670 if (val < 0)
671 return -EINVAL;
672 return menelaus_set_voltage(vtg, mV, val, 0x03);
673}
674
675static const struct menelaus_vtg_value vmmc_values[] = {
676 { 1850, 0 },
677 { 2800, 1 },
678 { 3000, 2 },
679 { 3100, 3 },
680};
681
682static const struct menelaus_vtg vmmc_vtg = {
683 .name = "VMMC",
684 .vtg_reg = MENELAUS_LDO_CTRL1,
685 .vtg_shift = 6,
686 .vtg_bits = 2,
687 .mode_reg = MENELAUS_LDO_CTRL7,
688};
689
690int menelaus_set_vmmc(unsigned int mV)
691{
692 int val;
693
694 if (mV == 0)
695 return menelaus_set_voltage(&vmmc_vtg, 0, 0, 0);
696
697 val = menelaus_get_vtg_value(mV, vmmc_values, ARRAY_SIZE(vmmc_values));
698 if (val < 0)
699 return -EINVAL;
700 return menelaus_set_voltage(&vmmc_vtg, mV, val, 0x02);
701}
702EXPORT_SYMBOL(menelaus_set_vmmc);
703
704
705static const struct menelaus_vtg_value vaux_values[] = {
706 { 1500, 0 },
707 { 1800, 1 },
708 { 2500, 2 },
709 { 2800, 3 },
710};
711
712static const struct menelaus_vtg vaux_vtg = {
713 .name = "VAUX",
714 .vtg_reg = MENELAUS_LDO_CTRL1,
715 .vtg_shift = 4,
716 .vtg_bits = 2,
717 .mode_reg = MENELAUS_LDO_CTRL6,
718};
719
720int menelaus_set_vaux(unsigned int mV)
721{
722 int val;
723
724 if (mV == 0)
725 return menelaus_set_voltage(&vaux_vtg, 0, 0, 0);
726
727 val = menelaus_get_vtg_value(mV, vaux_values, ARRAY_SIZE(vaux_values));
728 if (val < 0)
729 return -EINVAL;
730 return menelaus_set_voltage(&vaux_vtg, mV, val, 0x02);
731}
732EXPORT_SYMBOL(menelaus_set_vaux);
733
734int menelaus_get_slot_pin_states(void)
735{
736 return menelaus_read_reg(MENELAUS_MCT_PIN_ST);
737}
738EXPORT_SYMBOL(menelaus_get_slot_pin_states);
739
740int menelaus_set_regulator_sleep(int enable, u32 val)
741{
742 int t, ret;
743 struct i2c_client *c = the_menelaus->client;
744
745 mutex_lock(&the_menelaus->lock);
746 ret = menelaus_write_reg(MENELAUS_SLEEP_CTRL2, val);
747 if (ret < 0)
748 goto out;
749
750 dev_dbg(&c->dev, "regulator sleep configuration: %02x\n", val);
751
752 ret = menelaus_read_reg(MENELAUS_GPIO_CTRL);
753 if (ret < 0)
754 goto out;
755 t = ((1 << 6) | 0x04);
756 if (enable)
757 ret |= t;
758 else
759 ret &= ~t;
760 ret = menelaus_write_reg(MENELAUS_GPIO_CTRL, ret);
761out:
762 mutex_unlock(&the_menelaus->lock);
763 return ret;
764}
765
766/*-----------------------------------------------------------------------*/
767
768/* Handles Menelaus interrupts. Does not run in interrupt context */
769static void menelaus_work(struct work_struct *_menelaus)
770{
771 struct menelaus_chip *menelaus =
772 container_of(_menelaus, struct menelaus_chip, work);
773 void (*handler)(struct menelaus_chip *menelaus);
774
775 while (1) {
776 unsigned isr;
777
778 isr = (menelaus_read_reg(MENELAUS_INT_STATUS2)
779 & ~menelaus->mask2) << 8;
780 isr |= menelaus_read_reg(MENELAUS_INT_STATUS1)
781 & ~menelaus->mask1;
782 if (!isr)
783 break;
784
785 while (isr) {
786 int irq = fls(isr) - 1;
787 isr &= ~(1 << irq);
788
789 mutex_lock(&menelaus->lock);
790 menelaus_disable_irq(irq);
791 menelaus_ack_irq(irq);
792 handler = menelaus->handlers[irq];
793 if (handler)
794 handler(menelaus);
795 menelaus_enable_irq(irq);
796 mutex_unlock(&menelaus->lock);
797 }
798 }
799 enable_irq(menelaus->client->irq);
800}
801
802/*
803 * We cannot use I2C in interrupt context, so we just schedule work.
804 */
805static irqreturn_t menelaus_irq(int irq, void *_menelaus)
806{
807 struct menelaus_chip *menelaus = _menelaus;
808
809 disable_irq_nosync(irq);
810 (void)schedule_work(&menelaus->work);
811
812 return IRQ_HANDLED;
813}
814
815/*-----------------------------------------------------------------------*/
816
817/*
818 * The RTC needs to be set once, then it runs on backup battery power.
819 * It supports alarms, including system wake alarms (from some modes);
820 * and 1/second IRQs if requested.
821 */
822#ifdef CONFIG_RTC_DRV_TWL92330
823
824#define RTC_CTRL_RTC_EN (1 << 0)
825#define RTC_CTRL_AL_EN (1 << 1)
826#define RTC_CTRL_MODE12 (1 << 2)
827#define RTC_CTRL_EVERY_MASK (3 << 3)
828#define RTC_CTRL_EVERY_SEC (0 << 3)
829#define RTC_CTRL_EVERY_MIN (1 << 3)
830#define RTC_CTRL_EVERY_HR (2 << 3)
831#define RTC_CTRL_EVERY_DAY (3 << 3)
832
833#define RTC_UPDATE_EVERY 0x08
834
835#define RTC_HR_PM (1 << 7)
836
837static void menelaus_to_time(char *regs, struct rtc_time *t)
838{
839 t->tm_sec = BCD2BIN(regs[0]);
840 t->tm_min = BCD2BIN(regs[1]);
841 if (the_menelaus->rtc_control & RTC_CTRL_MODE12) {
842 t->tm_hour = BCD2BIN(regs[2] & 0x1f) - 1;
843 if (regs[2] & RTC_HR_PM)
844 t->tm_hour += 12;
845 } else
846 t->tm_hour = BCD2BIN(regs[2] & 0x3f);
847 t->tm_mday = BCD2BIN(regs[3]);
848 t->tm_mon = BCD2BIN(regs[4]) - 1;
849 t->tm_year = BCD2BIN(regs[5]) + 100;
850}
851
852static int time_to_menelaus(struct rtc_time *t, int regnum)
853{
854 int hour, status;
855
856 status = menelaus_write_reg(regnum++, BIN2BCD(t->tm_sec));
857 if (status < 0)
858 goto fail;
859
860 status = menelaus_write_reg(regnum++, BIN2BCD(t->tm_min));
861 if (status < 0)
862 goto fail;
863
864 if (the_menelaus->rtc_control & RTC_CTRL_MODE12) {
865 hour = t->tm_hour + 1;
866 if (hour > 12)
867 hour = RTC_HR_PM | BIN2BCD(hour - 12);
868 else
869 hour = BIN2BCD(hour);
870 } else
871 hour = BIN2BCD(t->tm_hour);
872 status = menelaus_write_reg(regnum++, hour);
873 if (status < 0)
874 goto fail;
875
876 status = menelaus_write_reg(regnum++, BIN2BCD(t->tm_mday));
877 if (status < 0)
878 goto fail;
879
880 status = menelaus_write_reg(regnum++, BIN2BCD(t->tm_mon + 1));
881 if (status < 0)
882 goto fail;
883
884 status = menelaus_write_reg(regnum++, BIN2BCD(t->tm_year - 100));
885 if (status < 0)
886 goto fail;
887
888 return 0;
889fail:
890 dev_err(&the_menelaus->client->dev, "rtc write reg %02x, err %d\n",
891 --regnum, status);
892 return status;
893}
894
895static int menelaus_read_time(struct device *dev, struct rtc_time *t)
896{
897 struct i2c_msg msg[2];
898 char regs[7];
899 int status;
900
901 /* block read date and time registers */
902 regs[0] = MENELAUS_RTC_SEC;
903
904 msg[0].addr = MENELAUS_I2C_ADDRESS;
905 msg[0].flags = 0;
906 msg[0].len = 1;
907 msg[0].buf = regs;
908
909 msg[1].addr = MENELAUS_I2C_ADDRESS;
910 msg[1].flags = I2C_M_RD;
911 msg[1].len = sizeof(regs);
912 msg[1].buf = regs;
913
914 status = i2c_transfer(the_menelaus->client->adapter, msg, 2);
915 if (status != 2) {
916 dev_err(dev, "%s error %d\n", "read", status);
917 return -EIO;
918 }
919
920 menelaus_to_time(regs, t);
921 t->tm_wday = BCD2BIN(regs[6]);
922
923 return 0;
924}
925
926static int menelaus_set_time(struct device *dev, struct rtc_time *t)
927{
928 int status;
929
930 /* write date and time registers */
931 status = time_to_menelaus(t, MENELAUS_RTC_SEC);
932 if (status < 0)
933 return status;
934 status = menelaus_write_reg(MENELAUS_RTC_WKDAY, BIN2BCD(t->tm_wday));
935 if (status < 0) {
936 dev_err(&the_menelaus->client->dev, "rtc write reg %02x",
937 "err %d\n", MENELAUS_RTC_WKDAY, status);
938 return status;
939 }
940
941 /* now commit the write */
942 status = menelaus_write_reg(MENELAUS_RTC_UPDATE, RTC_UPDATE_EVERY);
943 if (status < 0)
944 dev_err(&the_menelaus->client->dev, "rtc commit time, err %d\n",
945 status);
946
947 return 0;
948}
949
950static int menelaus_read_alarm(struct device *dev, struct rtc_wkalrm *w)
951{
952 struct i2c_msg msg[2];
953 char regs[6];
954 int status;
955
956 /* block read alarm registers */
957 regs[0] = MENELAUS_RTC_AL_SEC;
958
959 msg[0].addr = MENELAUS_I2C_ADDRESS;
960 msg[0].flags = 0;
961 msg[0].len = 1;
962 msg[0].buf = regs;
963
964 msg[1].addr = MENELAUS_I2C_ADDRESS;
965 msg[1].flags = I2C_M_RD;
966 msg[1].len = sizeof(regs);
967 msg[1].buf = regs;
968
969 status = i2c_transfer(the_menelaus->client->adapter, msg, 2);
970 if (status != 2) {
971 dev_err(dev, "%s error %d\n", "alarm read", status);
972 return -EIO;
973 }
974
975 menelaus_to_time(regs, &w->time);
976
977 w->enabled = !!(the_menelaus->rtc_control & RTC_CTRL_AL_EN);
978
979 /* NOTE we *could* check if actually pending... */
980 w->pending = 0;
981
982 return 0;
983}
984
985static int menelaus_set_alarm(struct device *dev, struct rtc_wkalrm *w)
986{
987 int status;
988
989 if (the_menelaus->client->irq <= 0 && w->enabled)
990 return -ENODEV;
991
992 /* clear previous alarm enable */
993 if (the_menelaus->rtc_control & RTC_CTRL_AL_EN) {
994 the_menelaus->rtc_control &= ~RTC_CTRL_AL_EN;
995 status = menelaus_write_reg(MENELAUS_RTC_CTRL,
996 the_menelaus->rtc_control);
997 if (status < 0)
998 return status;
999 }
1000
1001 /* write alarm registers */
1002 status = time_to_menelaus(&w->time, MENELAUS_RTC_AL_SEC);
1003 if (status < 0)
1004 return status;
1005
1006 /* enable alarm if requested */
1007 if (w->enabled) {
1008 the_menelaus->rtc_control |= RTC_CTRL_AL_EN;
1009 status = menelaus_write_reg(MENELAUS_RTC_CTRL,
1010 the_menelaus->rtc_control);
1011 }
1012
1013 return status;
1014}
1015
1016#ifdef CONFIG_RTC_INTF_DEV
1017
/*
 * IRQ work handler: forward the Menelaus 1/sec timer interrupt to the
 * RTC core as an update ("UF") event.
 */
static void menelaus_rtc_update_work(struct menelaus_chip *m)
{
	/* report 1/sec update */
	local_irq_disable();
	rtc_update_irq(m->rtc, 1, RTC_IRQF | RTC_UF);
	local_irq_enable();
}
1025
/*
 * RTC dev-interface ioctl hook: enable/disable the alarm IRQ and the
 * 1/sec update IRQ.  Unhandled commands return -ENOIOCTLCMD so the RTC
 * core can fall back to its generic handling.
 */
static int menelaus_ioctl(struct device *dev, unsigned cmd, unsigned long arg)
{
	int status;

	/* without an IRQ line neither alarm nor update events can fire */
	if (the_menelaus->client->irq <= 0)
		return -ENOIOCTLCMD;

	switch (cmd) {
	/* alarm IRQ */
	case RTC_AIE_ON:
		if (the_menelaus->rtc_control & RTC_CTRL_AL_EN)
			return 0;	/* already enabled */
		the_menelaus->rtc_control |= RTC_CTRL_AL_EN;
		break;
	case RTC_AIE_OFF:
		if (!(the_menelaus->rtc_control & RTC_CTRL_AL_EN))
			return 0;	/* already disabled */
		the_menelaus->rtc_control &= ~RTC_CTRL_AL_EN;
		break;
	/* 1/second "update" IRQ */
	case RTC_UIE_ON:
		if (the_menelaus->uie)
			return 0;
		/* remove any stale handler first; that status is
		 * deliberately discarded and only add's result matters */
		status = menelaus_remove_irq_work(MENELAUS_RTCTMR_IRQ);
		status = menelaus_add_irq_work(MENELAUS_RTCTMR_IRQ,
				menelaus_rtc_update_work);
		if (status == 0)
			the_menelaus->uie = 1;
		return status;
	case RTC_UIE_OFF:
		if (!the_menelaus->uie)
			return 0;
		status = menelaus_remove_irq_work(MENELAUS_RTCTMR_IRQ);
		if (status == 0)
			the_menelaus->uie = 0;
		return status;
	default:
		return -ENOIOCTLCMD;
	}
	/* only the AIE_ON/AIE_OFF cases reach here: commit the new
	 * rtc_control value to hardware */
	return menelaus_write_reg(MENELAUS_RTC_CTRL, the_menelaus->rtc_control);
}
1067
1068#else
1069#define menelaus_ioctl NULL
1070#endif
1071
1072/* REVISIT no compensation register support ... */
1073
/* Operations exposed to the RTC class core via rtc_device_register(). */
static const struct rtc_class_ops menelaus_rtc_ops = {
	.ioctl			= menelaus_ioctl,
	.read_time		= menelaus_read_time,
	.set_time		= menelaus_set_time,
	.read_alarm		= menelaus_read_alarm,
	.set_alarm		= menelaus_set_alarm,
};
1081
1082static void menelaus_rtc_alarm_work(struct menelaus_chip *m)
1083{
1084 /* report alarm */
1085 local_irq_disable();
1086 rtc_update_irq(m->rtc, 1, RTC_IRQF | RTC_AF);
1087 local_irq_enable();
1088
1089 /* then disable it; alarms are oneshot */
1090 the_menelaus->rtc_control &= ~RTC_CTRL_AL_EN;
1091 menelaus_write_reg(MENELAUS_RTC_CTRL, the_menelaus->rtc_control);
1092}
1093
/*
 * Initialize and register the RTC half of the chip during probe.
 * Bails out silently if the 32 kHz oscillator isn't running; wires up
 * alarm IRQ handling (and wakeup capability) only when an IRQ line is
 * available; sanitizes RTC_CTRL before registering with the RTC core.
 * Failures leave the driver usable without RTC support.
 */
static inline void menelaus_rtc_init(struct menelaus_chip *m)
{
	int alarm = (m->client->irq > 0);

	/* assume 32KDETEN pin is pulled high */
	if (!(menelaus_read_reg(MENELAUS_OSC_CTRL) & 0x80)) {
		dev_dbg(&m->client->dev, "no 32k oscillator\n");
		return;
	}

	/* support RTC alarm; it can issue wakeups */
	if (alarm) {
		if (menelaus_add_irq_work(MENELAUS_RTCALM_IRQ,
				menelaus_rtc_alarm_work) < 0) {
			dev_err(&m->client->dev, "can't handle RTC alarm\n");
			return;
		}
		device_init_wakeup(&m->client->dev, 1);
	}

	/* be sure RTC is enabled; allow 1/sec irqs; leave 12hr mode alone */
	m->rtc_control = menelaus_read_reg(MENELAUS_RTC_CTRL);
	if (!(m->rtc_control & RTC_CTRL_RTC_EN)
			|| (m->rtc_control & RTC_CTRL_AL_EN)
			|| (m->rtc_control & RTC_CTRL_EVERY_MASK)) {
		if (!(m->rtc_control & RTC_CTRL_RTC_EN)) {
			/* RTC was off, so its time is meaningless */
			dev_warn(&m->client->dev, "rtc clock needs setting\n");
			m->rtc_control |= RTC_CTRL_RTC_EN;
		}
		m->rtc_control &= ~RTC_CTRL_EVERY_MASK;
		m->rtc_control &= ~RTC_CTRL_AL_EN;
		menelaus_write_reg(MENELAUS_RTC_CTRL, m->rtc_control);
	}

	m->rtc = rtc_device_register(DRIVER_NAME,
			&m->client->dev,
			&menelaus_rtc_ops, THIS_MODULE);
	if (IS_ERR(m->rtc)) {
		/* undo the alarm/wakeup setup done above */
		if (alarm) {
			menelaus_remove_irq_work(MENELAUS_RTCALM_IRQ);
			device_init_wakeup(&m->client->dev, 0);
		}
		dev_err(&m->client->dev, "can't register RTC: %d\n",
				(int) PTR_ERR(m->rtc));
		the_menelaus->rtc = NULL;
	}
}
1141
1142#else
1143
/* RTC support not configured: probe proceeds without registering an RTC. */
static inline void menelaus_rtc_init(struct menelaus_chip *m)
{
	/* nothing */
}
1148
1149#endif
1150
1151/*-----------------------------------------------------------------------*/
1152
1153static struct i2c_driver menelaus_i2c_driver;
1154
1155static int menelaus_probe(struct i2c_client *client)
1156{
1157 struct menelaus_chip *menelaus;
1158 int rev = 0, val;
1159 int err = 0;
1160 struct menelaus_platform_data *menelaus_pdata =
1161 client->dev.platform_data;
1162
1163 if (the_menelaus) {
1164 dev_dbg(&client->dev, "only one %s for now\n",
1165 DRIVER_NAME);
1166 return -ENODEV;
1167 }
1168
1169 menelaus = kzalloc(sizeof *menelaus, GFP_KERNEL);
1170 if (!menelaus)
1171 return -ENOMEM;
1172
1173 i2c_set_clientdata(client, menelaus);
1174
1175 the_menelaus = menelaus;
1176 menelaus->client = client;
1177
1178 /* If a true probe check the device */
1179 rev = menelaus_read_reg(MENELAUS_REV);
1180 if (rev < 0) {
1181 pr_err("device not found");
1182 err = -ENODEV;
1183 goto fail1;
1184 }
1185
1186 /* Ack and disable all Menelaus interrupts */
1187 menelaus_write_reg(MENELAUS_INT_ACK1, 0xff);
1188 menelaus_write_reg(MENELAUS_INT_ACK2, 0xff);
1189 menelaus_write_reg(MENELAUS_INT_MASK1, 0xff);
1190 menelaus_write_reg(MENELAUS_INT_MASK2, 0xff);
1191 menelaus->mask1 = 0xff;
1192 menelaus->mask2 = 0xff;
1193
1194 /* Set output buffer strengths */
1195 menelaus_write_reg(MENELAUS_MCT_CTRL1, 0x73);
1196
1197 if (client->irq > 0) {
1198 err = request_irq(client->irq, menelaus_irq, IRQF_DISABLED,
1199 DRIVER_NAME, menelaus);
1200 if (err) {
1201 dev_dbg(&client->dev, "can't get IRQ %d, err %d",
1202 client->irq, err);
1203 goto fail1;
1204 }
1205 }
1206
1207 mutex_init(&menelaus->lock);
1208 INIT_WORK(&menelaus->work, menelaus_work);
1209
1210 pr_info("Menelaus rev %d.%d\n", rev >> 4, rev & 0x0f);
1211
1212 val = menelaus_read_reg(MENELAUS_VCORE_CTRL1);
1213 if (val < 0)
1214 goto fail2;
1215 if (val & (1 << 7))
1216 menelaus->vcore_hw_mode = 1;
1217 else
1218 menelaus->vcore_hw_mode = 0;
1219
1220 if (menelaus_pdata != NULL && menelaus_pdata->late_init != NULL) {
1221 err = menelaus_pdata->late_init(&client->dev);
1222 if (err < 0)
1223 goto fail2;
1224 }
1225
1226 menelaus_rtc_init(menelaus);
1227
1228 return 0;
1229fail2:
1230 free_irq(client->irq, menelaus);
1231 flush_scheduled_work();
1232fail1:
1233 kfree(menelaus);
1234 return err;
1235}
1236
1237static int __exit menelaus_remove(struct i2c_client *client)
1238{
1239 struct menelaus_chip *menelaus = i2c_get_clientdata(client);
1240
1241 free_irq(client->irq, menelaus);
1242 kfree(menelaus);
1243 i2c_set_clientdata(client, NULL);
1244 the_menelaus = NULL;
1245 return 0;
1246}
1247
/* Driver registration glue; remove is discarded on non-modular builds. */
static struct i2c_driver menelaus_i2c_driver = {
	.driver = {
		.name		= DRIVER_NAME,
	},
	.probe		= menelaus_probe,
	.remove		= __exit_p(menelaus_remove),
};
1255
1256static int __init menelaus_init(void)
1257{
1258 int res;
1259
1260 res = i2c_add_driver(&menelaus_i2c_driver);
1261 if (res < 0) {
1262 pr_err("driver registration failed\n");
1263 return res;
1264 }
1265
1266 return 0;
1267}
1268
/* Module exit: unregister the I2C driver. */
static void __exit menelaus_exit(void)
{
	i2c_del_driver(&menelaus_i2c_driver);

	/* FIXME: Shutdown menelaus parts that can be shut down */
}
1275
1276MODULE_AUTHOR("Texas Instruments, Inc. (and others)");
1277MODULE_DESCRIPTION("I2C interface for Menelaus.");
1278MODULE_LICENSE("GPL");
1279
1280module_init(menelaus_init);
1281module_exit(menelaus_exit);
diff --git a/drivers/ide/ide-probe.c b/drivers/ide/ide-probe.c
index cc5801399467..5a4c5ea12f89 100644
--- a/drivers/ide/ide-probe.c
+++ b/drivers/ide/ide-probe.c
@@ -1073,14 +1073,14 @@ static int init_irq (ide_hwif_t *hwif)
1073 hwgroup->hwif->next = hwif; 1073 hwgroup->hwif->next = hwif;
1074 spin_unlock_irq(&ide_lock); 1074 spin_unlock_irq(&ide_lock);
1075 } else { 1075 } else {
1076 hwgroup = kmalloc_node(sizeof(ide_hwgroup_t), GFP_KERNEL, 1076 hwgroup = kmalloc_node(sizeof(ide_hwgroup_t),
1077 GFP_KERNEL | __GFP_ZERO,
1077 hwif_to_node(hwif->drives[0].hwif)); 1078 hwif_to_node(hwif->drives[0].hwif));
1078 if (!hwgroup) 1079 if (!hwgroup)
1079 goto out_up; 1080 goto out_up;
1080 1081
1081 hwif->hwgroup = hwgroup; 1082 hwif->hwgroup = hwgroup;
1082 1083
1083 memset(hwgroup, 0, sizeof(ide_hwgroup_t));
1084 hwgroup->hwif = hwif->next = hwif; 1084 hwgroup->hwif = hwif->next = hwif;
1085 hwgroup->rq = NULL; 1085 hwgroup->rq = NULL;
1086 hwgroup->handler = NULL; 1086 hwgroup->handler = NULL;
diff --git a/drivers/ieee1394/ieee1394_core.c b/drivers/ieee1394/ieee1394_core.c
index 0fc8c6e559e4..ee45259573c8 100644
--- a/drivers/ieee1394/ieee1394_core.c
+++ b/drivers/ieee1394/ieee1394_core.c
@@ -30,6 +30,7 @@
30#include <linux/moduleparam.h> 30#include <linux/moduleparam.h>
31#include <linux/bitops.h> 31#include <linux/bitops.h>
32#include <linux/kdev_t.h> 32#include <linux/kdev_t.h>
33#include <linux/freezer.h>
33#include <linux/suspend.h> 34#include <linux/suspend.h>
34#include <linux/kthread.h> 35#include <linux/kthread.h>
35#include <linux/preempt.h> 36#include <linux/preempt.h>
@@ -1128,8 +1129,6 @@ static int hpsbpkt_thread(void *__hi)
1128 struct list_head tmp; 1129 struct list_head tmp;
1129 int may_schedule; 1130 int may_schedule;
1130 1131
1131 current->flags |= PF_NOFREEZE;
1132
1133 while (!kthread_should_stop()) { 1132 while (!kthread_should_stop()) {
1134 1133
1135 INIT_LIST_HEAD(&tmp); 1134 INIT_LIST_HEAD(&tmp);
diff --git a/drivers/ieee1394/nodemgr.c b/drivers/ieee1394/nodemgr.c
index 51a12062ed36..2ffd53461db6 100644
--- a/drivers/ieee1394/nodemgr.c
+++ b/drivers/ieee1394/nodemgr.c
@@ -1699,6 +1699,7 @@ static int nodemgr_host_thread(void *__hi)
1699 unsigned int g, generation = 0; 1699 unsigned int g, generation = 0;
1700 int i, reset_cycles = 0; 1700 int i, reset_cycles = 0;
1701 1701
1702 set_freezable();
1702 /* Setup our device-model entries */ 1703 /* Setup our device-model entries */
1703 nodemgr_create_host_dev_files(host); 1704 nodemgr_create_host_dev_files(host);
1704 1705
diff --git a/drivers/infiniband/hw/mthca/mthca_qp.c b/drivers/infiniband/hw/mthca/mthca_qp.c
index eef415b12b2e..11f1d99db40b 100644
--- a/drivers/infiniband/hw/mthca/mthca_qp.c
+++ b/drivers/infiniband/hw/mthca/mthca_qp.c
@@ -1591,7 +1591,7 @@ int mthca_tavor_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr,
1591 int i; 1591 int i;
1592 int size; 1592 int size;
1593 int size0 = 0; 1593 int size0 = 0;
1594 u32 f0; 1594 u32 f0 = 0;
1595 int ind; 1595 int ind;
1596 u8 op0 = 0; 1596 u8 op0 = 0;
1597 1597
@@ -1946,7 +1946,7 @@ int mthca_arbel_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr,
1946 int i; 1946 int i;
1947 int size; 1947 int size;
1948 int size0 = 0; 1948 int size0 = 0;
1949 u32 f0; 1949 u32 f0 = 0;
1950 int ind; 1950 int ind;
1951 u8 op0 = 0; 1951 u8 op0 = 0;
1952 1952
diff --git a/drivers/input/gameport/gameport.c b/drivers/input/gameport/gameport.c
index bd686a2a517d..20896d5e5f0e 100644
--- a/drivers/input/gameport/gameport.c
+++ b/drivers/input/gameport/gameport.c
@@ -445,6 +445,7 @@ static struct gameport *gameport_get_pending_child(struct gameport *parent)
445 445
446static int gameport_thread(void *nothing) 446static int gameport_thread(void *nothing)
447{ 447{
448 set_freezable();
448 do { 449 do {
449 gameport_handle_event(); 450 gameport_handle_event();
450 wait_event_interruptible(gameport_wait, 451 wait_event_interruptible(gameport_wait,
diff --git a/drivers/input/serio/serio.c b/drivers/input/serio/serio.c
index a8f3bc1dff22..372ca4931194 100644
--- a/drivers/input/serio/serio.c
+++ b/drivers/input/serio/serio.c
@@ -384,6 +384,7 @@ static struct serio *serio_get_pending_child(struct serio *parent)
384 384
385static int serio_thread(void *nothing) 385static int serio_thread(void *nothing)
386{ 386{
387 set_freezable();
387 do { 388 do {
388 serio_handle_event(); 389 serio_handle_event();
389 wait_event_interruptible(serio_wait, 390 wait_event_interruptible(serio_wait,
diff --git a/drivers/input/touchscreen/ucb1400_ts.c b/drivers/input/touchscreen/ucb1400_ts.c
index f0cbcdb008ed..36f944019158 100644
--- a/drivers/input/touchscreen/ucb1400_ts.c
+++ b/drivers/input/touchscreen/ucb1400_ts.c
@@ -292,6 +292,7 @@ static int ucb1400_ts_thread(void *_ucb)
292 292
293 sched_setscheduler(tsk, SCHED_FIFO, &param); 293 sched_setscheduler(tsk, SCHED_FIFO, &param);
294 294
295 set_freezable();
295 while (!kthread_should_stop()) { 296 while (!kthread_should_stop()) {
296 unsigned int x, y, p; 297 unsigned int x, y, p;
297 long timeout; 298 long timeout;
diff --git a/drivers/isdn/Kconfig b/drivers/isdn/Kconfig
index 3e088c42b222..cf906c8cee4d 100644
--- a/drivers/isdn/Kconfig
+++ b/drivers/isdn/Kconfig
@@ -2,12 +2,10 @@
2# ISDN device configuration 2# ISDN device configuration
3# 3#
4 4
5menu "ISDN subsystem" 5menuconfig ISDN
6 depends on !S390
7
8config ISDN
9 tristate "ISDN support" 6 tristate "ISDN support"
10 depends on NET 7 depends on NET
8 depends on !S390
11 ---help--- 9 ---help---
12 ISDN ("Integrated Services Digital Networks", called RNIS in France) 10 ISDN ("Integrated Services Digital Networks", called RNIS in France)
13 is a special type of fully digital telephone service; it's mostly 11 is a special type of fully digital telephone service; it's mostly
@@ -21,9 +19,9 @@ config ISDN
21 19
22 Select this option if you want your kernel to support ISDN. 20 Select this option if you want your kernel to support ISDN.
23 21
22if ISDN
24 23
25menu "Old ISDN4Linux" 24menu "Old ISDN4Linux"
26 depends on NET && ISDN
27 25
28config ISDN_I4L 26config ISDN_I4L
29 tristate "Old ISDN4Linux (deprecated)" 27 tristate "Old ISDN4Linux (deprecated)"
@@ -50,20 +48,21 @@ endif
50endmenu 48endmenu
51 49
52comment "CAPI subsystem" 50comment "CAPI subsystem"
53 depends on NET && ISDN
54 51
55config ISDN_CAPI 52config ISDN_CAPI
56 tristate "CAPI2.0 support" 53 tristate "CAPI2.0 support"
57 depends on ISDN
58 help 54 help
59 This provides the CAPI (Common ISDN Application Programming 55 This provides the CAPI (Common ISDN Application Programming
60 Interface, a standard making it easy for programs to access ISDN 56 Interface, a standard making it easy for programs to access ISDN
61 hardware, see <http://www.capi.org/>. This is needed for AVM's set 57 hardware, see <http://www.capi.org/>. This is needed for AVM's set
62 of active ISDN controllers like B1, T1, M1. 58 of active ISDN controllers like B1, T1, M1.
63 59
60if ISDN_CAPI
61
64source "drivers/isdn/capi/Kconfig" 62source "drivers/isdn/capi/Kconfig"
65 63
66source "drivers/isdn/hardware/Kconfig" 64source "drivers/isdn/hardware/Kconfig"
67 65
68endmenu 66endif # ISDN_CAPI
69 67
68endif # ISDN
diff --git a/drivers/isdn/capi/Kconfig b/drivers/isdn/capi/Kconfig
index c92f9d764fce..e1afd60924fb 100644
--- a/drivers/isdn/capi/Kconfig
+++ b/drivers/isdn/capi/Kconfig
@@ -3,7 +3,6 @@
3# 3#
4config ISDN_DRV_AVMB1_VERBOSE_REASON 4config ISDN_DRV_AVMB1_VERBOSE_REASON
5 bool "Verbose reason code reporting" 5 bool "Verbose reason code reporting"
6 depends on ISDN_CAPI
7 default y 6 default y
8 help 7 help
9 If you say Y here, the CAPI drivers will give verbose reasons for 8 If you say Y here, the CAPI drivers will give verbose reasons for
@@ -12,7 +11,6 @@ config ISDN_DRV_AVMB1_VERBOSE_REASON
12 11
13config CAPI_TRACE 12config CAPI_TRACE
14 bool "CAPI trace support" 13 bool "CAPI trace support"
15 depends on ISDN_CAPI
16 default y 14 default y
17 help 15 help
18 If you say Y here, the kernelcapi driver can make verbose traces 16 If you say Y here, the kernelcapi driver can make verbose traces
@@ -23,7 +21,7 @@ config CAPI_TRACE
23 21
24config ISDN_CAPI_MIDDLEWARE 22config ISDN_CAPI_MIDDLEWARE
25 bool "CAPI2.0 Middleware support (EXPERIMENTAL)" 23 bool "CAPI2.0 Middleware support (EXPERIMENTAL)"
26 depends on ISDN_CAPI && EXPERIMENTAL 24 depends on EXPERIMENTAL
27 help 25 help
28 This option will enhance the capabilities of the /dev/capi20 26 This option will enhance the capabilities of the /dev/capi20
29 interface. It will provide a means of moving a data connection, 27 interface. It will provide a means of moving a data connection,
@@ -33,7 +31,6 @@ config ISDN_CAPI_MIDDLEWARE
33 31
34config ISDN_CAPI_CAPI20 32config ISDN_CAPI_CAPI20
35 tristate "CAPI2.0 /dev/capi support" 33 tristate "CAPI2.0 /dev/capi support"
36 depends on ISDN_CAPI
37 help 34 help
38 This option will provide the CAPI 2.0 interface to userspace 35 This option will provide the CAPI 2.0 interface to userspace
39 applications via /dev/capi20. Applications should use the 36 applications via /dev/capi20. Applications should use the
@@ -56,7 +53,7 @@ config ISDN_CAPI_CAPIFS
56 53
57config ISDN_CAPI_CAPIDRV 54config ISDN_CAPI_CAPIDRV
58 tristate "CAPI2.0 capidrv interface support" 55 tristate "CAPI2.0 capidrv interface support"
59 depends on ISDN_CAPI && ISDN_I4L 56 depends on ISDN_I4L
60 help 57 help
61 This option provides the glue code to hook up CAPI driven cards to 58 This option provides the glue code to hook up CAPI driven cards to
62 the legacy isdn4linux link layer. If you have a card which is 59 the legacy isdn4linux link layer. If you have a card which is
diff --git a/drivers/isdn/capi/capi.c b/drivers/isdn/capi/capi.c
index 81661b8bd3a8..f449daef3eed 100644
--- a/drivers/isdn/capi/capi.c
+++ b/drivers/isdn/capi/capi.c
@@ -549,7 +549,7 @@ static int handle_minor_send(struct capiminor *mp)
549 capimsg_setu8 (skb->data, 5, CAPI_REQ); 549 capimsg_setu8 (skb->data, 5, CAPI_REQ);
550 capimsg_setu16(skb->data, 6, mp->msgid++); 550 capimsg_setu16(skb->data, 6, mp->msgid++);
551 capimsg_setu32(skb->data, 8, mp->ncci); /* NCCI */ 551 capimsg_setu32(skb->data, 8, mp->ncci); /* NCCI */
552 capimsg_setu32(skb->data, 12, (u32) skb->data); /* Data32 */ 552 capimsg_setu32(skb->data, 12, (u32)(long)skb->data);/* Data32 */
553 capimsg_setu16(skb->data, 16, len); /* Data length */ 553 capimsg_setu16(skb->data, 16, len); /* Data length */
554 capimsg_setu16(skb->data, 18, datahandle); 554 capimsg_setu16(skb->data, 18, datahandle);
555 capimsg_setu16(skb->data, 20, 0); /* Flags */ 555 capimsg_setu16(skb->data, 20, 0); /* Flags */
diff --git a/drivers/isdn/capi/kcapi.c b/drivers/isdn/capi/kcapi.c
index 3ed34f7a1c4f..9f73bc2727c2 100644
--- a/drivers/isdn/capi/kcapi.c
+++ b/drivers/isdn/capi/kcapi.c
@@ -258,7 +258,7 @@ static void recv_handler(struct work_struct *work)
258 if ((!ap) || (ap->release_in_progress)) 258 if ((!ap) || (ap->release_in_progress))
259 return; 259 return;
260 260
261 down(&ap->recv_sem); 261 mutex_lock(&ap->recv_mtx);
262 while ((skb = skb_dequeue(&ap->recv_queue))) { 262 while ((skb = skb_dequeue(&ap->recv_queue))) {
263 if (CAPIMSG_CMD(skb->data) == CAPI_DATA_B3_IND) 263 if (CAPIMSG_CMD(skb->data) == CAPI_DATA_B3_IND)
264 ap->nrecvdatapkt++; 264 ap->nrecvdatapkt++;
@@ -267,7 +267,7 @@ static void recv_handler(struct work_struct *work)
267 267
268 ap->recv_message(ap, skb); 268 ap->recv_message(ap, skb);
269 } 269 }
270 up(&ap->recv_sem); 270 mutex_unlock(&ap->recv_mtx);
271} 271}
272 272
273void capi_ctr_handle_message(struct capi_ctr * card, u16 appl, struct sk_buff *skb) 273void capi_ctr_handle_message(struct capi_ctr * card, u16 appl, struct sk_buff *skb)
@@ -547,7 +547,7 @@ u16 capi20_register(struct capi20_appl *ap)
547 ap->nsentctlpkt = 0; 547 ap->nsentctlpkt = 0;
548 ap->nsentdatapkt = 0; 548 ap->nsentdatapkt = 0;
549 ap->callback = NULL; 549 ap->callback = NULL;
550 init_MUTEX(&ap->recv_sem); 550 mutex_init(&ap->recv_mtx);
551 skb_queue_head_init(&ap->recv_queue); 551 skb_queue_head_init(&ap->recv_queue);
552 INIT_WORK(&ap->recv_work, recv_handler); 552 INIT_WORK(&ap->recv_work, recv_handler);
553 ap->release_in_progress = 0; 553 ap->release_in_progress = 0;
diff --git a/drivers/isdn/capi/kcapi_proc.c b/drivers/isdn/capi/kcapi_proc.c
index 31f4fd8b8b0a..845a797b0030 100644
--- a/drivers/isdn/capi/kcapi_proc.c
+++ b/drivers/isdn/capi/kcapi_proc.c
@@ -243,36 +243,15 @@ create_seq_entry(char *name, mode_t mode, const struct file_operations *f)
243 243
244// --------------------------------------------------------------------------- 244// ---------------------------------------------------------------------------
245 245
246
247static __inline__ struct capi_driver *capi_driver_get_idx(loff_t pos)
248{
249 struct capi_driver *drv = NULL;
250 struct list_head *l;
251 loff_t i;
252
253 i = 0;
254 list_for_each(l, &capi_drivers) {
255 drv = list_entry(l, struct capi_driver, list);
256 if (i++ == pos)
257 return drv;
258 }
259 return NULL;
260}
261
262static void *capi_driver_start(struct seq_file *seq, loff_t *pos) 246static void *capi_driver_start(struct seq_file *seq, loff_t *pos)
263{ 247{
264 struct capi_driver *drv;
265 read_lock(&capi_drivers_list_lock); 248 read_lock(&capi_drivers_list_lock);
266 drv = capi_driver_get_idx(*pos); 249 return seq_list_start(&capi_drivers, *pos);
267 return drv;
268} 250}
269 251
270static void *capi_driver_next(struct seq_file *seq, void *v, loff_t *pos) 252static void *capi_driver_next(struct seq_file *seq, void *v, loff_t *pos)
271{ 253{
272 struct capi_driver *drv = (struct capi_driver *)v; 254 return seq_list_next(v, &capi_drivers, pos);
273 ++*pos;
274 if (drv->list.next == &capi_drivers) return NULL;
275 return list_entry(drv->list.next, struct capi_driver, list);
276} 255}
277 256
278static void capi_driver_stop(struct seq_file *seq, void *v) 257static void capi_driver_stop(struct seq_file *seq, void *v)
@@ -282,7 +261,8 @@ static void capi_driver_stop(struct seq_file *seq, void *v)
282 261
283static int capi_driver_show(struct seq_file *seq, void *v) 262static int capi_driver_show(struct seq_file *seq, void *v)
284{ 263{
285 struct capi_driver *drv = (struct capi_driver *)v; 264 struct capi_driver *drv = list_entry(v, struct capi_driver, list);
265
286 seq_printf(seq, "%-32s %s\n", drv->name, drv->revision); 266 seq_printf(seq, "%-32s %s\n", drv->name, drv->revision);
287 return 0; 267 return 0;
288} 268}
diff --git a/drivers/isdn/hardware/Kconfig b/drivers/isdn/hardware/Kconfig
index 139f19797713..30d028d24955 100644
--- a/drivers/isdn/hardware/Kconfig
+++ b/drivers/isdn/hardware/Kconfig
@@ -2,7 +2,6 @@
2# ISDN hardware drivers 2# ISDN hardware drivers
3# 3#
4comment "CAPI hardware drivers" 4comment "CAPI hardware drivers"
5 depends on NET && ISDN && ISDN_CAPI
6 5
7source "drivers/isdn/hardware/avm/Kconfig" 6source "drivers/isdn/hardware/avm/Kconfig"
8 7
diff --git a/drivers/isdn/hardware/avm/Kconfig b/drivers/isdn/hardware/avm/Kconfig
index 29a32a8830c0..5dbcbe3a54a6 100644
--- a/drivers/isdn/hardware/avm/Kconfig
+++ b/drivers/isdn/hardware/avm/Kconfig
@@ -2,23 +2,22 @@
2# ISDN AVM drivers 2# ISDN AVM drivers
3# 3#
4 4
5menu "Active AVM cards" 5menuconfig CAPI_AVM
6 depends on NET && ISDN && ISDN_CAPI!=n 6 bool "Active AVM cards"
7
8config CAPI_AVM
9 bool "Support AVM cards"
10 help 7 help
11 Enable support for AVM active ISDN cards. 8 Enable support for AVM active ISDN cards.
12 9
10if CAPI_AVM
11
13config ISDN_DRV_AVMB1_B1ISA 12config ISDN_DRV_AVMB1_B1ISA
14 tristate "AVM B1 ISA support" 13 tristate "AVM B1 ISA support"
15 depends on CAPI_AVM && ISDN_CAPI && ISA 14 depends on ISA
16 help 15 help
17 Enable support for the ISA version of the AVM B1 card. 16 Enable support for the ISA version of the AVM B1 card.
18 17
19config ISDN_DRV_AVMB1_B1PCI 18config ISDN_DRV_AVMB1_B1PCI
20 tristate "AVM B1 PCI support" 19 tristate "AVM B1 PCI support"
21 depends on CAPI_AVM && ISDN_CAPI && PCI 20 depends on PCI
22 help 21 help
23 Enable support for the PCI version of the AVM B1 card. 22 Enable support for the PCI version of the AVM B1 card.
24 23
@@ -30,14 +29,13 @@ config ISDN_DRV_AVMB1_B1PCIV4
30 29
31config ISDN_DRV_AVMB1_T1ISA 30config ISDN_DRV_AVMB1_T1ISA
32 tristate "AVM T1/T1-B ISA support" 31 tristate "AVM T1/T1-B ISA support"
33 depends on CAPI_AVM && ISDN_CAPI && ISA 32 depends on ISA
34 help 33 help
35 Enable support for the AVM T1 T1B card. 34 Enable support for the AVM T1 T1B card.
36 Note: This is a PRI card and handle 30 B-channels. 35 Note: This is a PRI card and handle 30 B-channels.
37 36
38config ISDN_DRV_AVMB1_B1PCMCIA 37config ISDN_DRV_AVMB1_B1PCMCIA
39 tristate "AVM B1/M1/M2 PCMCIA support" 38 tristate "AVM B1/M1/M2 PCMCIA support"
40 depends on CAPI_AVM && ISDN_CAPI
41 help 39 help
42 Enable support for the PCMCIA version of the AVM B1 card. 40 Enable support for the PCMCIA version of the AVM B1 card.
43 41
@@ -50,17 +48,16 @@ config ISDN_DRV_AVMB1_AVM_CS
50 48
51config ISDN_DRV_AVMB1_T1PCI 49config ISDN_DRV_AVMB1_T1PCI
52 tristate "AVM T1/T1-B PCI support" 50 tristate "AVM T1/T1-B PCI support"
53 depends on CAPI_AVM && ISDN_CAPI && PCI 51 depends on PCI
54 help 52 help
55 Enable support for the AVM T1 T1B card. 53 Enable support for the AVM T1 T1B card.
56 Note: This is a PRI card and handle 30 B-channels. 54 Note: This is a PRI card and handle 30 B-channels.
57 55
58config ISDN_DRV_AVMB1_C4 56config ISDN_DRV_AVMB1_C4
59 tristate "AVM C4/C2 support" 57 tristate "AVM C4/C2 support"
60 depends on CAPI_AVM && ISDN_CAPI && PCI 58 depends on PCI
61 help 59 help
62 Enable support for the AVM C4/C2 PCI cards. 60 Enable support for the AVM C4/C2 PCI cards.
63 These cards handle 4/2 BRI ISDN lines (8/4 channels). 61 These cards handle 4/2 BRI ISDN lines (8/4 channels).
64 62
65endmenu 63endif # CAPI_AVM
66
diff --git a/drivers/isdn/hardware/eicon/Kconfig b/drivers/isdn/hardware/eicon/Kconfig
index 01d4afd9d843..6082b6a5ced3 100644
--- a/drivers/isdn/hardware/eicon/Kconfig
+++ b/drivers/isdn/hardware/eicon/Kconfig
@@ -2,52 +2,50 @@
2# ISDN DIVAS Eicon driver 2# ISDN DIVAS Eicon driver
3# 3#
4 4
5menu "Active Eicon DIVA Server cards" 5menuconfig CAPI_EICON
6 depends on NET && ISDN && ISDN_CAPI!=n 6 bool "Active Eicon DIVA Server cards"
7
8config CAPI_EICON
9 bool "Support Eicon cards"
10 help 7 help
11 Enable support for Eicon Networks active ISDN cards. 8 Enable support for Eicon Networks active ISDN cards.
12 9
10if CAPI_EICON
11
13config ISDN_DIVAS 12config ISDN_DIVAS
14 tristate "Support Eicon DIVA Server cards" 13 tristate "Support Eicon DIVA Server cards"
15 depends on CAPI_EICON && PROC_FS && PCI 14 depends on PROC_FS && PCI
16 help 15 help
17 Say Y here if you have an Eicon Networks DIVA Server PCI ISDN card. 16 Say Y here if you have an Eicon Networks DIVA Server PCI ISDN card.
18 In order to use this card, additional firmware is necessary, which 17 In order to use this card, additional firmware is necessary, which
19 has to be downloaded into the card using the divactrl utility. 18 has to be downloaded into the card using the divactrl utility.
20 19
20if ISDN_DIVAS
21
21config ISDN_DIVAS_BRIPCI 22config ISDN_DIVAS_BRIPCI
22 bool "DIVA Server BRI/PCI support" 23 bool "DIVA Server BRI/PCI support"
23 depends on ISDN_DIVAS
24 help 24 help
25 Enable support for DIVA Server BRI-PCI. 25 Enable support for DIVA Server BRI-PCI.
26 26
27config ISDN_DIVAS_PRIPCI 27config ISDN_DIVAS_PRIPCI
28 bool "DIVA Server PRI/PCI support" 28 bool "DIVA Server PRI/PCI support"
29 depends on ISDN_DIVAS
30 help 29 help
31 Enable support for DIVA Server PRI-PCI. 30 Enable support for DIVA Server PRI-PCI.
32 31
33config ISDN_DIVAS_DIVACAPI 32config ISDN_DIVAS_DIVACAPI
34 tristate "DIVA CAPI2.0 interface support" 33 tristate "DIVA CAPI2.0 interface support"
35 depends on ISDN_DIVAS && ISDN_CAPI
36 help 34 help
37 You need this to provide the CAPI interface 35 You need this to provide the CAPI interface
38 for DIVA Server cards. 36 for DIVA Server cards.
39 37
40config ISDN_DIVAS_USERIDI 38config ISDN_DIVAS_USERIDI
41 tristate "DIVA User-IDI interface support" 39 tristate "DIVA User-IDI interface support"
42 depends on ISDN_DIVAS
43 help 40 help
44 Enable support for user-mode IDI interface. 41 Enable support for user-mode IDI interface.
45 42
46config ISDN_DIVAS_MAINT 43config ISDN_DIVAS_MAINT
47 tristate "DIVA Maint driver support" 44 tristate "DIVA Maint driver support"
48 depends on ISDN_DIVAS && m 45 depends on m
49 help 46 help
50 Enable Divas Maintenance driver. 47 Enable Divas Maintenance driver.
51 48
52endmenu 49endif # ISDN_DIVAS
53 50
51endif # CAPI_EICON
diff --git a/drivers/isdn/hardware/eicon/idifunc.c b/drivers/isdn/hardware/eicon/idifunc.c
index 4cbc68cf4dba..db87d5105422 100644
--- a/drivers/isdn/hardware/eicon/idifunc.c
+++ b/drivers/isdn/hardware/eicon/idifunc.c
@@ -106,6 +106,7 @@ static void um_new_card(DESCRIPTOR * d)
106 } else { 106 } else {
107 DBG_ERR(("could not create user mode idi card %d", 107 DBG_ERR(("could not create user mode idi card %d",
108 adapter_nr)); 108 adapter_nr));
109 diva_os_free(0, card);
109 } 110 }
110} 111}
111 112
diff --git a/drivers/isdn/hisax/bkm_a4t.c b/drivers/isdn/hisax/bkm_a4t.c
index 871310d56a6e..3d1bdc8431ad 100644
--- a/drivers/isdn/hisax/bkm_a4t.c
+++ b/drivers/isdn/hisax/bkm_a4t.c
@@ -255,54 +255,38 @@ BKM_card_msg(struct IsdnCardState *cs, int mt, void *arg)
255 return (0); 255 return (0);
256} 256}
257 257
258static struct pci_dev *dev_a4t __devinitdata = NULL; 258static int __devinit a4t_pci_probe(struct pci_dev *dev_a4t,
259 struct IsdnCardState *cs,
260 u_int *found,
261 u_int *pci_memaddr)
262{
263 u16 sub_sys;
264 u16 sub_vendor;
265
266 sub_vendor = dev_a4t->subsystem_vendor;
267 sub_sys = dev_a4t->subsystem_device;
268 if ((sub_sys == PCI_DEVICE_ID_BERKOM_A4T) && (sub_vendor == PCI_VENDOR_ID_BERKOM)) {
269 if (pci_enable_device(dev_a4t))
270 return (0); /* end loop & function */
271 *found = 1;
272 *pci_memaddr = pci_resource_start(dev_a4t, 0);
273 cs->irq = dev_a4t->irq;
274 return (1); /* end loop */
275 }
259 276
260int __devinit 277 return (-1); /* continue looping */
261setup_bkm_a4t(struct IsdnCard *card) 278}
279
280static int __devinit a4t_cs_init(struct IsdnCard *card,
281 struct IsdnCardState *cs,
282 u_int pci_memaddr)
262{ 283{
263 struct IsdnCardState *cs = card->cs;
264 char tmp[64];
265 u_int pci_memaddr = 0, found = 0;
266 I20_REGISTER_FILE *pI20_Regs; 284 I20_REGISTER_FILE *pI20_Regs;
267#ifdef CONFIG_PCI
268#endif
269
270 strcpy(tmp, bkm_a4t_revision);
271 printk(KERN_INFO "HiSax: T-Berkom driver Rev. %s\n", HiSax_getrev(tmp));
272 if (cs->typ == ISDN_CTYPE_BKM_A4T) {
273 cs->subtyp = BKM_A4T;
274 } else
275 return (0);
276 285
277#ifdef CONFIG_PCI
278 while ((dev_a4t = pci_find_device(PCI_VENDOR_ID_ZORAN,
279 PCI_DEVICE_ID_ZORAN_36120, dev_a4t))) {
280 u16 sub_sys;
281 u16 sub_vendor;
282
283 sub_vendor = dev_a4t->subsystem_vendor;
284 sub_sys = dev_a4t->subsystem_device;
285 if ((sub_sys == PCI_DEVICE_ID_BERKOM_A4T) && (sub_vendor == PCI_VENDOR_ID_BERKOM)) {
286 if (pci_enable_device(dev_a4t))
287 return(0);
288 found = 1;
289 pci_memaddr = pci_resource_start(dev_a4t, 0);
290 cs->irq = dev_a4t->irq;
291 break;
292 }
293 }
294 if (!found) {
295 printk(KERN_WARNING "HiSax: %s: Card not found\n", CardType[card->typ]);
296 return (0);
297 }
298 if (!cs->irq) { /* IRQ range check ?? */ 286 if (!cs->irq) { /* IRQ range check ?? */
299 printk(KERN_WARNING "HiSax: %s: No IRQ\n", CardType[card->typ]); 287 printk(KERN_WARNING "HiSax: %s: No IRQ\n", CardType[card->typ]);
300 return (0); 288 return (0);
301 } 289 }
302 if (!pci_memaddr) {
303 printk(KERN_WARNING "HiSax: %s: No Memory base address\n", CardType[card->typ]);
304 return (0);
305 }
306 cs->hw.ax.base = (long) ioremap(pci_memaddr, 4096); 290 cs->hw.ax.base = (long) ioremap(pci_memaddr, 4096);
307 /* Check suspecious address */ 291 /* Check suspecious address */
308 pI20_Regs = (I20_REGISTER_FILE *) (cs->hw.ax.base); 292 pI20_Regs = (I20_REGISTER_FILE *) (cs->hw.ax.base);
@@ -317,11 +301,7 @@ setup_bkm_a4t(struct IsdnCard *card)
317 cs->hw.ax.jade_adr = cs->hw.ax.base + PO_OFFSET; 301 cs->hw.ax.jade_adr = cs->hw.ax.base + PO_OFFSET;
318 cs->hw.ax.isac_ale = GCS_1; 302 cs->hw.ax.isac_ale = GCS_1;
319 cs->hw.ax.jade_ale = GCS_3; 303 cs->hw.ax.jade_ale = GCS_3;
320#else 304
321 printk(KERN_WARNING "HiSax: %s: NO_PCI_BIOS\n", CardType[card->typ]);
322 printk(KERN_WARNING "HiSax: %s: unable to configure\n", CardType[card->typ]);
323 return (0);
324#endif /* CONFIG_PCI */
325 printk(KERN_INFO "HiSax: %s: Card configured at 0x%lX IRQ %d\n", 305 printk(KERN_INFO "HiSax: %s: Card configured at 0x%lX IRQ %d\n",
326 CardType[card->typ], cs->hw.ax.base, cs->irq); 306 CardType[card->typ], cs->hw.ax.base, cs->irq);
327 307
@@ -339,5 +319,43 @@ setup_bkm_a4t(struct IsdnCard *card)
339 ISACVersion(cs, "Telekom A4T:"); 319 ISACVersion(cs, "Telekom A4T:");
340 /* Jade version */ 320 /* Jade version */
341 JadeVersion(cs, "Telekom A4T:"); 321 JadeVersion(cs, "Telekom A4T:");
322
342 return (1); 323 return (1);
343} 324}
325
326static struct pci_dev *dev_a4t __devinitdata = NULL;
327
328int __devinit
329setup_bkm_a4t(struct IsdnCard *card)
330{
331 struct IsdnCardState *cs = card->cs;
332 char tmp[64];
333 u_int pci_memaddr = 0, found = 0;
334 int ret;
335
336 strcpy(tmp, bkm_a4t_revision);
337 printk(KERN_INFO "HiSax: T-Berkom driver Rev. %s\n", HiSax_getrev(tmp));
338 if (cs->typ == ISDN_CTYPE_BKM_A4T) {
339 cs->subtyp = BKM_A4T;
340 } else
341 return (0);
342
343 while ((dev_a4t = pci_find_device(PCI_VENDOR_ID_ZORAN,
344 PCI_DEVICE_ID_ZORAN_36120, dev_a4t))) {
345 ret = a4t_pci_probe(dev_a4t, cs, &found, &pci_memaddr);
346 if (!ret)
347 return (0);
348 if (ret > 0)
349 break;
350 }
351 if (!found) {
352 printk(KERN_WARNING "HiSax: %s: Card not found\n", CardType[card->typ]);
353 return (0);
354 }
355 if (!pci_memaddr) {
356 printk(KERN_WARNING "HiSax: %s: No Memory base address\n", CardType[card->typ]);
357 return (0);
358 }
359
360 return a4t_cs_init(card, cs, pci_memaddr);
361}
diff --git a/drivers/isdn/hisax/config.c b/drivers/isdn/hisax/config.c
index 8d53a7fd2671..5f7907e57090 100644
--- a/drivers/isdn/hisax/config.c
+++ b/drivers/isdn/hisax/config.c
@@ -361,11 +361,11 @@ module_param_array(io1, int, NULL, 0);
361 361
362int nrcards; 362int nrcards;
363 363
364extern char *l1_revision; 364extern const char *l1_revision;
365extern char *l2_revision; 365extern const char *l2_revision;
366extern char *l3_revision; 366extern const char *l3_revision;
367extern char *lli_revision; 367extern const char *lli_revision;
368extern char *tei_revision; 368extern const char *tei_revision;
369 369
370char *HiSax_getrev(const char *revision) 370char *HiSax_getrev(const char *revision)
371{ 371{
@@ -847,95 +847,10 @@ static int init_card(struct IsdnCardState *cs)
847 return 3; 847 return 3;
848} 848}
849 849
850static int checkcard(int cardnr, char *id, int *busy_flag, struct module *lockowner) 850static int hisax_cs_setup_card(struct IsdnCard *card)
851{ 851{
852 int ret = 0; 852 int ret;
853 struct IsdnCard *card = cards + cardnr;
854 struct IsdnCardState *cs;
855 853
856 cs = kzalloc(sizeof(struct IsdnCardState), GFP_ATOMIC);
857 if (!cs) {
858 printk(KERN_WARNING
859 "HiSax: No memory for IsdnCardState(card %d)\n",
860 cardnr + 1);
861 goto out;
862 }
863 card->cs = cs;
864 spin_lock_init(&cs->statlock);
865 spin_lock_init(&cs->lock);
866 cs->chanlimit = 2; /* maximum B-channel number */
867 cs->logecho = 0; /* No echo logging */
868 cs->cardnr = cardnr;
869 cs->debug = L1_DEB_WARN;
870 cs->HW_Flags = 0;
871 cs->busy_flag = busy_flag;
872 cs->irq_flags = I4L_IRQ_FLAG;
873#if TEI_PER_CARD
874 if (card->protocol == ISDN_PTYPE_NI1)
875 test_and_set_bit(FLG_TWO_DCHAN, &cs->HW_Flags);
876#else
877 test_and_set_bit(FLG_TWO_DCHAN, &cs->HW_Flags);
878#endif
879 cs->protocol = card->protocol;
880
881 if (card->typ <= 0 || card->typ > ISDN_CTYPE_COUNT) {
882 printk(KERN_WARNING
883 "HiSax: Card Type %d out of range\n", card->typ);
884 goto outf_cs;
885 }
886 if (!(cs->dlog = kmalloc(MAX_DLOG_SPACE, GFP_ATOMIC))) {
887 printk(KERN_WARNING
888 "HiSax: No memory for dlog(card %d)\n", cardnr + 1);
889 goto outf_cs;
890 }
891 if (!(cs->status_buf = kmalloc(HISAX_STATUS_BUFSIZE, GFP_ATOMIC))) {
892 printk(KERN_WARNING
893 "HiSax: No memory for status_buf(card %d)\n",
894 cardnr + 1);
895 goto outf_dlog;
896 }
897 cs->stlist = NULL;
898 cs->status_read = cs->status_buf;
899 cs->status_write = cs->status_buf;
900 cs->status_end = cs->status_buf + HISAX_STATUS_BUFSIZE - 1;
901 cs->typ = card->typ;
902#ifdef MODULE
903 cs->iif.owner = lockowner;
904#endif
905 strcpy(cs->iif.id, id);
906 cs->iif.channels = 2;
907 cs->iif.maxbufsize = MAX_DATA_SIZE;
908 cs->iif.hl_hdrlen = MAX_HEADER_LEN;
909 cs->iif.features =
910 ISDN_FEATURE_L2_X75I |
911 ISDN_FEATURE_L2_HDLC |
912 ISDN_FEATURE_L2_HDLC_56K |
913 ISDN_FEATURE_L2_TRANS |
914 ISDN_FEATURE_L3_TRANS |
915#ifdef CONFIG_HISAX_1TR6
916 ISDN_FEATURE_P_1TR6 |
917#endif
918#ifdef CONFIG_HISAX_EURO
919 ISDN_FEATURE_P_EURO |
920#endif
921#ifdef CONFIG_HISAX_NI1
922 ISDN_FEATURE_P_NI1 |
923#endif
924 0;
925
926 cs->iif.command = HiSax_command;
927 cs->iif.writecmd = NULL;
928 cs->iif.writebuf_skb = HiSax_writebuf_skb;
929 cs->iif.readstat = HiSax_readstatus;
930 register_isdn(&cs->iif);
931 cs->myid = cs->iif.channels;
932 printk(KERN_INFO
933 "HiSax: Card %d Protocol %s Id=%s (%d)\n", cardnr + 1,
934 (card->protocol == ISDN_PTYPE_1TR6) ? "1TR6" :
935 (card->protocol == ISDN_PTYPE_EURO) ? "EDSS1" :
936 (card->protocol == ISDN_PTYPE_LEASED) ? "LEASED" :
937 (card->protocol == ISDN_PTYPE_NI1) ? "NI1" :
938 "NONE", cs->iif.id, cs->myid);
939 switch (card->typ) { 854 switch (card->typ) {
940#if CARD_TELES0 855#if CARD_TELES0
941 case ISDN_CTYPE_16_0: 856 case ISDN_CTYPE_16_0:
@@ -1094,13 +1009,115 @@ static int checkcard(int cardnr, char *id, int *busy_flag, struct module *lockow
1094 printk(KERN_WARNING 1009 printk(KERN_WARNING
1095 "HiSax: Support for %s Card not selected\n", 1010 "HiSax: Support for %s Card not selected\n",
1096 CardType[card->typ]); 1011 CardType[card->typ]);
1097 ll_unload(cs); 1012 ret = 0;
1013 break;
1014 }
1015
1016 return ret;
1017}
1018
1019static int hisax_cs_new(int cardnr, char *id, struct IsdnCard *card,
1020 struct IsdnCardState **cs_out, int *busy_flag,
1021 struct module *lockowner)
1022{
1023 struct IsdnCardState *cs;
1024
1025 *cs_out = NULL;
1026
1027 cs = kzalloc(sizeof(struct IsdnCardState), GFP_ATOMIC);
1028 if (!cs) {
1029 printk(KERN_WARNING
1030 "HiSax: No memory for IsdnCardState(card %d)\n",
1031 cardnr + 1);
1032 goto out;
1033 }
1034 card->cs = cs;
1035 spin_lock_init(&cs->statlock);
1036 spin_lock_init(&cs->lock);
1037 cs->chanlimit = 2; /* maximum B-channel number */
1038 cs->logecho = 0; /* No echo logging */
1039 cs->cardnr = cardnr;
1040 cs->debug = L1_DEB_WARN;
1041 cs->HW_Flags = 0;
1042 cs->busy_flag = busy_flag;
1043 cs->irq_flags = I4L_IRQ_FLAG;
1044#if TEI_PER_CARD
1045 if (card->protocol == ISDN_PTYPE_NI1)
1046 test_and_set_bit(FLG_TWO_DCHAN, &cs->HW_Flags);
1047#else
1048 test_and_set_bit(FLG_TWO_DCHAN, &cs->HW_Flags);
1049#endif
1050 cs->protocol = card->protocol;
1051
1052 if (card->typ <= 0 || card->typ > ISDN_CTYPE_COUNT) {
1053 printk(KERN_WARNING
1054 "HiSax: Card Type %d out of range\n", card->typ);
1098 goto outf_cs; 1055 goto outf_cs;
1099 } 1056 }
1100 if (!ret) { 1057 if (!(cs->dlog = kmalloc(MAX_DLOG_SPACE, GFP_ATOMIC))) {
1101 ll_unload(cs); 1058 printk(KERN_WARNING
1059 "HiSax: No memory for dlog(card %d)\n", cardnr + 1);
1102 goto outf_cs; 1060 goto outf_cs;
1103 } 1061 }
1062 if (!(cs->status_buf = kmalloc(HISAX_STATUS_BUFSIZE, GFP_ATOMIC))) {
1063 printk(KERN_WARNING
1064 "HiSax: No memory for status_buf(card %d)\n",
1065 cardnr + 1);
1066 goto outf_dlog;
1067 }
1068 cs->stlist = NULL;
1069 cs->status_read = cs->status_buf;
1070 cs->status_write = cs->status_buf;
1071 cs->status_end = cs->status_buf + HISAX_STATUS_BUFSIZE - 1;
1072 cs->typ = card->typ;
1073#ifdef MODULE
1074 cs->iif.owner = lockowner;
1075#endif
1076 strcpy(cs->iif.id, id);
1077 cs->iif.channels = 2;
1078 cs->iif.maxbufsize = MAX_DATA_SIZE;
1079 cs->iif.hl_hdrlen = MAX_HEADER_LEN;
1080 cs->iif.features =
1081 ISDN_FEATURE_L2_X75I |
1082 ISDN_FEATURE_L2_HDLC |
1083 ISDN_FEATURE_L2_HDLC_56K |
1084 ISDN_FEATURE_L2_TRANS |
1085 ISDN_FEATURE_L3_TRANS |
1086#ifdef CONFIG_HISAX_1TR6
1087 ISDN_FEATURE_P_1TR6 |
1088#endif
1089#ifdef CONFIG_HISAX_EURO
1090 ISDN_FEATURE_P_EURO |
1091#endif
1092#ifdef CONFIG_HISAX_NI1
1093 ISDN_FEATURE_P_NI1 |
1094#endif
1095 0;
1096
1097 cs->iif.command = HiSax_command;
1098 cs->iif.writecmd = NULL;
1099 cs->iif.writebuf_skb = HiSax_writebuf_skb;
1100 cs->iif.readstat = HiSax_readstatus;
1101 register_isdn(&cs->iif);
1102 cs->myid = cs->iif.channels;
1103
1104 *cs_out = cs;
1105 return 1; /* success */
1106
1107outf_dlog:
1108 kfree(cs->dlog);
1109outf_cs:
1110 kfree(cs);
1111 card->cs = NULL;
1112out:
1113 return 0; /* error */
1114}
1115
1116static int hisax_cs_setup(int cardnr, struct IsdnCard *card,
1117 struct IsdnCardState *cs)
1118{
1119 int ret;
1120
1104 if (!(cs->rcvbuf = kmalloc(MAX_DFRAME_LEN_L1, GFP_ATOMIC))) { 1121 if (!(cs->rcvbuf = kmalloc(MAX_DFRAME_LEN_L1, GFP_ATOMIC))) {
1105 printk(KERN_WARNING "HiSax: No memory for isac rcvbuf\n"); 1122 printk(KERN_WARNING "HiSax: No memory for isac rcvbuf\n");
1106 ll_unload(cs); 1123 ll_unload(cs);
@@ -1143,11 +1160,41 @@ static int checkcard(int cardnr, char *id, int *busy_flag, struct module *lockow
1143 if (!test_bit(HW_ISAR, &cs->HW_Flags)) 1160 if (!test_bit(HW_ISAR, &cs->HW_Flags))
1144 ll_run(cs, 0); 1161 ll_run(cs, 0);
1145 1162
1146 ret = 1; 1163 return 1;
1164
1165outf_cs:
1166 kfree(cs);
1167 card->cs = NULL;
1168 return ret;
1169}
1170
1171static int checkcard(int cardnr, char *id, int *busy_flag, struct module *lockowner)
1172{
1173 int ret;
1174 struct IsdnCard *card = cards + cardnr;
1175 struct IsdnCardState *cs;
1176
1177 ret = hisax_cs_new(cardnr, id, card, &cs, busy_flag, lockowner);
1178 if (!ret)
1179 return 0;
1180
1181 printk(KERN_INFO
1182 "HiSax: Card %d Protocol %s Id=%s (%d)\n", cardnr + 1,
1183 (card->protocol == ISDN_PTYPE_1TR6) ? "1TR6" :
1184 (card->protocol == ISDN_PTYPE_EURO) ? "EDSS1" :
1185 (card->protocol == ISDN_PTYPE_LEASED) ? "LEASED" :
1186 (card->protocol == ISDN_PTYPE_NI1) ? "NI1" :
1187 "NONE", cs->iif.id, cs->myid);
1188
1189 ret = hisax_cs_setup_card(card);
1190 if (!ret) {
1191 ll_unload(cs);
1192 goto outf_cs;
1193 }
1194
1195 ret = hisax_cs_setup(cardnr, card, cs);
1147 goto out; 1196 goto out;
1148 1197
1149 outf_dlog:
1150 kfree(cs->dlog);
1151 outf_cs: 1198 outf_cs:
1152 kfree(cs); 1199 kfree(cs);
1153 card->cs = NULL; 1200 card->cs = NULL;
diff --git a/drivers/isdn/hisax/enternow_pci.c b/drivers/isdn/hisax/enternow_pci.c
index b45de9d408d1..b73027ff50e8 100644
--- a/drivers/isdn/hisax/enternow_pci.c
+++ b/drivers/isdn/hisax/enternow_pci.c
@@ -300,98 +300,72 @@ enpci_interrupt(int intno, void *dev_id)
300 return IRQ_HANDLED; 300 return IRQ_HANDLED;
301} 301}
302 302
303 303static int __devinit en_pci_probe(struct pci_dev *dev_netjet,
304static struct pci_dev *dev_netjet __devinitdata = NULL; 304 struct IsdnCardState *cs)
305
306/* called by config.c */
307int __devinit
308setup_enternow_pci(struct IsdnCard *card)
309{ 305{
310 int bytecnt; 306 if (pci_enable_device(dev_netjet))
311 struct IsdnCardState *cs = card->cs;
312 char tmp[64];
313
314#ifdef CONFIG_PCI
315#ifdef __BIG_ENDIAN
316#error "not running on big endian machines now"
317#endif
318 strcpy(tmp, enternow_pci_rev);
319 printk(KERN_INFO "HiSax: Formula-n Europe AG enter:now ISDN PCI driver Rev. %s\n", HiSax_getrev(tmp));
320 if (cs->typ != ISDN_CTYPE_ENTERNOW)
321 return(0); 307 return(0);
322 test_and_clear_bit(FLG_LOCK_ATOMIC, &cs->HW_Flags); 308 cs->irq = dev_netjet->irq;
323 309 if (!cs->irq) {
324 for ( ;; ) 310 printk(KERN_WARNING "enter:now PCI: No IRQ for PCI card found\n");
325 { 311 return(0);
326 if ((dev_netjet = pci_find_device(PCI_VENDOR_ID_TIGERJET, 312 }
327 PCI_DEVICE_ID_TIGERJET_300, dev_netjet))) { 313 cs->hw.njet.base = pci_resource_start(dev_netjet, 0);
328 if (pci_enable_device(dev_netjet)) 314 if (!cs->hw.njet.base) {
329 return(0); 315 printk(KERN_WARNING "enter:now PCI: No IO-Adr for PCI card found\n");
330 cs->irq = dev_netjet->irq; 316 return(0);
331 if (!cs->irq) { 317 }
332 printk(KERN_WARNING "enter:now PCI: No IRQ for PCI card found\n"); 318 /* checks Sub-Vendor ID because system crashes with Traverse-Card */
333 return(0); 319 if ((dev_netjet->subsystem_vendor != 0x55) ||
334 } 320 (dev_netjet->subsystem_device != 0x02)) {
335 cs->hw.njet.base = pci_resource_start(dev_netjet, 0); 321 printk(KERN_WARNING "enter:now: You tried to load this driver with an incompatible TigerJet-card\n");
336 if (!cs->hw.njet.base) { 322 printk(KERN_WARNING "Use type=20 for Traverse NetJet PCI Card.\n");
337 printk(KERN_WARNING "enter:now PCI: No IO-Adr for PCI card found\n"); 323 return(0);
338 return(0); 324 }
339 }
340 /* checks Sub-Vendor ID because system crashes with Traverse-Card */
341 if ((dev_netjet->subsystem_vendor != 0x55) ||
342 (dev_netjet->subsystem_device != 0x02)) {
343 printk(KERN_WARNING "enter:now: You tried to load this driver with an incompatible TigerJet-card\n");
344 printk(KERN_WARNING "Use type=20 for Traverse NetJet PCI Card.\n");
345 return(0);
346 }
347 } else {
348 printk(KERN_WARNING "enter:now PCI: No PCI card found\n");
349 return(0);
350 }
351
352 cs->hw.njet.auxa = cs->hw.njet.base + NETJET_AUXDATA;
353 cs->hw.njet.isac = cs->hw.njet.base + 0xC0; // Fenster zum AMD
354
355 /* Reset an */
356 cs->hw.njet.ctrl_reg = 0x07; // geändert von 0xff
357 outb(cs->hw.njet.ctrl_reg, cs->hw.njet.base + NETJET_CTRL);
358 /* 20 ms Pause */
359 mdelay(20);
360 325
361 cs->hw.njet.ctrl_reg = 0x30; /* Reset Off and status read clear */ 326 return(1);
362 outb(cs->hw.njet.ctrl_reg, cs->hw.njet.base + NETJET_CTRL); 327}
363 mdelay(10);
364 328
365 cs->hw.njet.auxd = 0x00; // war 0xc0 329static void __devinit en_cs_init(struct IsdnCard *card,
366 cs->hw.njet.dmactrl = 0; 330 struct IsdnCardState *cs)
331{
332 cs->hw.njet.auxa = cs->hw.njet.base + NETJET_AUXDATA;
333 cs->hw.njet.isac = cs->hw.njet.base + 0xC0; // Fenster zum AMD
367 334
368 outb(~TJ_AMD_IRQ, cs->hw.njet.base + NETJET_AUXCTRL); 335 /* Reset an */
369 outb(TJ_AMD_IRQ, cs->hw.njet.base + NETJET_IRQMASK1); 336 cs->hw.njet.ctrl_reg = 0x07; // geändert von 0xff
370 outb(cs->hw.njet.auxd, cs->hw.njet.auxa); 337 outb(cs->hw.njet.ctrl_reg, cs->hw.njet.base + NETJET_CTRL);
338 /* 20 ms Pause */
339 mdelay(20);
371 340
372 break; 341 cs->hw.njet.ctrl_reg = 0x30; /* Reset Off and status read clear */
373 } 342 outb(cs->hw.njet.ctrl_reg, cs->hw.njet.base + NETJET_CTRL);
374#else 343 mdelay(10);
375 344
376 printk(KERN_WARNING "enter:now PCI: NO_PCI_BIOS\n"); 345 cs->hw.njet.auxd = 0x00; // war 0xc0
377 printk(KERN_WARNING "enter:now PCI: unable to config Formula-n enter:now ISDN PCI ab\n"); 346 cs->hw.njet.dmactrl = 0;
378 return (0);
379 347
380#endif /* CONFIG_PCI */ 348 outb(~TJ_AMD_IRQ, cs->hw.njet.base + NETJET_AUXCTRL);
349 outb(TJ_AMD_IRQ, cs->hw.njet.base + NETJET_IRQMASK1);
350 outb(cs->hw.njet.auxd, cs->hw.njet.auxa);
351}
381 352
382 bytecnt = 256; 353static int __devinit en_cs_init_rest(struct IsdnCard *card,
354 struct IsdnCardState *cs)
355{
356 const int bytecnt = 256;
383 357
384 printk(KERN_INFO 358 printk(KERN_INFO
385 "enter:now PCI: PCI card configured at 0x%lx IRQ %d\n", 359 "enter:now PCI: PCI card configured at 0x%lx IRQ %d\n",
386 cs->hw.njet.base, cs->irq); 360 cs->hw.njet.base, cs->irq);
387 if (!request_region(cs->hw.njet.base, bytecnt, "Fn_ISDN")) { 361 if (!request_region(cs->hw.njet.base, bytecnt, "Fn_ISDN")) {
388 printk(KERN_WARNING 362 printk(KERN_WARNING
389 "HiSax: %s config port %lx-%lx already in use\n", 363 "HiSax: enter:now config port %lx-%lx already in use\n",
390 CardType[card->typ], 364 cs->hw.njet.base,
391 cs->hw.njet.base, 365 cs->hw.njet.base + bytecnt);
392 cs->hw.njet.base + bytecnt);
393 return (0); 366 return (0);
394 } 367 }
368
395 setup_Amd7930(cs); 369 setup_Amd7930(cs);
396 cs->hw.njet.last_is0 = 0; 370 cs->hw.njet.last_is0 = 0;
397 /* macro rByteAMD */ 371 /* macro rByteAMD */
@@ -407,5 +381,44 @@ setup_enternow_pci(struct IsdnCard *card)
407 cs->irq_func = &enpci_interrupt; 381 cs->irq_func = &enpci_interrupt;
408 cs->irq_flags |= IRQF_SHARED; 382 cs->irq_flags |= IRQF_SHARED;
409 383
410 return (1); 384 return (1);
385}
386
387static struct pci_dev *dev_netjet __devinitdata = NULL;
388
389/* called by config.c */
390int __devinit
391setup_enternow_pci(struct IsdnCard *card)
392{
393 int ret;
394 struct IsdnCardState *cs = card->cs;
395 char tmp[64];
396
397#ifdef __BIG_ENDIAN
398#error "not running on big endian machines now"
399#endif
400
401 strcpy(tmp, enternow_pci_rev);
402 printk(KERN_INFO "HiSax: Formula-n Europe AG enter:now ISDN PCI driver Rev. %s\n", HiSax_getrev(tmp));
403 if (cs->typ != ISDN_CTYPE_ENTERNOW)
404 return(0);
405 test_and_clear_bit(FLG_LOCK_ATOMIC, &cs->HW_Flags);
406
407 for ( ;; )
408 {
409 if ((dev_netjet = pci_find_device(PCI_VENDOR_ID_TIGERJET,
410 PCI_DEVICE_ID_TIGERJET_300, dev_netjet))) {
411 ret = en_pci_probe(dev_netjet, cs);
412 if (!ret)
413 return(0);
414 } else {
415 printk(KERN_WARNING "enter:now PCI: No PCI card found\n");
416 return(0);
417 }
418
419 en_cs_init(card, cs);
420 break;
421 }
422
423 return en_cs_init_rest(card, cs);
411} 424}
diff --git a/drivers/isdn/hisax/hfc_pci.c b/drivers/isdn/hisax/hfc_pci.c
index 8a48a3ce0a55..077080aca799 100644
--- a/drivers/isdn/hisax/hfc_pci.c
+++ b/drivers/isdn/hisax/hfc_pci.c
@@ -6,7 +6,7 @@
6 * based on existing driver for CCD hfc ISA cards 6 * based on existing driver for CCD hfc ISA cards
7 * Copyright by Werner Cornelius <werner@isdn4linux.de> 7 * Copyright by Werner Cornelius <werner@isdn4linux.de>
8 * by Karsten Keil <keil@isdn4linux.de> 8 * by Karsten Keil <keil@isdn4linux.de>
9 * 9 *
10 * This software may be used and distributed according to the terms 10 * This software may be used and distributed according to the terms
11 * of the GNU General Public License, incorporated herein by reference. 11 * of the GNU General Public License, incorporated herein by reference.
12 * 12 *
@@ -67,8 +67,6 @@ static const PCI_ENTRY id_list[] =
67}; 67};
68 68
69 69
70#ifdef CONFIG_PCI
71
72/******************************************/ 70/******************************************/
73/* free hardware resources used by driver */ 71/* free hardware resources used by driver */
74/******************************************/ 72/******************************************/
@@ -237,7 +235,7 @@ static void hfcpci_clear_fifo_rx(struct IsdnCardState *cs, int fifo)
237 if (fifo_state) 235 if (fifo_state)
238 cs->hw.hfcpci.fifo_en |= fifo_state; 236 cs->hw.hfcpci.fifo_en |= fifo_state;
239 Write_hfc(cs, HFCPCI_FIFO_EN, cs->hw.hfcpci.fifo_en); 237 Write_hfc(cs, HFCPCI_FIFO_EN, cs->hw.hfcpci.fifo_en);
240} 238}
241 239
242/***************************************/ 240/***************************************/
243/* clear the desired B-channel tx fifo */ 241/* clear the desired B-channel tx fifo */
@@ -263,7 +261,7 @@ static void hfcpci_clear_fifo_tx(struct IsdnCardState *cs, int fifo)
263 if (fifo_state) 261 if (fifo_state)
264 cs->hw.hfcpci.fifo_en |= fifo_state; 262 cs->hw.hfcpci.fifo_en |= fifo_state;
265 Write_hfc(cs, HFCPCI_FIFO_EN, cs->hw.hfcpci.fifo_en); 263 Write_hfc(cs, HFCPCI_FIFO_EN, cs->hw.hfcpci.fifo_en);
266} 264}
267 265
268/*********************************************/ 266/*********************************************/
269/* read a complete B-frame out of the buffer */ 267/* read a complete B-frame out of the buffer */
@@ -511,7 +509,6 @@ main_rec_hfcpci(struct BCState *bcs)
511 test_and_clear_bit(FLG_LOCK_ATOMIC, &cs->HW_Flags); 509 test_and_clear_bit(FLG_LOCK_ATOMIC, &cs->HW_Flags);
512 if (count && receive) 510 if (count && receive)
513 goto Begin; 511 goto Begin;
514 return;
515} 512}
516 513
517/**************************/ 514/**************************/
@@ -582,7 +579,6 @@ hfcpci_fill_dfifo(struct IsdnCardState *cs)
582 579
583 dev_kfree_skb_any(cs->tx_skb); 580 dev_kfree_skb_any(cs->tx_skb);
584 cs->tx_skb = NULL; 581 cs->tx_skb = NULL;
585 return;
586} 582}
587 583
588/**************************/ 584/**************************/
@@ -729,7 +725,6 @@ hfcpci_fill_fifo(struct BCState *bcs)
729 dev_kfree_skb_any(bcs->tx_skb); 725 dev_kfree_skb_any(bcs->tx_skb);
730 bcs->tx_skb = NULL; 726 bcs->tx_skb = NULL;
731 test_and_clear_bit(BC_FLG_BUSY, &bcs->Flag); 727 test_and_clear_bit(BC_FLG_BUSY, &bcs->Flag);
732 return;
733} 728}
734 729
735/**********************************************/ 730/**********************************************/
@@ -924,7 +919,6 @@ receive_emsg(struct IsdnCardState *cs)
924 test_and_clear_bit(FLG_LOCK_ATOMIC, &cs->HW_Flags); 919 test_and_clear_bit(FLG_LOCK_ATOMIC, &cs->HW_Flags);
925 if (count && receive) 920 if (count && receive)
926 goto Begin; 921 goto Begin;
927 return;
928} /* receive_emsg */ 922} /* receive_emsg */
929 923
930/*********************/ 924/*********************/
@@ -1350,13 +1344,13 @@ mode_hfcpci(struct BCState *bcs, int mode, int bc)
1350 cs->hw.hfcpci.sctrl_r |= SCTRL_B1_ENA; 1344 cs->hw.hfcpci.sctrl_r |= SCTRL_B1_ENA;
1351 } 1345 }
1352 if (fifo2) { 1346 if (fifo2) {
1353 cs->hw.hfcpci.last_bfifo_cnt[1] = 0; 1347 cs->hw.hfcpci.last_bfifo_cnt[1] = 0;
1354 cs->hw.hfcpci.fifo_en |= HFCPCI_FIFOEN_B2; 1348 cs->hw.hfcpci.fifo_en |= HFCPCI_FIFOEN_B2;
1355 cs->hw.hfcpci.int_m1 |= (HFCPCI_INTS_B2TRANS + HFCPCI_INTS_B2REC); 1349 cs->hw.hfcpci.int_m1 |= (HFCPCI_INTS_B2TRANS + HFCPCI_INTS_B2REC);
1356 cs->hw.hfcpci.ctmt &= ~2; 1350 cs->hw.hfcpci.ctmt &= ~2;
1357 cs->hw.hfcpci.conn &= ~0x18; 1351 cs->hw.hfcpci.conn &= ~0x18;
1358 } else { 1352 } else {
1359 cs->hw.hfcpci.last_bfifo_cnt[0] = 0; 1353 cs->hw.hfcpci.last_bfifo_cnt[0] = 0;
1360 cs->hw.hfcpci.fifo_en |= HFCPCI_FIFOEN_B1; 1354 cs->hw.hfcpci.fifo_en |= HFCPCI_FIFOEN_B1;
1361 cs->hw.hfcpci.int_m1 |= (HFCPCI_INTS_B1TRANS + HFCPCI_INTS_B1REC); 1355 cs->hw.hfcpci.int_m1 |= (HFCPCI_INTS_B1TRANS + HFCPCI_INTS_B1REC);
1362 cs->hw.hfcpci.ctmt &= ~1; 1356 cs->hw.hfcpci.ctmt &= ~1;
@@ -1642,8 +1636,6 @@ hfcpci_card_msg(struct IsdnCardState *cs, int mt, void *arg)
1642/* this variable is used as card index when more than one cards are present */ 1636/* this variable is used as card index when more than one cards are present */
1643static struct pci_dev *dev_hfcpci __devinitdata = NULL; 1637static struct pci_dev *dev_hfcpci __devinitdata = NULL;
1644 1638
1645#endif /* CONFIG_PCI */
1646
1647int __devinit 1639int __devinit
1648setup_hfcpci(struct IsdnCard *card) 1640setup_hfcpci(struct IsdnCard *card)
1649{ 1641{
@@ -1656,96 +1648,99 @@ setup_hfcpci(struct IsdnCard *card)
1656#ifdef __BIG_ENDIAN 1648#ifdef __BIG_ENDIAN
1657#error "not running on big endian machines now" 1649#error "not running on big endian machines now"
1658#endif 1650#endif
1651
1659 strcpy(tmp, hfcpci_revision); 1652 strcpy(tmp, hfcpci_revision);
1660 printk(KERN_INFO "HiSax: HFC-PCI driver Rev. %s\n", HiSax_getrev(tmp)); 1653 printk(KERN_INFO "HiSax: HFC-PCI driver Rev. %s\n", HiSax_getrev(tmp));
1661#ifdef CONFIG_PCI 1654
1662 cs->hw.hfcpci.int_s1 = 0; 1655 cs->hw.hfcpci.int_s1 = 0;
1663 cs->dc.hfcpci.ph_state = 0; 1656 cs->dc.hfcpci.ph_state = 0;
1664 cs->hw.hfcpci.fifo = 255; 1657 cs->hw.hfcpci.fifo = 255;
1665 if (cs->typ == ISDN_CTYPE_HFC_PCI) { 1658 if (cs->typ != ISDN_CTYPE_HFC_PCI)
1666 i = 0; 1659 return(0);
1667 while (id_list[i].vendor_id) { 1660
1668 tmp_hfcpci = pci_find_device(id_list[i].vendor_id, 1661 i = 0;
1669 id_list[i].device_id, 1662 while (id_list[i].vendor_id) {
1670 dev_hfcpci); 1663 tmp_hfcpci = pci_find_device(id_list[i].vendor_id,
1671 i++; 1664 id_list[i].device_id,
1672 if (tmp_hfcpci) { 1665 dev_hfcpci);
1673 if (pci_enable_device(tmp_hfcpci)) 1666 i++;
1674 continue;
1675 pci_set_master(tmp_hfcpci);
1676 if ((card->para[0]) && (card->para[0] != (tmp_hfcpci->resource[ 0].start & PCI_BASE_ADDRESS_IO_MASK)))
1677 continue;
1678 else
1679 break;
1680 }
1681 }
1682
1683 if (tmp_hfcpci) { 1667 if (tmp_hfcpci) {
1684 i--; 1668 if (pci_enable_device(tmp_hfcpci))
1685 dev_hfcpci = tmp_hfcpci; /* old device */ 1669 continue;
1686 cs->hw.hfcpci.dev = dev_hfcpci; 1670 pci_set_master(tmp_hfcpci);
1687 cs->irq = dev_hfcpci->irq; 1671 if ((card->para[0]) && (card->para[0] != (tmp_hfcpci->resource[ 0].start & PCI_BASE_ADDRESS_IO_MASK)))
1688 if (!cs->irq) { 1672 continue;
1689 printk(KERN_WARNING "HFC-PCI: No IRQ for PCI card found\n"); 1673 else
1690 return (0); 1674 break;
1691 }
1692 cs->hw.hfcpci.pci_io = (char *)(unsigned long)dev_hfcpci->resource[1].start;
1693 printk(KERN_INFO "HiSax: HFC-PCI card manufacturer: %s card name: %s\n", id_list[i].vendor_name, id_list[i].card_name);
1694 } else {
1695 printk(KERN_WARNING "HFC-PCI: No PCI card found\n");
1696 return (0);
1697 }
1698 if (!cs->hw.hfcpci.pci_io) {
1699 printk(KERN_WARNING "HFC-PCI: No IO-Mem for PCI card found\n");
1700 return (0);
1701 }
1702 /* Allocate memory for FIFOS */
1703 /* Because the HFC-PCI needs a 32K physical alignment, we */
1704 /* need to allocate the double mem and align the address */
1705 if (!(cs->hw.hfcpci.share_start = kmalloc(65536, GFP_KERNEL))) {
1706 printk(KERN_WARNING "HFC-PCI: Error allocating memory for FIFO!\n");
1707 return 0;
1708 } 1675 }
1709 cs->hw.hfcpci.fifos = (void *) 1676 }
1710 (((ulong) cs->hw.hfcpci.share_start) & ~0x7FFF) + 0x8000; 1677
1711 pci_write_config_dword(cs->hw.hfcpci.dev, 0x80, (u_int) virt_to_bus(cs->hw.hfcpci.fifos)); 1678 if (!tmp_hfcpci) {
1712 cs->hw.hfcpci.pci_io = ioremap((ulong) cs->hw.hfcpci.pci_io, 256); 1679 printk(KERN_WARNING "HFC-PCI: No PCI card found\n");
1713 printk(KERN_INFO 1680 return (0);
1714 "HFC-PCI: defined at mem %p fifo %p(%#x) IRQ %d HZ %d\n", 1681 }
1715 cs->hw.hfcpci.pci_io, 1682
1716 cs->hw.hfcpci.fifos, 1683 i--;
1717 (u_int) virt_to_bus(cs->hw.hfcpci.fifos), 1684 dev_hfcpci = tmp_hfcpci; /* old device */
1718 cs->irq, HZ); 1685 cs->hw.hfcpci.dev = dev_hfcpci;
1719 spin_lock_irqsave(&cs->lock, flags); 1686 cs->irq = dev_hfcpci->irq;
1720 pci_write_config_word(cs->hw.hfcpci.dev, PCI_COMMAND, PCI_ENA_MEMIO); /* enable memory mapped ports, disable busmaster */ 1687 if (!cs->irq) {
1721 cs->hw.hfcpci.int_m2 = 0; /* disable alle interrupts */ 1688 printk(KERN_WARNING "HFC-PCI: No IRQ for PCI card found\n");
1722 cs->hw.hfcpci.int_m1 = 0; 1689 return (0);
1723 Write_hfc(cs, HFCPCI_INT_M1, cs->hw.hfcpci.int_m1); 1690 }
1724 Write_hfc(cs, HFCPCI_INT_M2, cs->hw.hfcpci.int_m2); 1691 cs->hw.hfcpci.pci_io = (char *)(unsigned long)dev_hfcpci->resource[1].start;
1725 /* At this point the needed PCI config is done */ 1692 printk(KERN_INFO "HiSax: HFC-PCI card manufacturer: %s card name: %s\n", id_list[i].vendor_name, id_list[i].card_name);
1726 /* fifos are still not enabled */ 1693
1727 INIT_WORK(&cs->tqueue, hfcpci_bh); 1694 if (!cs->hw.hfcpci.pci_io) {
1728 cs->setstack_d = setstack_hfcpci; 1695 printk(KERN_WARNING "HFC-PCI: No IO-Mem for PCI card found\n");
1729 cs->BC_Send_Data = &hfcpci_send_data; 1696 return (0);
1730 cs->readisac = NULL; 1697 }
1731 cs->writeisac = NULL; 1698 /* Allocate memory for FIFOS */
1732 cs->readisacfifo = NULL; 1699 /* Because the HFC-PCI needs a 32K physical alignment, we */
1733 cs->writeisacfifo = NULL; 1700 /* need to allocate the double mem and align the address */
1734 cs->BC_Read_Reg = NULL; 1701 if (!(cs->hw.hfcpci.share_start = kmalloc(65536, GFP_KERNEL))) {
1735 cs->BC_Write_Reg = NULL; 1702 printk(KERN_WARNING "HFC-PCI: Error allocating memory for FIFO!\n");
1736 cs->irq_func = &hfcpci_interrupt; 1703 return 0;
1737 cs->irq_flags |= IRQF_SHARED; 1704 }
1738 cs->hw.hfcpci.timer.function = (void *) hfcpci_Timer; 1705 cs->hw.hfcpci.fifos = (void *)
1739 cs->hw.hfcpci.timer.data = (long) cs; 1706 (((ulong) cs->hw.hfcpci.share_start) & ~0x7FFF) + 0x8000;
1740 init_timer(&cs->hw.hfcpci.timer); 1707 pci_write_config_dword(cs->hw.hfcpci.dev, 0x80, (u_int) virt_to_bus(cs->hw.hfcpci.fifos));
1741 cs->cardmsg = &hfcpci_card_msg; 1708 cs->hw.hfcpci.pci_io = ioremap((ulong) cs->hw.hfcpci.pci_io, 256);
1742 cs->auxcmd = &hfcpci_auxcmd; 1709 printk(KERN_INFO
1743 spin_unlock_irqrestore(&cs->lock, flags); 1710 "HFC-PCI: defined at mem %p fifo %p(%#x) IRQ %d HZ %d\n",
1744 return (1); 1711 cs->hw.hfcpci.pci_io,
1745 } else 1712 cs->hw.hfcpci.fifos,
1746 return (0); /* no valid card type */ 1713 (u_int) virt_to_bus(cs->hw.hfcpci.fifos),
1747#else 1714 cs->irq, HZ);
1748 printk(KERN_WARNING "HFC-PCI: NO_PCI_BIOS\n"); 1715
1749 return (0); 1716 spin_lock_irqsave(&cs->lock, flags);
1750#endif /* CONFIG_PCI */ 1717
1718 pci_write_config_word(cs->hw.hfcpci.dev, PCI_COMMAND, PCI_ENA_MEMIO); /* enable memory mapped ports, disable busmaster */
1719 cs->hw.hfcpci.int_m2 = 0; /* disable alle interrupts */
1720 cs->hw.hfcpci.int_m1 = 0;
1721 Write_hfc(cs, HFCPCI_INT_M1, cs->hw.hfcpci.int_m1);
1722 Write_hfc(cs, HFCPCI_INT_M2, cs->hw.hfcpci.int_m2);
1723 /* At this point the needed PCI config is done */
1724 /* fifos are still not enabled */
1725
1726 INIT_WORK(&cs->tqueue, hfcpci_bh);
1727 cs->setstack_d = setstack_hfcpci;
1728 cs->BC_Send_Data = &hfcpci_send_data;
1729 cs->readisac = NULL;
1730 cs->writeisac = NULL;
1731 cs->readisacfifo = NULL;
1732 cs->writeisacfifo = NULL;
1733 cs->BC_Read_Reg = NULL;
1734 cs->BC_Write_Reg = NULL;
1735 cs->irq_func = &hfcpci_interrupt;
1736 cs->irq_flags |= IRQF_SHARED;
1737 cs->hw.hfcpci.timer.function = (void *) hfcpci_Timer;
1738 cs->hw.hfcpci.timer.data = (long) cs;
1739 init_timer(&cs->hw.hfcpci.timer);
1740 cs->cardmsg = &hfcpci_card_msg;
1741 cs->auxcmd = &hfcpci_auxcmd;
1742
1743 spin_unlock_irqrestore(&cs->lock, flags);
1744
1745 return (1);
1751} 1746}
diff --git a/drivers/isdn/hisax/nj_s.c b/drivers/isdn/hisax/nj_s.c
index c09ffb135330..fa2db87667c8 100644
--- a/drivers/isdn/hisax/nj_s.c
+++ b/drivers/isdn/hisax/nj_s.c
@@ -148,107 +148,87 @@ NETjet_S_card_msg(struct IsdnCardState *cs, int mt, void *arg)
148 return(0); 148 return(0);
149} 149}
150 150
151static struct pci_dev *dev_netjet __devinitdata = NULL; 151static int __devinit njs_pci_probe(struct pci_dev *dev_netjet,
152 152 struct IsdnCardState *cs)
153int __devinit
154setup_netjet_s(struct IsdnCard *card)
155{ 153{
156 int bytecnt,cfg; 154 int cfg;
157 struct IsdnCardState *cs = card->cs;
158 char tmp[64];
159 155
160#ifdef __BIG_ENDIAN 156 if (pci_enable_device(dev_netjet))
161#error "not running on big endian machines now"
162#endif
163 strcpy(tmp, NETjet_S_revision);
164 printk(KERN_INFO "HiSax: Traverse Tech. NETjet-S driver Rev. %s\n", HiSax_getrev(tmp));
165 if (cs->typ != ISDN_CTYPE_NETJET_S)
166 return(0); 157 return(0);
167 test_and_clear_bit(FLG_LOCK_ATOMIC, &cs->HW_Flags); 158 pci_set_master(dev_netjet);
159 cs->irq = dev_netjet->irq;
160 if (!cs->irq) {
161 printk(KERN_WARNING "NETjet-S: No IRQ for PCI card found\n");
162 return(0);
163 }
164 cs->hw.njet.base = pci_resource_start(dev_netjet, 0);
165 if (!cs->hw.njet.base) {
166 printk(KERN_WARNING "NETjet-S: No IO-Adr for PCI card found\n");
167 return(0);
168 }
169 /* the TJ300 and TJ320 must be detected, the IRQ handling is different
170 * unfortunatly the chips use the same device ID, but the TJ320 has
171 * the bit20 in status PCI cfg register set
172 */
173 pci_read_config_dword(dev_netjet, 0x04, &cfg);
174 if (cfg & 0x00100000)
175 cs->subtyp = 1; /* TJ320 */
176 else
177 cs->subtyp = 0; /* TJ300 */
178 /* 2001/10/04 Christoph Ersfeld, Formula-n Europe AG www.formula-n.com */
179 if ((dev_netjet->subsystem_vendor == 0x55) &&
180 (dev_netjet->subsystem_device == 0x02)) {
181 printk(KERN_WARNING "Netjet: You tried to load this driver with an incompatible TigerJet-card\n");
182 printk(KERN_WARNING "Use type=41 for Formula-n enter:now ISDN PCI and compatible\n");
183 return(0);
184 }
185 /* end new code */
168 186
169#ifdef CONFIG_PCI 187 return(1);
188}
170 189
171 for ( ;; ) 190static int __devinit njs_cs_init(struct IsdnCard *card,
172 { 191 struct IsdnCardState *cs)
173 if ((dev_netjet = pci_find_device(PCI_VENDOR_ID_TIGERJET, 192{
174 PCI_DEVICE_ID_TIGERJET_300, dev_netjet))) {
175 if (pci_enable_device(dev_netjet))
176 return(0);
177 pci_set_master(dev_netjet);
178 cs->irq = dev_netjet->irq;
179 if (!cs->irq) {
180 printk(KERN_WARNING "NETjet-S: No IRQ for PCI card found\n");
181 return(0);
182 }
183 cs->hw.njet.base = pci_resource_start(dev_netjet, 0);
184 if (!cs->hw.njet.base) {
185 printk(KERN_WARNING "NETjet-S: No IO-Adr for PCI card found\n");
186 return(0);
187 }
188 /* the TJ300 and TJ320 must be detected, the IRQ handling is different
189 * unfortunatly the chips use the same device ID, but the TJ320 has
190 * the bit20 in status PCI cfg register set
191 */
192 pci_read_config_dword(dev_netjet, 0x04, &cfg);
193 if (cfg & 0x00100000)
194 cs->subtyp = 1; /* TJ320 */
195 else
196 cs->subtyp = 0; /* TJ300 */
197 /* 2001/10/04 Christoph Ersfeld, Formula-n Europe AG www.formula-n.com */
198 if ((dev_netjet->subsystem_vendor == 0x55) &&
199 (dev_netjet->subsystem_device == 0x02)) {
200 printk(KERN_WARNING "Netjet: You tried to load this driver with an incompatible TigerJet-card\n");
201 printk(KERN_WARNING "Use type=41 for Formula-n enter:now ISDN PCI and compatible\n");
202 return(0);
203 }
204 /* end new code */
205 } else {
206 printk(KERN_WARNING "NETjet-S: No PCI card found\n");
207 return(0);
208 }
209 193
210 cs->hw.njet.auxa = cs->hw.njet.base + NETJET_AUXDATA; 194 cs->hw.njet.auxa = cs->hw.njet.base + NETJET_AUXDATA;
211 cs->hw.njet.isac = cs->hw.njet.base | NETJET_ISAC_OFF; 195 cs->hw.njet.isac = cs->hw.njet.base | NETJET_ISAC_OFF;
212 196
213 cs->hw.njet.ctrl_reg = 0xff; /* Reset On */ 197 cs->hw.njet.ctrl_reg = 0xff; /* Reset On */
214 byteout(cs->hw.njet.base + NETJET_CTRL, cs->hw.njet.ctrl_reg); 198 byteout(cs->hw.njet.base + NETJET_CTRL, cs->hw.njet.ctrl_reg);
215 mdelay(10); 199 mdelay(10);
216 200
217 cs->hw.njet.ctrl_reg = 0x00; /* Reset Off and status read clear */ 201 cs->hw.njet.ctrl_reg = 0x00; /* Reset Off and status read clear */
218 byteout(cs->hw.njet.base + NETJET_CTRL, cs->hw.njet.ctrl_reg); 202 byteout(cs->hw.njet.base + NETJET_CTRL, cs->hw.njet.ctrl_reg);
219 mdelay(10); 203 mdelay(10);
220 204
221 cs->hw.njet.auxd = 0xC0; 205 cs->hw.njet.auxd = 0xC0;
222 cs->hw.njet.dmactrl = 0; 206 cs->hw.njet.dmactrl = 0;
223 207
224 byteout(cs->hw.njet.base + NETJET_AUXCTRL, ~NETJET_ISACIRQ); 208 byteout(cs->hw.njet.base + NETJET_AUXCTRL, ~NETJET_ISACIRQ);
225 byteout(cs->hw.njet.base + NETJET_IRQMASK1, NETJET_ISACIRQ); 209 byteout(cs->hw.njet.base + NETJET_IRQMASK1, NETJET_ISACIRQ);
226 byteout(cs->hw.njet.auxa, cs->hw.njet.auxd); 210 byteout(cs->hw.njet.auxa, cs->hw.njet.auxd);
227 211
228 switch ( ( ( NETjet_ReadIC( cs, ISAC_RBCH ) >> 5 ) & 3 ) ) 212 switch ( ( ( NETjet_ReadIC( cs, ISAC_RBCH ) >> 5 ) & 3 ) )
229 { 213 {
230 case 0 : 214 case 0 :
231 break; 215 return 1; /* end loop */
232 216
233 case 3 : 217 case 3 :
234 printk( KERN_WARNING "NETjet-S: NETspider-U PCI card found\n" ); 218 printk( KERN_WARNING "NETjet-S: NETspider-U PCI card found\n" );
235 continue; 219 return -1; /* continue looping */
236 220
237 default : 221 default :
238 printk( KERN_WARNING "NETjet-S: No PCI card found\n" ); 222 printk( KERN_WARNING "NETjet-S: No PCI card found\n" );
239 return 0; 223 return 0; /* end loop & function */
240 }
241 break;
242 } 224 }
243#else 225 return 1; /* end loop */
244 226}
245 printk(KERN_WARNING "NETjet-S: NO_PCI_BIOS\n");
246 printk(KERN_WARNING "NETjet-S: unable to config NETJET-S PCI\n");
247 return (0);
248
249#endif /* CONFIG_PCI */
250 227
251 bytecnt = 256; 228static int __devinit njs_cs_init_rest(struct IsdnCard *card,
229 struct IsdnCardState *cs)
230{
231 const int bytecnt = 256;
252 232
253 printk(KERN_INFO 233 printk(KERN_INFO
254 "NETjet-S: %s card configured at %#lx IRQ %d\n", 234 "NETjet-S: %s card configured at %#lx IRQ %d\n",
@@ -273,5 +253,47 @@ setup_netjet_s(struct IsdnCard *card)
273 cs->irq_func = &netjet_s_interrupt; 253 cs->irq_func = &netjet_s_interrupt;
274 cs->irq_flags |= IRQF_SHARED; 254 cs->irq_flags |= IRQF_SHARED;
275 ISACVersion(cs, "NETjet-S:"); 255 ISACVersion(cs, "NETjet-S:");
256
276 return (1); 257 return (1);
277} 258}
259
260static struct pci_dev *dev_netjet __devinitdata = NULL;
261
262int __devinit
263setup_netjet_s(struct IsdnCard *card)
264{
265 int ret;
266 struct IsdnCardState *cs = card->cs;
267 char tmp[64];
268
269#ifdef __BIG_ENDIAN
270#error "not running on big endian machines now"
271#endif
272 strcpy(tmp, NETjet_S_revision);
273 printk(KERN_INFO "HiSax: Traverse Tech. NETjet-S driver Rev. %s\n", HiSax_getrev(tmp));
274 if (cs->typ != ISDN_CTYPE_NETJET_S)
275 return(0);
276 test_and_clear_bit(FLG_LOCK_ATOMIC, &cs->HW_Flags);
277
278 for ( ;; )
279 {
280 if ((dev_netjet = pci_find_device(PCI_VENDOR_ID_TIGERJET,
281 PCI_DEVICE_ID_TIGERJET_300, dev_netjet))) {
282 ret = njs_pci_probe(dev_netjet, cs);
283 if (!ret)
284 return(0);
285 } else {
286 printk(KERN_WARNING "NETjet-S: No PCI card found\n");
287 return(0);
288 }
289
290 ret = njs_cs_init(card, cs);
291 if (!ret)
292 return(0);
293 if (ret > 0)
294 break;
295 /* otherwise, ret < 0, continue looping */
296 }
297
298 return njs_cs_init_rest(card, cs);
299}
diff --git a/drivers/isdn/hisax/nj_u.c b/drivers/isdn/hisax/nj_u.c
index 8202cf34ecae..f017d3816b1d 100644
--- a/drivers/isdn/hisax/nj_u.c
+++ b/drivers/isdn/hisax/nj_u.c
@@ -128,93 +128,69 @@ NETjet_U_card_msg(struct IsdnCardState *cs, int mt, void *arg)
128 return(0); 128 return(0);
129} 129}
130 130
131static struct pci_dev *dev_netjet __devinitdata = NULL; 131static int __devinit nju_pci_probe(struct pci_dev *dev_netjet,
132 132 struct IsdnCardState *cs)
133int __devinit
134setup_netjet_u(struct IsdnCard *card)
135{ 133{
136 int bytecnt; 134 if (pci_enable_device(dev_netjet))
137 struct IsdnCardState *cs = card->cs;
138 char tmp[64];
139#ifdef CONFIG_PCI
140#endif
141#ifdef __BIG_ENDIAN
142#error "not running on big endian machines now"
143#endif
144 strcpy(tmp, NETjet_U_revision);
145 printk(KERN_INFO "HiSax: Traverse Tech. NETspider-U driver Rev. %s\n", HiSax_getrev(tmp));
146 if (cs->typ != ISDN_CTYPE_NETJET_U)
147 return(0); 135 return(0);
148 test_and_clear_bit(FLG_LOCK_ATOMIC, &cs->HW_Flags); 136 pci_set_master(dev_netjet);
149 137 cs->irq = dev_netjet->irq;
150#ifdef CONFIG_PCI 138 if (!cs->irq) {
139 printk(KERN_WARNING "NETspider-U: No IRQ for PCI card found\n");
140 return(0);
141 }
142 cs->hw.njet.base = pci_resource_start(dev_netjet, 0);
143 if (!cs->hw.njet.base) {
144 printk(KERN_WARNING "NETspider-U: No IO-Adr for PCI card found\n");
145 return(0);
146 }
151 147
152 for ( ;; ) 148 return (1);
153 { 149}
154 if ((dev_netjet = pci_find_device(PCI_VENDOR_ID_TIGERJET,
155 PCI_DEVICE_ID_TIGERJET_300, dev_netjet))) {
156 if (pci_enable_device(dev_netjet))
157 return(0);
158 pci_set_master(dev_netjet);
159 cs->irq = dev_netjet->irq;
160 if (!cs->irq) {
161 printk(KERN_WARNING "NETspider-U: No IRQ for PCI card found\n");
162 return(0);
163 }
164 cs->hw.njet.base = pci_resource_start(dev_netjet, 0);
165 if (!cs->hw.njet.base) {
166 printk(KERN_WARNING "NETspider-U: No IO-Adr for PCI card found\n");
167 return(0);
168 }
169 } else {
170 printk(KERN_WARNING "NETspider-U: No PCI card found\n");
171 return(0);
172 }
173 150
174 cs->hw.njet.auxa = cs->hw.njet.base + NETJET_AUXDATA; 151static int __devinit nju_cs_init(struct IsdnCard *card,
175 cs->hw.njet.isac = cs->hw.njet.base | NETJET_ISAC_OFF; 152 struct IsdnCardState *cs)
176 mdelay(10); 153{
154 cs->hw.njet.auxa = cs->hw.njet.base + NETJET_AUXDATA;
155 cs->hw.njet.isac = cs->hw.njet.base | NETJET_ISAC_OFF;
156 mdelay(10);
177 157
178 cs->hw.njet.ctrl_reg = 0xff; /* Reset On */ 158 cs->hw.njet.ctrl_reg = 0xff; /* Reset On */
179 byteout(cs->hw.njet.base + NETJET_CTRL, cs->hw.njet.ctrl_reg); 159 byteout(cs->hw.njet.base + NETJET_CTRL, cs->hw.njet.ctrl_reg);
180 mdelay(10); 160 mdelay(10);
181 161
182 cs->hw.njet.ctrl_reg = 0x00; /* Reset Off and status read clear */ 162 cs->hw.njet.ctrl_reg = 0x00; /* Reset Off and status read clear */
183 byteout(cs->hw.njet.base + NETJET_CTRL, cs->hw.njet.ctrl_reg); 163 byteout(cs->hw.njet.base + NETJET_CTRL, cs->hw.njet.ctrl_reg);
184 mdelay(10); 164 mdelay(10);
185 165
186 cs->hw.njet.auxd = 0xC0; 166 cs->hw.njet.auxd = 0xC0;
187 cs->hw.njet.dmactrl = 0; 167 cs->hw.njet.dmactrl = 0;
188 168
189 byteout(cs->hw.njet.auxa, 0); 169 byteout(cs->hw.njet.auxa, 0);
190 byteout(cs->hw.njet.base + NETJET_AUXCTRL, ~NETJET_ISACIRQ); 170 byteout(cs->hw.njet.base + NETJET_AUXCTRL, ~NETJET_ISACIRQ);
191 byteout(cs->hw.njet.base + NETJET_IRQMASK1, NETJET_ISACIRQ); 171 byteout(cs->hw.njet.base + NETJET_IRQMASK1, NETJET_ISACIRQ);
192 byteout(cs->hw.njet.auxa, cs->hw.njet.auxd); 172 byteout(cs->hw.njet.auxa, cs->hw.njet.auxd);
193 173
194 switch ( ( ( NETjet_ReadIC( cs, ICC_RBCH ) >> 5 ) & 3 ) ) 174 switch ( ( ( NETjet_ReadIC( cs, ICC_RBCH ) >> 5 ) & 3 ) )
195 { 175 {
196 case 3 : 176 case 3 :
197 break; 177 return 1; /* end loop */
198 178
199 case 0 : 179 case 0 :
200 printk( KERN_WARNING "NETspider-U: NETjet-S PCI card found\n" ); 180 printk( KERN_WARNING "NETspider-U: NETjet-S PCI card found\n" );
201 continue; 181 return -1; /* continue looping */
202 182
203 default : 183 default :
204 printk( KERN_WARNING "NETspider-U: No PCI card found\n" ); 184 printk( KERN_WARNING "NETspider-U: No PCI card found\n" );
205 return 0; 185 return 0; /* end loop & function */
206 }
207 break;
208 } 186 }
209#else 187 return 1; /* end loop */
210 188}
211 printk(KERN_WARNING "NETspider-U: NO_PCI_BIOS\n");
212 printk(KERN_WARNING "NETspider-U: unable to config NETspider-U PCI\n");
213 return (0);
214
215#endif /* CONFIG_PCI */
216 189
217 bytecnt = 256; 190static int __devinit nju_cs_init_rest(struct IsdnCard *card,
191 struct IsdnCardState *cs)
192{
193 const int bytecnt = 256;
218 194
219 printk(KERN_INFO 195 printk(KERN_INFO
220 "NETspider-U: PCI card configured at %#lx IRQ %d\n", 196 "NETspider-U: PCI card configured at %#lx IRQ %d\n",
@@ -239,5 +215,48 @@ setup_netjet_u(struct IsdnCard *card)
239 cs->irq_func = &netjet_u_interrupt; 215 cs->irq_func = &netjet_u_interrupt;
240 cs->irq_flags |= IRQF_SHARED; 216 cs->irq_flags |= IRQF_SHARED;
241 ICCVersion(cs, "NETspider-U:"); 217 ICCVersion(cs, "NETspider-U:");
218
242 return (1); 219 return (1);
243} 220}
221
222static struct pci_dev *dev_netjet __devinitdata = NULL;
223
224int __devinit
225setup_netjet_u(struct IsdnCard *card)
226{
227 int ret;
228 struct IsdnCardState *cs = card->cs;
229 char tmp[64];
230
231#ifdef __BIG_ENDIAN
232#error "not running on big endian machines now"
233#endif
234
235 strcpy(tmp, NETjet_U_revision);
236 printk(KERN_INFO "HiSax: Traverse Tech. NETspider-U driver Rev. %s\n", HiSax_getrev(tmp));
237 if (cs->typ != ISDN_CTYPE_NETJET_U)
238 return(0);
239 test_and_clear_bit(FLG_LOCK_ATOMIC, &cs->HW_Flags);
240
241 for ( ;; )
242 {
243 if ((dev_netjet = pci_find_device(PCI_VENDOR_ID_TIGERJET,
244 PCI_DEVICE_ID_TIGERJET_300, dev_netjet))) {
245 ret = nju_pci_probe(dev_netjet, cs);
246 if (!ret)
247 return(0);
248 } else {
249 printk(KERN_WARNING "NETspider-U: No PCI card found\n");
250 return(0);
251 }
252
253 ret = nju_cs_init(card, cs);
254 if (!ret)
255 return (0);
256 if (ret > 0)
257 break;
258 /* ret < 0 == continue looping */
259 }
260
261 return nju_cs_init_rest(card, cs);
262}
diff --git a/drivers/isdn/hisax/sedlbauer.c b/drivers/isdn/hisax/sedlbauer.c
index 030d1625c5c6..ad06f3cc60fb 100644
--- a/drivers/isdn/hisax/sedlbauer.c
+++ b/drivers/isdn/hisax/sedlbauer.c
@@ -451,6 +451,9 @@ Sedl_card_msg(struct IsdnCardState *cs, int mt, void *arg)
451 spin_unlock_irqrestore(&cs->lock, flags); 451 spin_unlock_irqrestore(&cs->lock, flags);
452 return(0); 452 return(0);
453 case CARD_RELEASE: 453 case CARD_RELEASE:
454 if (cs->hw.sedl.bus == SEDL_BUS_PCI)
455 /* disable all IRQ */
456 byteout(cs->hw.sedl.cfg_reg+ 5, 0);
454 if (cs->hw.sedl.chip == SEDL_CHIP_ISAC_ISAR) { 457 if (cs->hw.sedl.chip == SEDL_CHIP_ISAC_ISAR) {
455 spin_lock_irqsave(&cs->lock, flags); 458 spin_lock_irqsave(&cs->lock, flags);
456 writereg(cs->hw.sedl.adr, cs->hw.sedl.hscx, 459 writereg(cs->hw.sedl.adr, cs->hw.sedl.hscx,
@@ -468,6 +471,9 @@ Sedl_card_msg(struct IsdnCardState *cs, int mt, void *arg)
468 return(0); 471 return(0);
469 case CARD_INIT: 472 case CARD_INIT:
470 spin_lock_irqsave(&cs->lock, flags); 473 spin_lock_irqsave(&cs->lock, flags);
474 if (cs->hw.sedl.bus == SEDL_BUS_PCI)
475 /* enable all IRQ */
476 byteout(cs->hw.sedl.cfg_reg+ 5, 0x02);
471 reset_sedlbauer(cs); 477 reset_sedlbauer(cs);
472 if (cs->hw.sedl.chip == SEDL_CHIP_ISAC_ISAR) { 478 if (cs->hw.sedl.chip == SEDL_CHIP_ISAC_ISAR) {
473 clear_pending_isac_ints(cs); 479 clear_pending_isac_ints(cs);
@@ -667,7 +673,7 @@ setup_sedlbauer(struct IsdnCard *card)
667 byteout(cs->hw.sedl.cfg_reg, 0xff); 673 byteout(cs->hw.sedl.cfg_reg, 0xff);
668 byteout(cs->hw.sedl.cfg_reg, 0x00); 674 byteout(cs->hw.sedl.cfg_reg, 0x00);
669 byteout(cs->hw.sedl.cfg_reg+ 2, 0xdd); 675 byteout(cs->hw.sedl.cfg_reg+ 2, 0xdd);
670 byteout(cs->hw.sedl.cfg_reg+ 5, 0x02); 676 byteout(cs->hw.sedl.cfg_reg+ 5, 0); /* disable all IRQ */
671 byteout(cs->hw.sedl.cfg_reg +3, cs->hw.sedl.reset_on); 677 byteout(cs->hw.sedl.cfg_reg +3, cs->hw.sedl.reset_on);
672 mdelay(2); 678 mdelay(2);
673 byteout(cs->hw.sedl.cfg_reg +3, cs->hw.sedl.reset_off); 679 byteout(cs->hw.sedl.cfg_reg +3, cs->hw.sedl.reset_off);
diff --git a/drivers/isdn/i4l/Kconfig b/drivers/isdn/i4l/Kconfig
index 3ef567b99c74..e91c187992dd 100644
--- a/drivers/isdn/i4l/Kconfig
+++ b/drivers/isdn/i4l/Kconfig
@@ -86,7 +86,6 @@ config ISDN_X25
86 86
87 87
88menu "ISDN feature submodules" 88menu "ISDN feature submodules"
89 depends on ISDN
90 89
91config ISDN_DRV_LOOP 90config ISDN_DRV_LOOP
92 tristate "isdnloop support" 91 tristate "isdnloop support"
@@ -100,7 +99,7 @@ config ISDN_DRV_LOOP
100 99
101config ISDN_DIVERSION 100config ISDN_DIVERSION
102 tristate "Support isdn diversion services" 101 tristate "Support isdn diversion services"
103 depends on ISDN && ISDN_I4L 102 depends on ISDN_I4L
104 help 103 help
105 This option allows you to use some supplementary diversion 104 This option allows you to use some supplementary diversion
106 services in conjunction with the HiSax driver on an EURO/DSS1 105 services in conjunction with the HiSax driver on an EURO/DSS1
@@ -120,13 +119,13 @@ config ISDN_DIVERSION
120endmenu 119endmenu
121 120
122comment "ISDN4Linux hardware drivers" 121comment "ISDN4Linux hardware drivers"
123 depends on NET && ISDN && ISDN_I4L 122 depends on ISDN_I4L
124 123
125source "drivers/isdn/hisax/Kconfig" 124source "drivers/isdn/hisax/Kconfig"
126 125
127 126
128menu "Active cards" 127menu "Active cards"
129 depends on NET && ISDN && ISDN_I4L!=n 128 depends on ISDN_I4L!=n
130 129
131source "drivers/isdn/icn/Kconfig" 130source "drivers/isdn/icn/Kconfig"
132 131
diff --git a/drivers/kvm/Kconfig b/drivers/kvm/Kconfig
index e8e37d826478..33fa28a8c199 100644
--- a/drivers/kvm/Kconfig
+++ b/drivers/kvm/Kconfig
@@ -1,12 +1,17 @@
1# 1#
2# KVM configuration 2# KVM configuration
3# 3#
4menu "Virtualization" 4menuconfig VIRTUALIZATION
5 bool "Virtualization"
5 depends on X86 6 depends on X86
7 default y
8
9if VIRTUALIZATION
6 10
7config KVM 11config KVM
8 tristate "Kernel-based Virtual Machine (KVM) support" 12 tristate "Kernel-based Virtual Machine (KVM) support"
9 depends on X86 && EXPERIMENTAL 13 depends on X86 && EXPERIMENTAL
14 depends on X86_CMPXCHG64 || 64BIT
10 ---help--- 15 ---help---
11 Support hosting fully virtualized guest machines using hardware 16 Support hosting fully virtualized guest machines using hardware
12 virtualization extensions. You will need a fairly recent 17 virtualization extensions. You will need a fairly recent
@@ -35,4 +40,4 @@ config KVM_AMD
35 Provides support for KVM on AMD processors equipped with the AMD-V 40 Provides support for KVM on AMD processors equipped with the AMD-V
36 (SVM) extensions. 41 (SVM) extensions.
37 42
38endmenu 43endif # VIRTUALIZATION
diff --git a/drivers/kvm/kvm.h b/drivers/kvm/kvm.h
index 152312c1fafa..a7c5e6bee034 100644
--- a/drivers/kvm/kvm.h
+++ b/drivers/kvm/kvm.h
@@ -10,6 +10,8 @@
10#include <linux/list.h> 10#include <linux/list.h>
11#include <linux/mutex.h> 11#include <linux/mutex.h>
12#include <linux/spinlock.h> 12#include <linux/spinlock.h>
13#include <linux/signal.h>
14#include <linux/sched.h>
13#include <linux/mm.h> 15#include <linux/mm.h>
14#include <asm/signal.h> 16#include <asm/signal.h>
15 17
@@ -18,6 +20,7 @@
18#include <linux/kvm_para.h> 20#include <linux/kvm_para.h>
19 21
20#define CR0_PE_MASK (1ULL << 0) 22#define CR0_PE_MASK (1ULL << 0)
23#define CR0_MP_MASK (1ULL << 1)
21#define CR0_TS_MASK (1ULL << 3) 24#define CR0_TS_MASK (1ULL << 3)
22#define CR0_NE_MASK (1ULL << 5) 25#define CR0_NE_MASK (1ULL << 5)
23#define CR0_WP_MASK (1ULL << 16) 26#define CR0_WP_MASK (1ULL << 16)
@@ -42,7 +45,8 @@
42 (CR0_PG_MASK | CR0_PE_MASK | CR0_WP_MASK | CR0_NE_MASK \ 45 (CR0_PG_MASK | CR0_PE_MASK | CR0_WP_MASK | CR0_NE_MASK \
43 | CR0_NW_MASK | CR0_CD_MASK) 46 | CR0_NW_MASK | CR0_CD_MASK)
44#define KVM_VM_CR0_ALWAYS_ON \ 47#define KVM_VM_CR0_ALWAYS_ON \
45 (CR0_PG_MASK | CR0_PE_MASK | CR0_WP_MASK | CR0_NE_MASK) 48 (CR0_PG_MASK | CR0_PE_MASK | CR0_WP_MASK | CR0_NE_MASK | CR0_TS_MASK \
49 | CR0_MP_MASK)
46#define KVM_GUEST_CR4_MASK \ 50#define KVM_GUEST_CR4_MASK \
47 (CR4_PSE_MASK | CR4_PAE_MASK | CR4_PGE_MASK | CR4_VMXE_MASK | CR4_VME_MASK) 51 (CR4_PSE_MASK | CR4_PAE_MASK | CR4_PGE_MASK | CR4_VMXE_MASK | CR4_VME_MASK)
48#define KVM_PMODE_VM_CR4_ALWAYS_ON (CR4_VMXE_MASK | CR4_PAE_MASK) 52#define KVM_PMODE_VM_CR4_ALWAYS_ON (CR4_VMXE_MASK | CR4_PAE_MASK)
@@ -51,10 +55,10 @@
51#define INVALID_PAGE (~(hpa_t)0) 55#define INVALID_PAGE (~(hpa_t)0)
52#define UNMAPPED_GVA (~(gpa_t)0) 56#define UNMAPPED_GVA (~(gpa_t)0)
53 57
54#define KVM_MAX_VCPUS 1 58#define KVM_MAX_VCPUS 4
55#define KVM_ALIAS_SLOTS 4 59#define KVM_ALIAS_SLOTS 4
56#define KVM_MEMORY_SLOTS 4 60#define KVM_MEMORY_SLOTS 4
57#define KVM_NUM_MMU_PAGES 256 61#define KVM_NUM_MMU_PAGES 1024
58#define KVM_MIN_FREE_MMU_PAGES 5 62#define KVM_MIN_FREE_MMU_PAGES 5
59#define KVM_REFILL_PAGES 25 63#define KVM_REFILL_PAGES 25
60#define KVM_MAX_CPUID_ENTRIES 40 64#define KVM_MAX_CPUID_ENTRIES 40
@@ -80,6 +84,11 @@
80#define KVM_PIO_PAGE_OFFSET 1 84#define KVM_PIO_PAGE_OFFSET 1
81 85
82/* 86/*
87 * vcpu->requests bit members
88 */
89#define KVM_TLB_FLUSH 0
90
91/*
83 * Address types: 92 * Address types:
84 * 93 *
85 * gva - guest virtual address 94 * gva - guest virtual address
@@ -137,7 +146,7 @@ struct kvm_mmu_page {
137 gfn_t gfn; 146 gfn_t gfn;
138 union kvm_mmu_page_role role; 147 union kvm_mmu_page_role role;
139 148
140 hpa_t page_hpa; 149 u64 *spt;
141 unsigned long slot_bitmap; /* One bit set per slot which has memory 150 unsigned long slot_bitmap; /* One bit set per slot which has memory
142 * in this shadow page. 151 * in this shadow page.
143 */ 152 */
@@ -232,6 +241,7 @@ struct kvm_pio_request {
232 struct page *guest_pages[2]; 241 struct page *guest_pages[2];
233 unsigned guest_page_offset; 242 unsigned guest_page_offset;
234 int in; 243 int in;
244 int port;
235 int size; 245 int size;
236 int string; 246 int string;
237 int down; 247 int down;
@@ -252,8 +262,70 @@ struct kvm_stat {
252 u32 halt_exits; 262 u32 halt_exits;
253 u32 request_irq_exits; 263 u32 request_irq_exits;
254 u32 irq_exits; 264 u32 irq_exits;
265 u32 light_exits;
266 u32 efer_reload;
267};
268
269struct kvm_io_device {
270 void (*read)(struct kvm_io_device *this,
271 gpa_t addr,
272 int len,
273 void *val);
274 void (*write)(struct kvm_io_device *this,
275 gpa_t addr,
276 int len,
277 const void *val);
278 int (*in_range)(struct kvm_io_device *this, gpa_t addr);
279 void (*destructor)(struct kvm_io_device *this);
280
281 void *private;
282};
283
284static inline void kvm_iodevice_read(struct kvm_io_device *dev,
285 gpa_t addr,
286 int len,
287 void *val)
288{
289 dev->read(dev, addr, len, val);
290}
291
292static inline void kvm_iodevice_write(struct kvm_io_device *dev,
293 gpa_t addr,
294 int len,
295 const void *val)
296{
297 dev->write(dev, addr, len, val);
298}
299
300static inline int kvm_iodevice_inrange(struct kvm_io_device *dev, gpa_t addr)
301{
302 return dev->in_range(dev, addr);
303}
304
305static inline void kvm_iodevice_destructor(struct kvm_io_device *dev)
306{
307 if (dev->destructor)
308 dev->destructor(dev);
309}
310
311/*
312 * It would be nice to use something smarter than a linear search, TBD...
313 * Thankfully we dont expect many devices to register (famous last words :),
314 * so until then it will suffice. At least its abstracted so we can change
315 * in one place.
316 */
317struct kvm_io_bus {
318 int dev_count;
319#define NR_IOBUS_DEVS 6
320 struct kvm_io_device *devs[NR_IOBUS_DEVS];
255}; 321};
256 322
323void kvm_io_bus_init(struct kvm_io_bus *bus);
324void kvm_io_bus_destroy(struct kvm_io_bus *bus);
325struct kvm_io_device *kvm_io_bus_find_dev(struct kvm_io_bus *bus, gpa_t addr);
326void kvm_io_bus_register_dev(struct kvm_io_bus *bus,
327 struct kvm_io_device *dev);
328
257struct kvm_vcpu { 329struct kvm_vcpu {
258 struct kvm *kvm; 330 struct kvm *kvm;
259 union { 331 union {
@@ -266,6 +338,8 @@ struct kvm_vcpu {
266 u64 host_tsc; 338 u64 host_tsc;
267 struct kvm_run *run; 339 struct kvm_run *run;
268 int interrupt_window_open; 340 int interrupt_window_open;
341 int guest_mode;
342 unsigned long requests;
269 unsigned long irq_summary; /* bit vector: 1 per word in irq_pending */ 343 unsigned long irq_summary; /* bit vector: 1 per word in irq_pending */
270#define NR_IRQ_WORDS KVM_IRQ_BITMAP_SIZE(unsigned long) 344#define NR_IRQ_WORDS KVM_IRQ_BITMAP_SIZE(unsigned long)
271 unsigned long irq_pending[NR_IRQ_WORDS]; 345 unsigned long irq_pending[NR_IRQ_WORDS];
@@ -285,15 +359,20 @@ struct kvm_vcpu {
285 u64 apic_base; 359 u64 apic_base;
286 u64 ia32_misc_enable_msr; 360 u64 ia32_misc_enable_msr;
287 int nmsrs; 361 int nmsrs;
362 int save_nmsrs;
363 int msr_offset_efer;
364#ifdef CONFIG_X86_64
365 int msr_offset_kernel_gs_base;
366#endif
288 struct vmx_msr_entry *guest_msrs; 367 struct vmx_msr_entry *guest_msrs;
289 struct vmx_msr_entry *host_msrs; 368 struct vmx_msr_entry *host_msrs;
290 369
291 struct list_head free_pages;
292 struct kvm_mmu_page page_header_buf[KVM_NUM_MMU_PAGES];
293 struct kvm_mmu mmu; 370 struct kvm_mmu mmu;
294 371
295 struct kvm_mmu_memory_cache mmu_pte_chain_cache; 372 struct kvm_mmu_memory_cache mmu_pte_chain_cache;
296 struct kvm_mmu_memory_cache mmu_rmap_desc_cache; 373 struct kvm_mmu_memory_cache mmu_rmap_desc_cache;
374 struct kvm_mmu_memory_cache mmu_page_cache;
375 struct kvm_mmu_memory_cache mmu_page_header_cache;
297 376
298 gfn_t last_pt_write_gfn; 377 gfn_t last_pt_write_gfn;
299 int last_pt_write_count; 378 int last_pt_write_count;
@@ -305,6 +384,11 @@ struct kvm_vcpu {
305 char *guest_fx_image; 384 char *guest_fx_image;
306 int fpu_active; 385 int fpu_active;
307 int guest_fpu_loaded; 386 int guest_fpu_loaded;
387 struct vmx_host_state {
388 int loaded;
389 u16 fs_sel, gs_sel, ldt_sel;
390 int fs_gs_ldt_reload_needed;
391 } vmx_host_state;
308 392
309 int mmio_needed; 393 int mmio_needed;
310 int mmio_read_completed; 394 int mmio_read_completed;
@@ -331,6 +415,7 @@ struct kvm_vcpu {
331 u32 ar; 415 u32 ar;
332 } tr, es, ds, fs, gs; 416 } tr, es, ds, fs, gs;
333 } rmode; 417 } rmode;
418 int halt_request; /* real mode on Intel only */
334 419
335 int cpuid_nent; 420 int cpuid_nent;
336 struct kvm_cpuid_entry cpuid_entries[KVM_MAX_CPUID_ENTRIES]; 421 struct kvm_cpuid_entry cpuid_entries[KVM_MAX_CPUID_ENTRIES];
@@ -362,12 +447,15 @@ struct kvm {
362 struct list_head active_mmu_pages; 447 struct list_head active_mmu_pages;
363 int n_free_mmu_pages; 448 int n_free_mmu_pages;
364 struct hlist_head mmu_page_hash[KVM_NUM_MMU_PAGES]; 449 struct hlist_head mmu_page_hash[KVM_NUM_MMU_PAGES];
450 int nvcpus;
365 struct kvm_vcpu vcpus[KVM_MAX_VCPUS]; 451 struct kvm_vcpu vcpus[KVM_MAX_VCPUS];
366 int memory_config_version; 452 int memory_config_version;
367 int busy; 453 int busy;
368 unsigned long rmap_overflow; 454 unsigned long rmap_overflow;
369 struct list_head vm_list; 455 struct list_head vm_list;
370 struct file *filp; 456 struct file *filp;
457 struct kvm_io_bus mmio_bus;
458 struct kvm_io_bus pio_bus;
371}; 459};
372 460
373struct descriptor_table { 461struct descriptor_table {
@@ -488,6 +576,7 @@ int kvm_setup_pio(struct kvm_vcpu *vcpu, struct kvm_run *run, int in,
488 int size, unsigned long count, int string, int down, 576 int size, unsigned long count, int string, int down,
489 gva_t address, int rep, unsigned port); 577 gva_t address, int rep, unsigned port);
490void kvm_emulate_cpuid(struct kvm_vcpu *vcpu); 578void kvm_emulate_cpuid(struct kvm_vcpu *vcpu);
579int kvm_emulate_halt(struct kvm_vcpu *vcpu);
491int emulate_invlpg(struct kvm_vcpu *vcpu, gva_t address); 580int emulate_invlpg(struct kvm_vcpu *vcpu, gva_t address);
492int emulate_clts(struct kvm_vcpu *vcpu); 581int emulate_clts(struct kvm_vcpu *vcpu);
493int emulator_get_dr(struct x86_emulate_ctxt* ctxt, int dr, 582int emulator_get_dr(struct x86_emulate_ctxt* ctxt, int dr,
@@ -511,6 +600,7 @@ void save_msrs(struct vmx_msr_entry *e, int n);
511void kvm_resched(struct kvm_vcpu *vcpu); 600void kvm_resched(struct kvm_vcpu *vcpu);
512void kvm_load_guest_fpu(struct kvm_vcpu *vcpu); 601void kvm_load_guest_fpu(struct kvm_vcpu *vcpu);
513void kvm_put_guest_fpu(struct kvm_vcpu *vcpu); 602void kvm_put_guest_fpu(struct kvm_vcpu *vcpu);
603void kvm_flush_remote_tlbs(struct kvm *kvm);
514 604
515int kvm_read_guest(struct kvm_vcpu *vcpu, 605int kvm_read_guest(struct kvm_vcpu *vcpu,
516 gva_t addr, 606 gva_t addr,
@@ -524,10 +614,12 @@ int kvm_write_guest(struct kvm_vcpu *vcpu,
524 614
525unsigned long segment_base(u16 selector); 615unsigned long segment_base(u16 selector);
526 616
527void kvm_mmu_pre_write(struct kvm_vcpu *vcpu, gpa_t gpa, int bytes); 617void kvm_mmu_pte_write(struct kvm_vcpu *vcpu, gpa_t gpa,
528void kvm_mmu_post_write(struct kvm_vcpu *vcpu, gpa_t gpa, int bytes); 618 const u8 *old, const u8 *new, int bytes);
529int kvm_mmu_unprotect_page_virt(struct kvm_vcpu *vcpu, gva_t gva); 619int kvm_mmu_unprotect_page_virt(struct kvm_vcpu *vcpu, gva_t gva);
530void kvm_mmu_free_some_pages(struct kvm_vcpu *vcpu); 620void kvm_mmu_free_some_pages(struct kvm_vcpu *vcpu);
621int kvm_mmu_load(struct kvm_vcpu *vcpu);
622void kvm_mmu_unload(struct kvm_vcpu *vcpu);
531 623
532int kvm_hypercall(struct kvm_vcpu *vcpu, struct kvm_run *run); 624int kvm_hypercall(struct kvm_vcpu *vcpu, struct kvm_run *run);
533 625
@@ -539,6 +631,14 @@ static inline int kvm_mmu_page_fault(struct kvm_vcpu *vcpu, gva_t gva,
539 return vcpu->mmu.page_fault(vcpu, gva, error_code); 631 return vcpu->mmu.page_fault(vcpu, gva, error_code);
540} 632}
541 633
634static inline int kvm_mmu_reload(struct kvm_vcpu *vcpu)
635{
636 if (likely(vcpu->mmu.root_hpa != INVALID_PAGE))
637 return 0;
638
639 return kvm_mmu_load(vcpu);
640}
641
542static inline int is_long_mode(struct kvm_vcpu *vcpu) 642static inline int is_long_mode(struct kvm_vcpu *vcpu)
543{ 643{
544#ifdef CONFIG_X86_64 644#ifdef CONFIG_X86_64
diff --git a/drivers/kvm/kvm_main.c b/drivers/kvm/kvm_main.c
index 8f1f07adb04e..1b206f197c6b 100644
--- a/drivers/kvm/kvm_main.c
+++ b/drivers/kvm/kvm_main.c
@@ -16,34 +16,33 @@
16 */ 16 */
17 17
18#include "kvm.h" 18#include "kvm.h"
19#include "x86_emulate.h"
20#include "segment_descriptor.h"
19 21
20#include <linux/kvm.h> 22#include <linux/kvm.h>
21#include <linux/module.h> 23#include <linux/module.h>
22#include <linux/errno.h> 24#include <linux/errno.h>
23#include <linux/magic.h>
24#include <asm/processor.h>
25#include <linux/percpu.h> 25#include <linux/percpu.h>
26#include <linux/gfp.h> 26#include <linux/gfp.h>
27#include <asm/msr.h>
28#include <linux/mm.h> 27#include <linux/mm.h>
29#include <linux/miscdevice.h> 28#include <linux/miscdevice.h>
30#include <linux/vmalloc.h> 29#include <linux/vmalloc.h>
31#include <asm/uaccess.h>
32#include <linux/reboot.h> 30#include <linux/reboot.h>
33#include <asm/io.h>
34#include <linux/debugfs.h> 31#include <linux/debugfs.h>
35#include <linux/highmem.h> 32#include <linux/highmem.h>
36#include <linux/file.h> 33#include <linux/file.h>
37#include <asm/desc.h>
38#include <linux/sysdev.h> 34#include <linux/sysdev.h>
39#include <linux/cpu.h> 35#include <linux/cpu.h>
40#include <linux/file.h>
41#include <linux/fs.h>
42#include <linux/mount.h>
43#include <linux/sched.h> 36#include <linux/sched.h>
37#include <linux/cpumask.h>
38#include <linux/smp.h>
39#include <linux/anon_inodes.h>
44 40
45#include "x86_emulate.h" 41#include <asm/processor.h>
46#include "segment_descriptor.h" 42#include <asm/msr.h>
43#include <asm/io.h>
44#include <asm/uaccess.h>
45#include <asm/desc.h>
47 46
48MODULE_AUTHOR("Qumranet"); 47MODULE_AUTHOR("Qumranet");
49MODULE_LICENSE("GPL"); 48MODULE_LICENSE("GPL");
@@ -51,8 +50,12 @@ MODULE_LICENSE("GPL");
51static DEFINE_SPINLOCK(kvm_lock); 50static DEFINE_SPINLOCK(kvm_lock);
52static LIST_HEAD(vm_list); 51static LIST_HEAD(vm_list);
53 52
53static cpumask_t cpus_hardware_enabled;
54
54struct kvm_arch_ops *kvm_arch_ops; 55struct kvm_arch_ops *kvm_arch_ops;
55 56
57static void hardware_disable(void *ignored);
58
56#define STAT_OFFSET(x) offsetof(struct kvm_vcpu, stat.x) 59#define STAT_OFFSET(x) offsetof(struct kvm_vcpu, stat.x)
57 60
58static struct kvm_stats_debugfs_item { 61static struct kvm_stats_debugfs_item {
@@ -72,13 +75,13 @@ static struct kvm_stats_debugfs_item {
72 { "halt_exits", STAT_OFFSET(halt_exits) }, 75 { "halt_exits", STAT_OFFSET(halt_exits) },
73 { "request_irq", STAT_OFFSET(request_irq_exits) }, 76 { "request_irq", STAT_OFFSET(request_irq_exits) },
74 { "irq_exits", STAT_OFFSET(irq_exits) }, 77 { "irq_exits", STAT_OFFSET(irq_exits) },
78 { "light_exits", STAT_OFFSET(light_exits) },
79 { "efer_reload", STAT_OFFSET(efer_reload) },
75 { NULL } 80 { NULL }
76}; 81};
77 82
78static struct dentry *debugfs_dir; 83static struct dentry *debugfs_dir;
79 84
80struct vfsmount *kvmfs_mnt;
81
82#define MAX_IO_MSRS 256 85#define MAX_IO_MSRS 256
83 86
84#define CR0_RESEVED_BITS 0xffffffff1ffaffc0ULL 87#define CR0_RESEVED_BITS 0xffffffff1ffaffc0ULL
@@ -100,55 +103,6 @@ struct segment_descriptor_64 {
100static long kvm_vcpu_ioctl(struct file *file, unsigned int ioctl, 103static long kvm_vcpu_ioctl(struct file *file, unsigned int ioctl,
101 unsigned long arg); 104 unsigned long arg);
102 105
103static struct inode *kvmfs_inode(struct file_operations *fops)
104{
105 int error = -ENOMEM;
106 struct inode *inode = new_inode(kvmfs_mnt->mnt_sb);
107
108 if (!inode)
109 goto eexit_1;
110
111 inode->i_fop = fops;
112
113 /*
114 * Mark the inode dirty from the very beginning,
115 * that way it will never be moved to the dirty
116 * list because mark_inode_dirty() will think
117 * that it already _is_ on the dirty list.
118 */
119 inode->i_state = I_DIRTY;
120 inode->i_mode = S_IRUSR | S_IWUSR;
121 inode->i_uid = current->fsuid;
122 inode->i_gid = current->fsgid;
123 inode->i_atime = inode->i_mtime = inode->i_ctime = CURRENT_TIME;
124 return inode;
125
126eexit_1:
127 return ERR_PTR(error);
128}
129
130static struct file *kvmfs_file(struct inode *inode, void *private_data)
131{
132 struct file *file = get_empty_filp();
133
134 if (!file)
135 return ERR_PTR(-ENFILE);
136
137 file->f_path.mnt = mntget(kvmfs_mnt);
138 file->f_path.dentry = d_alloc_anon(inode);
139 if (!file->f_path.dentry)
140 return ERR_PTR(-ENOMEM);
141 file->f_mapping = inode->i_mapping;
142
143 file->f_pos = 0;
144 file->f_flags = O_RDWR;
145 file->f_op = inode->i_fop;
146 file->f_mode = FMODE_READ | FMODE_WRITE;
147 file->f_version = 0;
148 file->private_data = private_data;
149 return file;
150}
151
152unsigned long segment_base(u16 selector) 106unsigned long segment_base(u16 selector)
153{ 107{
154 struct descriptor_table gdt; 108 struct descriptor_table gdt;
@@ -307,6 +261,48 @@ static void vcpu_put(struct kvm_vcpu *vcpu)
307 mutex_unlock(&vcpu->mutex); 261 mutex_unlock(&vcpu->mutex);
308} 262}
309 263
264static void ack_flush(void *_completed)
265{
266 atomic_t *completed = _completed;
267
268 atomic_inc(completed);
269}
270
271void kvm_flush_remote_tlbs(struct kvm *kvm)
272{
273 int i, cpu, needed;
274 cpumask_t cpus;
275 struct kvm_vcpu *vcpu;
276 atomic_t completed;
277
278 atomic_set(&completed, 0);
279 cpus_clear(cpus);
280 needed = 0;
281 for (i = 0; i < kvm->nvcpus; ++i) {
282 vcpu = &kvm->vcpus[i];
283 if (test_and_set_bit(KVM_TLB_FLUSH, &vcpu->requests))
284 continue;
285 cpu = vcpu->cpu;
286 if (cpu != -1 && cpu != raw_smp_processor_id())
287 if (!cpu_isset(cpu, cpus)) {
288 cpu_set(cpu, cpus);
289 ++needed;
290 }
291 }
292
293 /*
294 * We really want smp_call_function_mask() here. But that's not
295 * available, so ipi all cpus in parallel and wait for them
296 * to complete.
297 */
298 for (cpu = first_cpu(cpus); cpu != NR_CPUS; cpu = next_cpu(cpu, cpus))
299 smp_call_function_single(cpu, ack_flush, &completed, 1, 0);
300 while (atomic_read(&completed) != needed) {
301 cpu_relax();
302 barrier();
303 }
304}
305
310static struct kvm *kvm_create_vm(void) 306static struct kvm *kvm_create_vm(void)
311{ 307{
312 struct kvm *kvm = kzalloc(sizeof(struct kvm), GFP_KERNEL); 308 struct kvm *kvm = kzalloc(sizeof(struct kvm), GFP_KERNEL);
@@ -315,8 +311,13 @@ static struct kvm *kvm_create_vm(void)
315 if (!kvm) 311 if (!kvm)
316 return ERR_PTR(-ENOMEM); 312 return ERR_PTR(-ENOMEM);
317 313
314 kvm_io_bus_init(&kvm->pio_bus);
318 spin_lock_init(&kvm->lock); 315 spin_lock_init(&kvm->lock);
319 INIT_LIST_HEAD(&kvm->active_mmu_pages); 316 INIT_LIST_HEAD(&kvm->active_mmu_pages);
317 spin_lock(&kvm_lock);
318 list_add(&kvm->vm_list, &vm_list);
319 spin_unlock(&kvm_lock);
320 kvm_io_bus_init(&kvm->mmio_bus);
320 for (i = 0; i < KVM_MAX_VCPUS; ++i) { 321 for (i = 0; i < KVM_MAX_VCPUS; ++i) {
321 struct kvm_vcpu *vcpu = &kvm->vcpus[i]; 322 struct kvm_vcpu *vcpu = &kvm->vcpus[i];
322 323
@@ -324,10 +325,6 @@ static struct kvm *kvm_create_vm(void)
324 vcpu->cpu = -1; 325 vcpu->cpu = -1;
325 vcpu->kvm = kvm; 326 vcpu->kvm = kvm;
326 vcpu->mmu.root_hpa = INVALID_PAGE; 327 vcpu->mmu.root_hpa = INVALID_PAGE;
327 INIT_LIST_HEAD(&vcpu->free_pages);
328 spin_lock(&kvm_lock);
329 list_add(&kvm->vm_list, &vm_list);
330 spin_unlock(&kvm_lock);
331 } 328 }
332 return kvm; 329 return kvm;
333} 330}
@@ -380,6 +377,16 @@ static void free_pio_guest_pages(struct kvm_vcpu *vcpu)
380 } 377 }
381} 378}
382 379
380static void kvm_unload_vcpu_mmu(struct kvm_vcpu *vcpu)
381{
382 if (!vcpu->vmcs)
383 return;
384
385 vcpu_load(vcpu);
386 kvm_mmu_unload(vcpu);
387 vcpu_put(vcpu);
388}
389
383static void kvm_free_vcpu(struct kvm_vcpu *vcpu) 390static void kvm_free_vcpu(struct kvm_vcpu *vcpu)
384{ 391{
385 if (!vcpu->vmcs) 392 if (!vcpu->vmcs)
@@ -400,6 +407,11 @@ static void kvm_free_vcpus(struct kvm *kvm)
400{ 407{
401 unsigned int i; 408 unsigned int i;
402 409
410 /*
411 * Unpin any mmu pages first.
412 */
413 for (i = 0; i < KVM_MAX_VCPUS; ++i)
414 kvm_unload_vcpu_mmu(&kvm->vcpus[i]);
403 for (i = 0; i < KVM_MAX_VCPUS; ++i) 415 for (i = 0; i < KVM_MAX_VCPUS; ++i)
404 kvm_free_vcpu(&kvm->vcpus[i]); 416 kvm_free_vcpu(&kvm->vcpus[i]);
405} 417}
@@ -414,6 +426,8 @@ static void kvm_destroy_vm(struct kvm *kvm)
414 spin_lock(&kvm_lock); 426 spin_lock(&kvm_lock);
415 list_del(&kvm->vm_list); 427 list_del(&kvm->vm_list);
416 spin_unlock(&kvm_lock); 428 spin_unlock(&kvm_lock);
429 kvm_io_bus_destroy(&kvm->pio_bus);
430 kvm_io_bus_destroy(&kvm->mmio_bus);
417 kvm_free_vcpus(kvm); 431 kvm_free_vcpus(kvm);
418 kvm_free_physmem(kvm); 432 kvm_free_physmem(kvm);
419 kfree(kvm); 433 kfree(kvm);
@@ -969,7 +983,7 @@ EXPORT_SYMBOL_GPL(gfn_to_page);
969void mark_page_dirty(struct kvm *kvm, gfn_t gfn) 983void mark_page_dirty(struct kvm *kvm, gfn_t gfn)
970{ 984{
971 int i; 985 int i;
972 struct kvm_memory_slot *memslot = NULL; 986 struct kvm_memory_slot *memslot;
973 unsigned long rel_gfn; 987 unsigned long rel_gfn;
974 988
975 for (i = 0; i < kvm->nmemslots; ++i) { 989 for (i = 0; i < kvm->nmemslots; ++i) {
@@ -978,7 +992,7 @@ void mark_page_dirty(struct kvm *kvm, gfn_t gfn)
978 if (gfn >= memslot->base_gfn 992 if (gfn >= memslot->base_gfn
979 && gfn < memslot->base_gfn + memslot->npages) { 993 && gfn < memslot->base_gfn + memslot->npages) {
980 994
981 if (!memslot || !memslot->dirty_bitmap) 995 if (!memslot->dirty_bitmap)
982 return; 996 return;
983 997
984 rel_gfn = gfn - memslot->base_gfn; 998 rel_gfn = gfn - memslot->base_gfn;
@@ -1037,12 +1051,31 @@ static int emulator_write_std(unsigned long addr,
1037 return X86EMUL_UNHANDLEABLE; 1051 return X86EMUL_UNHANDLEABLE;
1038} 1052}
1039 1053
1054static struct kvm_io_device *vcpu_find_mmio_dev(struct kvm_vcpu *vcpu,
1055 gpa_t addr)
1056{
1057 /*
1058 * Note that its important to have this wrapper function because
1059 * in the very near future we will be checking for MMIOs against
1060 * the LAPIC as well as the general MMIO bus
1061 */
1062 return kvm_io_bus_find_dev(&vcpu->kvm->mmio_bus, addr);
1063}
1064
1065static struct kvm_io_device *vcpu_find_pio_dev(struct kvm_vcpu *vcpu,
1066 gpa_t addr)
1067{
1068 return kvm_io_bus_find_dev(&vcpu->kvm->pio_bus, addr);
1069}
1070
1040static int emulator_read_emulated(unsigned long addr, 1071static int emulator_read_emulated(unsigned long addr,
1041 void *val, 1072 void *val,
1042 unsigned int bytes, 1073 unsigned int bytes,
1043 struct x86_emulate_ctxt *ctxt) 1074 struct x86_emulate_ctxt *ctxt)
1044{ 1075{
1045 struct kvm_vcpu *vcpu = ctxt->vcpu; 1076 struct kvm_vcpu *vcpu = ctxt->vcpu;
1077 struct kvm_io_device *mmio_dev;
1078 gpa_t gpa;
1046 1079
1047 if (vcpu->mmio_read_completed) { 1080 if (vcpu->mmio_read_completed) {
1048 memcpy(val, vcpu->mmio_data, bytes); 1081 memcpy(val, vcpu->mmio_data, bytes);
@@ -1051,18 +1084,26 @@ static int emulator_read_emulated(unsigned long addr,
1051 } else if (emulator_read_std(addr, val, bytes, ctxt) 1084 } else if (emulator_read_std(addr, val, bytes, ctxt)
1052 == X86EMUL_CONTINUE) 1085 == X86EMUL_CONTINUE)
1053 return X86EMUL_CONTINUE; 1086 return X86EMUL_CONTINUE;
1054 else {
1055 gpa_t gpa = vcpu->mmu.gva_to_gpa(vcpu, addr);
1056 1087
1057 if (gpa == UNMAPPED_GVA) 1088 gpa = vcpu->mmu.gva_to_gpa(vcpu, addr);
1058 return X86EMUL_PROPAGATE_FAULT; 1089 if (gpa == UNMAPPED_GVA)
1059 vcpu->mmio_needed = 1; 1090 return X86EMUL_PROPAGATE_FAULT;
1060 vcpu->mmio_phys_addr = gpa;
1061 vcpu->mmio_size = bytes;
1062 vcpu->mmio_is_write = 0;
1063 1091
1064 return X86EMUL_UNHANDLEABLE; 1092 /*
1093 * Is this MMIO handled locally?
1094 */
1095 mmio_dev = vcpu_find_mmio_dev(vcpu, gpa);
1096 if (mmio_dev) {
1097 kvm_iodevice_read(mmio_dev, gpa, bytes, val);
1098 return X86EMUL_CONTINUE;
1065 } 1099 }
1100
1101 vcpu->mmio_needed = 1;
1102 vcpu->mmio_phys_addr = gpa;
1103 vcpu->mmio_size = bytes;
1104 vcpu->mmio_is_write = 0;
1105
1106 return X86EMUL_UNHANDLEABLE;
1066} 1107}
1067 1108
1068static int emulator_write_phys(struct kvm_vcpu *vcpu, gpa_t gpa, 1109static int emulator_write_phys(struct kvm_vcpu *vcpu, gpa_t gpa,
@@ -1070,18 +1111,20 @@ static int emulator_write_phys(struct kvm_vcpu *vcpu, gpa_t gpa,
1070{ 1111{
1071 struct page *page; 1112 struct page *page;
1072 void *virt; 1113 void *virt;
1114 unsigned offset = offset_in_page(gpa);
1073 1115
1074 if (((gpa + bytes - 1) >> PAGE_SHIFT) != (gpa >> PAGE_SHIFT)) 1116 if (((gpa + bytes - 1) >> PAGE_SHIFT) != (gpa >> PAGE_SHIFT))
1075 return 0; 1117 return 0;
1076 page = gfn_to_page(vcpu->kvm, gpa >> PAGE_SHIFT); 1118 page = gfn_to_page(vcpu->kvm, gpa >> PAGE_SHIFT);
1077 if (!page) 1119 if (!page)
1078 return 0; 1120 return 0;
1079 kvm_mmu_pre_write(vcpu, gpa, bytes);
1080 mark_page_dirty(vcpu->kvm, gpa >> PAGE_SHIFT); 1121 mark_page_dirty(vcpu->kvm, gpa >> PAGE_SHIFT);
1081 virt = kmap_atomic(page, KM_USER0); 1122 virt = kmap_atomic(page, KM_USER0);
1082 memcpy(virt + offset_in_page(gpa), val, bytes); 1123 if (memcmp(virt + offset_in_page(gpa), val, bytes)) {
1124 kvm_mmu_pte_write(vcpu, gpa, virt + offset, val, bytes);
1125 memcpy(virt + offset_in_page(gpa), val, bytes);
1126 }
1083 kunmap_atomic(virt, KM_USER0); 1127 kunmap_atomic(virt, KM_USER0);
1084 kvm_mmu_post_write(vcpu, gpa, bytes);
1085 return 1; 1128 return 1;
1086} 1129}
1087 1130
@@ -1090,8 +1133,9 @@ static int emulator_write_emulated(unsigned long addr,
1090 unsigned int bytes, 1133 unsigned int bytes,
1091 struct x86_emulate_ctxt *ctxt) 1134 struct x86_emulate_ctxt *ctxt)
1092{ 1135{
1093 struct kvm_vcpu *vcpu = ctxt->vcpu; 1136 struct kvm_vcpu *vcpu = ctxt->vcpu;
1094 gpa_t gpa = vcpu->mmu.gva_to_gpa(vcpu, addr); 1137 struct kvm_io_device *mmio_dev;
1138 gpa_t gpa = vcpu->mmu.gva_to_gpa(vcpu, addr);
1095 1139
1096 if (gpa == UNMAPPED_GVA) { 1140 if (gpa == UNMAPPED_GVA) {
1097 kvm_arch_ops->inject_page_fault(vcpu, addr, 2); 1141 kvm_arch_ops->inject_page_fault(vcpu, addr, 2);
@@ -1101,6 +1145,15 @@ static int emulator_write_emulated(unsigned long addr,
1101 if (emulator_write_phys(vcpu, gpa, val, bytes)) 1145 if (emulator_write_phys(vcpu, gpa, val, bytes))
1102 return X86EMUL_CONTINUE; 1146 return X86EMUL_CONTINUE;
1103 1147
1148 /*
1149 * Is this MMIO handled locally?
1150 */
1151 mmio_dev = vcpu_find_mmio_dev(vcpu, gpa);
1152 if (mmio_dev) {
1153 kvm_iodevice_write(mmio_dev, gpa, bytes, val);
1154 return X86EMUL_CONTINUE;
1155 }
1156
1104 vcpu->mmio_needed = 1; 1157 vcpu->mmio_needed = 1;
1105 vcpu->mmio_phys_addr = gpa; 1158 vcpu->mmio_phys_addr = gpa;
1106 vcpu->mmio_size = bytes; 1159 vcpu->mmio_size = bytes;
@@ -1269,6 +1322,17 @@ int emulate_instruction(struct kvm_vcpu *vcpu,
1269} 1322}
1270EXPORT_SYMBOL_GPL(emulate_instruction); 1323EXPORT_SYMBOL_GPL(emulate_instruction);
1271 1324
1325int kvm_emulate_halt(struct kvm_vcpu *vcpu)
1326{
1327 if (vcpu->irq_summary)
1328 return 1;
1329
1330 vcpu->run->exit_reason = KVM_EXIT_HLT;
1331 ++vcpu->stat.halt_exits;
1332 return 0;
1333}
1334EXPORT_SYMBOL_GPL(kvm_emulate_halt);
1335
1272int kvm_hypercall(struct kvm_vcpu *vcpu, struct kvm_run *run) 1336int kvm_hypercall(struct kvm_vcpu *vcpu, struct kvm_run *run)
1273{ 1337{
1274 unsigned long nr, a0, a1, a2, a3, a4, a5, ret; 1338 unsigned long nr, a0, a1, a2, a3, a4, a5, ret;
@@ -1469,6 +1533,7 @@ int kvm_get_msr_common(struct kvm_vcpu *vcpu, u32 msr, u64 *pdata)
1469 case MSR_IA32_MC0_MISC+16: 1533 case MSR_IA32_MC0_MISC+16:
1470 case MSR_IA32_UCODE_REV: 1534 case MSR_IA32_UCODE_REV:
1471 case MSR_IA32_PERF_STATUS: 1535 case MSR_IA32_PERF_STATUS:
1536 case MSR_IA32_EBL_CR_POWERON:
1472 /* MTRR registers */ 1537 /* MTRR registers */
1473 case 0xfe: 1538 case 0xfe:
1474 case 0x200 ... 0x2ff: 1539 case 0x200 ... 0x2ff:
@@ -1727,6 +1792,20 @@ static int complete_pio(struct kvm_vcpu *vcpu)
1727 return 0; 1792 return 0;
1728} 1793}
1729 1794
1795void kernel_pio(struct kvm_io_device *pio_dev, struct kvm_vcpu *vcpu)
1796{
1797 /* TODO: String I/O for in kernel device */
1798
1799 if (vcpu->pio.in)
1800 kvm_iodevice_read(pio_dev, vcpu->pio.port,
1801 vcpu->pio.size,
1802 vcpu->pio_data);
1803 else
1804 kvm_iodevice_write(pio_dev, vcpu->pio.port,
1805 vcpu->pio.size,
1806 vcpu->pio_data);
1807}
1808
1730int kvm_setup_pio(struct kvm_vcpu *vcpu, struct kvm_run *run, int in, 1809int kvm_setup_pio(struct kvm_vcpu *vcpu, struct kvm_run *run, int in,
1731 int size, unsigned long count, int string, int down, 1810 int size, unsigned long count, int string, int down,
1732 gva_t address, int rep, unsigned port) 1811 gva_t address, int rep, unsigned port)
@@ -1735,6 +1814,7 @@ int kvm_setup_pio(struct kvm_vcpu *vcpu, struct kvm_run *run, int in,
1735 int i; 1814 int i;
1736 int nr_pages = 1; 1815 int nr_pages = 1;
1737 struct page *page; 1816 struct page *page;
1817 struct kvm_io_device *pio_dev;
1738 1818
1739 vcpu->run->exit_reason = KVM_EXIT_IO; 1819 vcpu->run->exit_reason = KVM_EXIT_IO;
1740 vcpu->run->io.direction = in ? KVM_EXIT_IO_IN : KVM_EXIT_IO_OUT; 1820 vcpu->run->io.direction = in ? KVM_EXIT_IO_IN : KVM_EXIT_IO_OUT;
@@ -1746,17 +1826,27 @@ int kvm_setup_pio(struct kvm_vcpu *vcpu, struct kvm_run *run, int in,
1746 vcpu->pio.cur_count = count; 1826 vcpu->pio.cur_count = count;
1747 vcpu->pio.size = size; 1827 vcpu->pio.size = size;
1748 vcpu->pio.in = in; 1828 vcpu->pio.in = in;
1829 vcpu->pio.port = port;
1749 vcpu->pio.string = string; 1830 vcpu->pio.string = string;
1750 vcpu->pio.down = down; 1831 vcpu->pio.down = down;
1751 vcpu->pio.guest_page_offset = offset_in_page(address); 1832 vcpu->pio.guest_page_offset = offset_in_page(address);
1752 vcpu->pio.rep = rep; 1833 vcpu->pio.rep = rep;
1753 1834
1835 pio_dev = vcpu_find_pio_dev(vcpu, port);
1754 if (!string) { 1836 if (!string) {
1755 kvm_arch_ops->cache_regs(vcpu); 1837 kvm_arch_ops->cache_regs(vcpu);
1756 memcpy(vcpu->pio_data, &vcpu->regs[VCPU_REGS_RAX], 4); 1838 memcpy(vcpu->pio_data, &vcpu->regs[VCPU_REGS_RAX], 4);
1757 kvm_arch_ops->decache_regs(vcpu); 1839 kvm_arch_ops->decache_regs(vcpu);
1840 if (pio_dev) {
1841 kernel_pio(pio_dev, vcpu);
1842 complete_pio(vcpu);
1843 return 1;
1844 }
1758 return 0; 1845 return 0;
1759 } 1846 }
1847 /* TODO: String I/O for in kernel device */
1848 if (pio_dev)
1849 printk(KERN_ERR "kvm_setup_pio: no string io support\n");
1760 1850
1761 if (!count) { 1851 if (!count) {
1762 kvm_arch_ops->skip_emulated_instruction(vcpu); 1852 kvm_arch_ops->skip_emulated_instruction(vcpu);
@@ -2273,34 +2363,12 @@ static int create_vcpu_fd(struct kvm_vcpu *vcpu)
2273 struct inode *inode; 2363 struct inode *inode;
2274 struct file *file; 2364 struct file *file;
2275 2365
2366 r = anon_inode_getfd(&fd, &inode, &file,
2367 "kvm-vcpu", &kvm_vcpu_fops, vcpu);
2368 if (r)
2369 return r;
2276 atomic_inc(&vcpu->kvm->filp->f_count); 2370 atomic_inc(&vcpu->kvm->filp->f_count);
2277 inode = kvmfs_inode(&kvm_vcpu_fops);
2278 if (IS_ERR(inode)) {
2279 r = PTR_ERR(inode);
2280 goto out1;
2281 }
2282
2283 file = kvmfs_file(inode, vcpu);
2284 if (IS_ERR(file)) {
2285 r = PTR_ERR(file);
2286 goto out2;
2287 }
2288
2289 r = get_unused_fd();
2290 if (r < 0)
2291 goto out3;
2292 fd = r;
2293 fd_install(fd, file);
2294
2295 return fd; 2371 return fd;
2296
2297out3:
2298 fput(file);
2299out2:
2300 iput(inode);
2301out1:
2302 fput(vcpu->kvm->filp);
2303 return r;
2304} 2372}
2305 2373
2306/* 2374/*
@@ -2363,6 +2431,11 @@ static int kvm_vm_ioctl_create_vcpu(struct kvm *kvm, int n)
2363 if (r < 0) 2431 if (r < 0)
2364 goto out_free_vcpus; 2432 goto out_free_vcpus;
2365 2433
2434 spin_lock(&kvm_lock);
2435 if (n >= kvm->nvcpus)
2436 kvm->nvcpus = n + 1;
2437 spin_unlock(&kvm_lock);
2438
2366 return r; 2439 return r;
2367 2440
2368out_free_vcpus: 2441out_free_vcpus:
@@ -2376,6 +2449,27 @@ out:
2376 return r; 2449 return r;
2377} 2450}
2378 2451
2452static void cpuid_fix_nx_cap(struct kvm_vcpu *vcpu)
2453{
2454 u64 efer;
2455 int i;
2456 struct kvm_cpuid_entry *e, *entry;
2457
2458 rdmsrl(MSR_EFER, efer);
2459 entry = NULL;
2460 for (i = 0; i < vcpu->cpuid_nent; ++i) {
2461 e = &vcpu->cpuid_entries[i];
2462 if (e->function == 0x80000001) {
2463 entry = e;
2464 break;
2465 }
2466 }
2467 if (entry && (entry->edx & EFER_NX) && !(efer & EFER_NX)) {
2468 entry->edx &= ~(1 << 20);
2469 printk(KERN_INFO ": guest NX capability removed\n");
2470 }
2471}
2472
2379static int kvm_vcpu_ioctl_set_cpuid(struct kvm_vcpu *vcpu, 2473static int kvm_vcpu_ioctl_set_cpuid(struct kvm_vcpu *vcpu,
2380 struct kvm_cpuid *cpuid, 2474 struct kvm_cpuid *cpuid,
2381 struct kvm_cpuid_entry __user *entries) 2475 struct kvm_cpuid_entry __user *entries)
@@ -2390,6 +2484,7 @@ static int kvm_vcpu_ioctl_set_cpuid(struct kvm_vcpu *vcpu,
2390 cpuid->nent * sizeof(struct kvm_cpuid_entry))) 2484 cpuid->nent * sizeof(struct kvm_cpuid_entry)))
2391 goto out; 2485 goto out;
2392 vcpu->cpuid_nent = cpuid->nent; 2486 vcpu->cpuid_nent = cpuid->nent;
2487 cpuid_fix_nx_cap(vcpu);
2393 return 0; 2488 return 0;
2394 2489
2395out: 2490out:
@@ -2738,41 +2833,18 @@ static int kvm_dev_ioctl_create_vm(void)
2738 struct file *file; 2833 struct file *file;
2739 struct kvm *kvm; 2834 struct kvm *kvm;
2740 2835
2741 inode = kvmfs_inode(&kvm_vm_fops);
2742 if (IS_ERR(inode)) {
2743 r = PTR_ERR(inode);
2744 goto out1;
2745 }
2746
2747 kvm = kvm_create_vm(); 2836 kvm = kvm_create_vm();
2748 if (IS_ERR(kvm)) { 2837 if (IS_ERR(kvm))
2749 r = PTR_ERR(kvm); 2838 return PTR_ERR(kvm);
2750 goto out2; 2839 r = anon_inode_getfd(&fd, &inode, &file, "kvm-vm", &kvm_vm_fops, kvm);
2840 if (r) {
2841 kvm_destroy_vm(kvm);
2842 return r;
2751 } 2843 }
2752 2844
2753 file = kvmfs_file(inode, kvm);
2754 if (IS_ERR(file)) {
2755 r = PTR_ERR(file);
2756 goto out3;
2757 }
2758 kvm->filp = file; 2845 kvm->filp = file;
2759 2846
2760 r = get_unused_fd();
2761 if (r < 0)
2762 goto out4;
2763 fd = r;
2764 fd_install(fd, file);
2765
2766 return fd; 2847 return fd;
2767
2768out4:
2769 fput(file);
2770out3:
2771 kvm_destroy_vm(kvm);
2772out2:
2773 iput(inode);
2774out1:
2775 return r;
2776} 2848}
2777 2849
2778static long kvm_dev_ioctl(struct file *filp, 2850static long kvm_dev_ioctl(struct file *filp,
@@ -2862,7 +2934,7 @@ static int kvm_reboot(struct notifier_block *notifier, unsigned long val,
2862 * in vmx root mode. 2934 * in vmx root mode.
2863 */ 2935 */
2864 printk(KERN_INFO "kvm: exiting hardware virtualization\n"); 2936 printk(KERN_INFO "kvm: exiting hardware virtualization\n");
2865 on_each_cpu(kvm_arch_ops->hardware_disable, NULL, 0, 1); 2937 on_each_cpu(hardware_disable, NULL, 0, 1);
2866 } 2938 }
2867 return NOTIFY_OK; 2939 return NOTIFY_OK;
2868} 2940}
@@ -2905,33 +2977,88 @@ static void decache_vcpus_on_cpu(int cpu)
2905 spin_unlock(&kvm_lock); 2977 spin_unlock(&kvm_lock);
2906} 2978}
2907 2979
2980static void hardware_enable(void *junk)
2981{
2982 int cpu = raw_smp_processor_id();
2983
2984 if (cpu_isset(cpu, cpus_hardware_enabled))
2985 return;
2986 cpu_set(cpu, cpus_hardware_enabled);
2987 kvm_arch_ops->hardware_enable(NULL);
2988}
2989
2990static void hardware_disable(void *junk)
2991{
2992 int cpu = raw_smp_processor_id();
2993
2994 if (!cpu_isset(cpu, cpus_hardware_enabled))
2995 return;
2996 cpu_clear(cpu, cpus_hardware_enabled);
2997 decache_vcpus_on_cpu(cpu);
2998 kvm_arch_ops->hardware_disable(NULL);
2999}
3000
2908static int kvm_cpu_hotplug(struct notifier_block *notifier, unsigned long val, 3001static int kvm_cpu_hotplug(struct notifier_block *notifier, unsigned long val,
2909 void *v) 3002 void *v)
2910{ 3003{
2911 int cpu = (long)v; 3004 int cpu = (long)v;
2912 3005
2913 switch (val) { 3006 switch (val) {
2914 case CPU_DOWN_PREPARE: 3007 case CPU_DYING:
2915 case CPU_DOWN_PREPARE_FROZEN: 3008 case CPU_DYING_FROZEN:
2916 case CPU_UP_CANCELED: 3009 case CPU_UP_CANCELED:
2917 case CPU_UP_CANCELED_FROZEN: 3010 case CPU_UP_CANCELED_FROZEN:
2918 printk(KERN_INFO "kvm: disabling virtualization on CPU%d\n", 3011 printk(KERN_INFO "kvm: disabling virtualization on CPU%d\n",
2919 cpu); 3012 cpu);
2920 decache_vcpus_on_cpu(cpu); 3013 smp_call_function_single(cpu, hardware_disable, NULL, 0, 1);
2921 smp_call_function_single(cpu, kvm_arch_ops->hardware_disable,
2922 NULL, 0, 1);
2923 break; 3014 break;
2924 case CPU_ONLINE: 3015 case CPU_ONLINE:
2925 case CPU_ONLINE_FROZEN: 3016 case CPU_ONLINE_FROZEN:
2926 printk(KERN_INFO "kvm: enabling virtualization on CPU%d\n", 3017 printk(KERN_INFO "kvm: enabling virtualization on CPU%d\n",
2927 cpu); 3018 cpu);
2928 smp_call_function_single(cpu, kvm_arch_ops->hardware_enable, 3019 smp_call_function_single(cpu, hardware_enable, NULL, 0, 1);
2929 NULL, 0, 1);
2930 break; 3020 break;
2931 } 3021 }
2932 return NOTIFY_OK; 3022 return NOTIFY_OK;
2933} 3023}
2934 3024
3025void kvm_io_bus_init(struct kvm_io_bus *bus)
3026{
3027 memset(bus, 0, sizeof(*bus));
3028}
3029
3030void kvm_io_bus_destroy(struct kvm_io_bus *bus)
3031{
3032 int i;
3033
3034 for (i = 0; i < bus->dev_count; i++) {
3035 struct kvm_io_device *pos = bus->devs[i];
3036
3037 kvm_iodevice_destructor(pos);
3038 }
3039}
3040
3041struct kvm_io_device *kvm_io_bus_find_dev(struct kvm_io_bus *bus, gpa_t addr)
3042{
3043 int i;
3044
3045 for (i = 0; i < bus->dev_count; i++) {
3046 struct kvm_io_device *pos = bus->devs[i];
3047
3048 if (pos->in_range(pos, addr))
3049 return pos;
3050 }
3051
3052 return NULL;
3053}
3054
3055void kvm_io_bus_register_dev(struct kvm_io_bus *bus, struct kvm_io_device *dev)
3056{
3057 BUG_ON(bus->dev_count > (NR_IOBUS_DEVS-1));
3058
3059 bus->devs[bus->dev_count++] = dev;
3060}
3061
2935static struct notifier_block kvm_cpu_notifier = { 3062static struct notifier_block kvm_cpu_notifier = {
2936 .notifier_call = kvm_cpu_hotplug, 3063 .notifier_call = kvm_cpu_hotplug,
2937 .priority = 20, /* must be > scheduler priority */ 3064 .priority = 20, /* must be > scheduler priority */
@@ -2983,14 +3110,13 @@ static void kvm_exit_debug(void)
2983 3110
2984static int kvm_suspend(struct sys_device *dev, pm_message_t state) 3111static int kvm_suspend(struct sys_device *dev, pm_message_t state)
2985{ 3112{
2986 decache_vcpus_on_cpu(raw_smp_processor_id()); 3113 hardware_disable(NULL);
2987 on_each_cpu(kvm_arch_ops->hardware_disable, NULL, 0, 1);
2988 return 0; 3114 return 0;
2989} 3115}
2990 3116
2991static int kvm_resume(struct sys_device *dev) 3117static int kvm_resume(struct sys_device *dev)
2992{ 3118{
2993 on_each_cpu(kvm_arch_ops->hardware_enable, NULL, 0, 1); 3119 hardware_enable(NULL);
2994 return 0; 3120 return 0;
2995} 3121}
2996 3122
@@ -3007,18 +3133,6 @@ static struct sys_device kvm_sysdev = {
3007 3133
3008hpa_t bad_page_address; 3134hpa_t bad_page_address;
3009 3135
3010static int kvmfs_get_sb(struct file_system_type *fs_type, int flags,
3011 const char *dev_name, void *data, struct vfsmount *mnt)
3012{
3013 return get_sb_pseudo(fs_type, "kvm:", NULL, KVMFS_SUPER_MAGIC, mnt);
3014}
3015
3016static struct file_system_type kvm_fs_type = {
3017 .name = "kvmfs",
3018 .get_sb = kvmfs_get_sb,
3019 .kill_sb = kill_anon_super,
3020};
3021
3022int kvm_init_arch(struct kvm_arch_ops *ops, struct module *module) 3136int kvm_init_arch(struct kvm_arch_ops *ops, struct module *module)
3023{ 3137{
3024 int r; 3138 int r;
@@ -3043,7 +3157,7 @@ int kvm_init_arch(struct kvm_arch_ops *ops, struct module *module)
3043 if (r < 0) 3157 if (r < 0)
3044 goto out; 3158 goto out;
3045 3159
3046 on_each_cpu(kvm_arch_ops->hardware_enable, NULL, 0, 1); 3160 on_each_cpu(hardware_enable, NULL, 0, 1);
3047 r = register_cpu_notifier(&kvm_cpu_notifier); 3161 r = register_cpu_notifier(&kvm_cpu_notifier);
3048 if (r) 3162 if (r)
3049 goto out_free_1; 3163 goto out_free_1;
@@ -3075,7 +3189,7 @@ out_free_2:
3075 unregister_reboot_notifier(&kvm_reboot_notifier); 3189 unregister_reboot_notifier(&kvm_reboot_notifier);
3076 unregister_cpu_notifier(&kvm_cpu_notifier); 3190 unregister_cpu_notifier(&kvm_cpu_notifier);
3077out_free_1: 3191out_free_1:
3078 on_each_cpu(kvm_arch_ops->hardware_disable, NULL, 0, 1); 3192 on_each_cpu(hardware_disable, NULL, 0, 1);
3079 kvm_arch_ops->hardware_unsetup(); 3193 kvm_arch_ops->hardware_unsetup();
3080out: 3194out:
3081 kvm_arch_ops = NULL; 3195 kvm_arch_ops = NULL;
@@ -3089,7 +3203,7 @@ void kvm_exit_arch(void)
3089 sysdev_class_unregister(&kvm_sysdev_class); 3203 sysdev_class_unregister(&kvm_sysdev_class);
3090 unregister_reboot_notifier(&kvm_reboot_notifier); 3204 unregister_reboot_notifier(&kvm_reboot_notifier);
3091 unregister_cpu_notifier(&kvm_cpu_notifier); 3205 unregister_cpu_notifier(&kvm_cpu_notifier);
3092 on_each_cpu(kvm_arch_ops->hardware_disable, NULL, 0, 1); 3206 on_each_cpu(hardware_disable, NULL, 0, 1);
3093 kvm_arch_ops->hardware_unsetup(); 3207 kvm_arch_ops->hardware_unsetup();
3094 kvm_arch_ops = NULL; 3208 kvm_arch_ops = NULL;
3095} 3209}
@@ -3103,14 +3217,6 @@ static __init int kvm_init(void)
3103 if (r) 3217 if (r)
3104 goto out4; 3218 goto out4;
3105 3219
3106 r = register_filesystem(&kvm_fs_type);
3107 if (r)
3108 goto out3;
3109
3110 kvmfs_mnt = kern_mount(&kvm_fs_type);
3111 r = PTR_ERR(kvmfs_mnt);
3112 if (IS_ERR(kvmfs_mnt))
3113 goto out2;
3114 kvm_init_debug(); 3220 kvm_init_debug();
3115 3221
3116 kvm_init_msr_list(); 3222 kvm_init_msr_list();
@@ -3127,10 +3233,6 @@ static __init int kvm_init(void)
3127 3233
3128out: 3234out:
3129 kvm_exit_debug(); 3235 kvm_exit_debug();
3130 mntput(kvmfs_mnt);
3131out2:
3132 unregister_filesystem(&kvm_fs_type);
3133out3:
3134 kvm_mmu_module_exit(); 3236 kvm_mmu_module_exit();
3135out4: 3237out4:
3136 return r; 3238 return r;
@@ -3140,8 +3242,6 @@ static __exit void kvm_exit(void)
3140{ 3242{
3141 kvm_exit_debug(); 3243 kvm_exit_debug();
3142 __free_page(pfn_to_page(bad_page_address >> PAGE_SHIFT)); 3244 __free_page(pfn_to_page(bad_page_address >> PAGE_SHIFT));
3143 mntput(kvmfs_mnt);
3144 unregister_filesystem(&kvm_fs_type);
3145 kvm_mmu_module_exit(); 3245 kvm_mmu_module_exit();
3146} 3246}
3147 3247
diff --git a/drivers/kvm/mmu.c b/drivers/kvm/mmu.c
index e8e228118de9..b297a6b111ac 100644
--- a/drivers/kvm/mmu.c
+++ b/drivers/kvm/mmu.c
@@ -16,15 +16,18 @@
16 * the COPYING file in the top-level directory. 16 * the COPYING file in the top-level directory.
17 * 17 *
18 */ 18 */
19
20#include "vmx.h"
21#include "kvm.h"
22
19#include <linux/types.h> 23#include <linux/types.h>
20#include <linux/string.h> 24#include <linux/string.h>
21#include <asm/page.h>
22#include <linux/mm.h> 25#include <linux/mm.h>
23#include <linux/highmem.h> 26#include <linux/highmem.h>
24#include <linux/module.h> 27#include <linux/module.h>
25 28
26#include "vmx.h" 29#include <asm/page.h>
27#include "kvm.h" 30#include <asm/cmpxchg.h>
28 31
29#undef MMU_DEBUG 32#undef MMU_DEBUG
30 33
@@ -90,25 +93,11 @@ static int dbg = 1;
90#define PT32_DIR_PSE36_MASK (((1ULL << PT32_DIR_PSE36_SIZE) - 1) << PT32_DIR_PSE36_SHIFT) 93#define PT32_DIR_PSE36_MASK (((1ULL << PT32_DIR_PSE36_SIZE) - 1) << PT32_DIR_PSE36_SHIFT)
91 94
92 95
93#define PT32_PTE_COPY_MASK \
94 (PT_PRESENT_MASK | PT_ACCESSED_MASK | PT_DIRTY_MASK | PT_GLOBAL_MASK)
95
96#define PT64_PTE_COPY_MASK (PT64_NX_MASK | PT32_PTE_COPY_MASK)
97
98#define PT_FIRST_AVAIL_BITS_SHIFT 9 96#define PT_FIRST_AVAIL_BITS_SHIFT 9
99#define PT64_SECOND_AVAIL_BITS_SHIFT 52 97#define PT64_SECOND_AVAIL_BITS_SHIFT 52
100 98
101#define PT_SHADOW_PS_MARK (1ULL << PT_FIRST_AVAIL_BITS_SHIFT)
102#define PT_SHADOW_IO_MARK (1ULL << PT_FIRST_AVAIL_BITS_SHIFT) 99#define PT_SHADOW_IO_MARK (1ULL << PT_FIRST_AVAIL_BITS_SHIFT)
103 100
104#define PT_SHADOW_WRITABLE_SHIFT (PT_FIRST_AVAIL_BITS_SHIFT + 1)
105#define PT_SHADOW_WRITABLE_MASK (1ULL << PT_SHADOW_WRITABLE_SHIFT)
106
107#define PT_SHADOW_USER_SHIFT (PT_SHADOW_WRITABLE_SHIFT + 1)
108#define PT_SHADOW_USER_MASK (1ULL << (PT_SHADOW_USER_SHIFT))
109
110#define PT_SHADOW_BITS_OFFSET (PT_SHADOW_WRITABLE_SHIFT - PT_WRITABLE_SHIFT)
111
112#define VALID_PAGE(x) ((x) != INVALID_PAGE) 101#define VALID_PAGE(x) ((x) != INVALID_PAGE)
113 102
114#define PT64_LEVEL_BITS 9 103#define PT64_LEVEL_BITS 9
@@ -165,6 +154,8 @@ struct kvm_rmap_desc {
165 154
166static struct kmem_cache *pte_chain_cache; 155static struct kmem_cache *pte_chain_cache;
167static struct kmem_cache *rmap_desc_cache; 156static struct kmem_cache *rmap_desc_cache;
157static struct kmem_cache *mmu_page_cache;
158static struct kmem_cache *mmu_page_header_cache;
168 159
169static int is_write_protection(struct kvm_vcpu *vcpu) 160static int is_write_protection(struct kvm_vcpu *vcpu)
170{ 161{
@@ -202,6 +193,15 @@ static int is_rmap_pte(u64 pte)
202 == (PT_WRITABLE_MASK | PT_PRESENT_MASK); 193 == (PT_WRITABLE_MASK | PT_PRESENT_MASK);
203} 194}
204 195
196static void set_shadow_pte(u64 *sptep, u64 spte)
197{
198#ifdef CONFIG_X86_64
199 set_64bit((unsigned long *)sptep, spte);
200#else
201 set_64bit((unsigned long long *)sptep, spte);
202#endif
203}
204
205static int mmu_topup_memory_cache(struct kvm_mmu_memory_cache *cache, 205static int mmu_topup_memory_cache(struct kvm_mmu_memory_cache *cache,
206 struct kmem_cache *base_cache, int min, 206 struct kmem_cache *base_cache, int min,
207 gfp_t gfp_flags) 207 gfp_t gfp_flags)
@@ -235,6 +235,14 @@ static int __mmu_topup_memory_caches(struct kvm_vcpu *vcpu, gfp_t gfp_flags)
235 goto out; 235 goto out;
236 r = mmu_topup_memory_cache(&vcpu->mmu_rmap_desc_cache, 236 r = mmu_topup_memory_cache(&vcpu->mmu_rmap_desc_cache,
237 rmap_desc_cache, 1, gfp_flags); 237 rmap_desc_cache, 1, gfp_flags);
238 if (r)
239 goto out;
240 r = mmu_topup_memory_cache(&vcpu->mmu_page_cache,
241 mmu_page_cache, 4, gfp_flags);
242 if (r)
243 goto out;
244 r = mmu_topup_memory_cache(&vcpu->mmu_page_header_cache,
245 mmu_page_header_cache, 4, gfp_flags);
238out: 246out:
239 return r; 247 return r;
240} 248}
@@ -258,6 +266,8 @@ static void mmu_free_memory_caches(struct kvm_vcpu *vcpu)
258{ 266{
259 mmu_free_memory_cache(&vcpu->mmu_pte_chain_cache); 267 mmu_free_memory_cache(&vcpu->mmu_pte_chain_cache);
260 mmu_free_memory_cache(&vcpu->mmu_rmap_desc_cache); 268 mmu_free_memory_cache(&vcpu->mmu_rmap_desc_cache);
269 mmu_free_memory_cache(&vcpu->mmu_page_cache);
270 mmu_free_memory_cache(&vcpu->mmu_page_header_cache);
261} 271}
262 272
263static void *mmu_memory_cache_alloc(struct kvm_mmu_memory_cache *mc, 273static void *mmu_memory_cache_alloc(struct kvm_mmu_memory_cache *mc,
@@ -433,19 +443,18 @@ static void rmap_write_protect(struct kvm_vcpu *vcpu, u64 gfn)
433 BUG_ON(!(*spte & PT_WRITABLE_MASK)); 443 BUG_ON(!(*spte & PT_WRITABLE_MASK));
434 rmap_printk("rmap_write_protect: spte %p %llx\n", spte, *spte); 444 rmap_printk("rmap_write_protect: spte %p %llx\n", spte, *spte);
435 rmap_remove(vcpu, spte); 445 rmap_remove(vcpu, spte);
436 kvm_arch_ops->tlb_flush(vcpu); 446 set_shadow_pte(spte, *spte & ~PT_WRITABLE_MASK);
437 *spte &= ~(u64)PT_WRITABLE_MASK; 447 kvm_flush_remote_tlbs(vcpu->kvm);
438 } 448 }
439} 449}
440 450
441#ifdef MMU_DEBUG 451#ifdef MMU_DEBUG
442static int is_empty_shadow_page(hpa_t page_hpa) 452static int is_empty_shadow_page(u64 *spt)
443{ 453{
444 u64 *pos; 454 u64 *pos;
445 u64 *end; 455 u64 *end;
446 456
447 for (pos = __va(page_hpa), end = pos + PAGE_SIZE / sizeof(u64); 457 for (pos = spt, end = pos + PAGE_SIZE / sizeof(u64); pos != end; pos++)
448 pos != end; pos++)
449 if (*pos != 0) { 458 if (*pos != 0) {
450 printk(KERN_ERR "%s: %p %llx\n", __FUNCTION__, 459 printk(KERN_ERR "%s: %p %llx\n", __FUNCTION__,
451 pos, *pos); 460 pos, *pos);
@@ -455,13 +464,13 @@ static int is_empty_shadow_page(hpa_t page_hpa)
455} 464}
456#endif 465#endif
457 466
458static void kvm_mmu_free_page(struct kvm_vcpu *vcpu, hpa_t page_hpa) 467static void kvm_mmu_free_page(struct kvm_vcpu *vcpu,
468 struct kvm_mmu_page *page_head)
459{ 469{
460 struct kvm_mmu_page *page_head = page_header(page_hpa); 470 ASSERT(is_empty_shadow_page(page_head->spt));
461 471 list_del(&page_head->link);
462 ASSERT(is_empty_shadow_page(page_hpa)); 472 mmu_memory_cache_free(&vcpu->mmu_page_cache, page_head->spt);
463 page_head->page_hpa = page_hpa; 473 mmu_memory_cache_free(&vcpu->mmu_page_header_cache, page_head);
464 list_move(&page_head->link, &vcpu->free_pages);
465 ++vcpu->kvm->n_free_mmu_pages; 474 ++vcpu->kvm->n_free_mmu_pages;
466} 475}
467 476
@@ -475,12 +484,15 @@ static struct kvm_mmu_page *kvm_mmu_alloc_page(struct kvm_vcpu *vcpu,
475{ 484{
476 struct kvm_mmu_page *page; 485 struct kvm_mmu_page *page;
477 486
478 if (list_empty(&vcpu->free_pages)) 487 if (!vcpu->kvm->n_free_mmu_pages)
479 return NULL; 488 return NULL;
480 489
481 page = list_entry(vcpu->free_pages.next, struct kvm_mmu_page, link); 490 page = mmu_memory_cache_alloc(&vcpu->mmu_page_header_cache,
482 list_move(&page->link, &vcpu->kvm->active_mmu_pages); 491 sizeof *page);
483 ASSERT(is_empty_shadow_page(page->page_hpa)); 492 page->spt = mmu_memory_cache_alloc(&vcpu->mmu_page_cache, PAGE_SIZE);
493 set_page_private(virt_to_page(page->spt), (unsigned long)page);
494 list_add(&page->link, &vcpu->kvm->active_mmu_pages);
495 ASSERT(is_empty_shadow_page(page->spt));
484 page->slot_bitmap = 0; 496 page->slot_bitmap = 0;
485 page->multimapped = 0; 497 page->multimapped = 0;
486 page->parent_pte = parent_pte; 498 page->parent_pte = parent_pte;
@@ -638,7 +650,7 @@ static void kvm_mmu_page_unlink_children(struct kvm_vcpu *vcpu,
638 u64 *pt; 650 u64 *pt;
639 u64 ent; 651 u64 ent;
640 652
641 pt = __va(page->page_hpa); 653 pt = page->spt;
642 654
643 if (page->role.level == PT_PAGE_TABLE_LEVEL) { 655 if (page->role.level == PT_PAGE_TABLE_LEVEL) {
644 for (i = 0; i < PT64_ENT_PER_PAGE; ++i) { 656 for (i = 0; i < PT64_ENT_PER_PAGE; ++i) {
@@ -646,7 +658,7 @@ static void kvm_mmu_page_unlink_children(struct kvm_vcpu *vcpu,
646 rmap_remove(vcpu, &pt[i]); 658 rmap_remove(vcpu, &pt[i]);
647 pt[i] = 0; 659 pt[i] = 0;
648 } 660 }
649 kvm_arch_ops->tlb_flush(vcpu); 661 kvm_flush_remote_tlbs(vcpu->kvm);
650 return; 662 return;
651 } 663 }
652 664
@@ -659,6 +671,7 @@ static void kvm_mmu_page_unlink_children(struct kvm_vcpu *vcpu,
659 ent &= PT64_BASE_ADDR_MASK; 671 ent &= PT64_BASE_ADDR_MASK;
660 mmu_page_remove_parent_pte(vcpu, page_header(ent), &pt[i]); 672 mmu_page_remove_parent_pte(vcpu, page_header(ent), &pt[i]);
661 } 673 }
674 kvm_flush_remote_tlbs(vcpu->kvm);
662} 675}
663 676
664static void kvm_mmu_put_page(struct kvm_vcpu *vcpu, 677static void kvm_mmu_put_page(struct kvm_vcpu *vcpu,
@@ -685,12 +698,12 @@ static void kvm_mmu_zap_page(struct kvm_vcpu *vcpu,
685 } 698 }
686 BUG_ON(!parent_pte); 699 BUG_ON(!parent_pte);
687 kvm_mmu_put_page(vcpu, page, parent_pte); 700 kvm_mmu_put_page(vcpu, page, parent_pte);
688 *parent_pte = 0; 701 set_shadow_pte(parent_pte, 0);
689 } 702 }
690 kvm_mmu_page_unlink_children(vcpu, page); 703 kvm_mmu_page_unlink_children(vcpu, page);
691 if (!page->root_count) { 704 if (!page->root_count) {
692 hlist_del(&page->hash_link); 705 hlist_del(&page->hash_link);
693 kvm_mmu_free_page(vcpu, page->page_hpa); 706 kvm_mmu_free_page(vcpu, page);
694 } else 707 } else
695 list_move(&page->link, &vcpu->kvm->active_mmu_pages); 708 list_move(&page->link, &vcpu->kvm->active_mmu_pages);
696} 709}
@@ -717,6 +730,17 @@ static int kvm_mmu_unprotect_page(struct kvm_vcpu *vcpu, gfn_t gfn)
717 return r; 730 return r;
718} 731}
719 732
733static void mmu_unshadow(struct kvm_vcpu *vcpu, gfn_t gfn)
734{
735 struct kvm_mmu_page *page;
736
737 while ((page = kvm_mmu_lookup_page(vcpu, gfn)) != NULL) {
738 pgprintk("%s: zap %lx %x\n",
739 __FUNCTION__, gfn, page->role.word);
740 kvm_mmu_zap_page(vcpu, page);
741 }
742}
743
720static void page_header_update_slot(struct kvm *kvm, void *pte, gpa_t gpa) 744static void page_header_update_slot(struct kvm *kvm, void *pte, gpa_t gpa)
721{ 745{
722 int slot = memslot_id(kvm, gfn_to_memslot(kvm, gpa >> PAGE_SHIFT)); 746 int slot = memslot_id(kvm, gfn_to_memslot(kvm, gpa >> PAGE_SHIFT));
@@ -805,7 +829,7 @@ static int nonpaging_map(struct kvm_vcpu *vcpu, gva_t v, hpa_t p)
805 return -ENOMEM; 829 return -ENOMEM;
806 } 830 }
807 831
808 table[index] = new_table->page_hpa | PT_PRESENT_MASK 832 table[index] = __pa(new_table->spt) | PT_PRESENT_MASK
809 | PT_WRITABLE_MASK | PT_USER_MASK; 833 | PT_WRITABLE_MASK | PT_USER_MASK;
810 } 834 }
811 table_addr = table[index] & PT64_BASE_ADDR_MASK; 835 table_addr = table[index] & PT64_BASE_ADDR_MASK;
@@ -817,11 +841,12 @@ static void mmu_free_roots(struct kvm_vcpu *vcpu)
817 int i; 841 int i;
818 struct kvm_mmu_page *page; 842 struct kvm_mmu_page *page;
819 843
844 if (!VALID_PAGE(vcpu->mmu.root_hpa))
845 return;
820#ifdef CONFIG_X86_64 846#ifdef CONFIG_X86_64
821 if (vcpu->mmu.shadow_root_level == PT64_ROOT_LEVEL) { 847 if (vcpu->mmu.shadow_root_level == PT64_ROOT_LEVEL) {
822 hpa_t root = vcpu->mmu.root_hpa; 848 hpa_t root = vcpu->mmu.root_hpa;
823 849
824 ASSERT(VALID_PAGE(root));
825 page = page_header(root); 850 page = page_header(root);
826 --page->root_count; 851 --page->root_count;
827 vcpu->mmu.root_hpa = INVALID_PAGE; 852 vcpu->mmu.root_hpa = INVALID_PAGE;
@@ -832,7 +857,6 @@ static void mmu_free_roots(struct kvm_vcpu *vcpu)
832 hpa_t root = vcpu->mmu.pae_root[i]; 857 hpa_t root = vcpu->mmu.pae_root[i];
833 858
834 if (root) { 859 if (root) {
835 ASSERT(VALID_PAGE(root));
836 root &= PT64_BASE_ADDR_MASK; 860 root &= PT64_BASE_ADDR_MASK;
837 page = page_header(root); 861 page = page_header(root);
838 --page->root_count; 862 --page->root_count;
@@ -857,7 +881,7 @@ static void mmu_alloc_roots(struct kvm_vcpu *vcpu)
857 ASSERT(!VALID_PAGE(root)); 881 ASSERT(!VALID_PAGE(root));
858 page = kvm_mmu_get_page(vcpu, root_gfn, 0, 882 page = kvm_mmu_get_page(vcpu, root_gfn, 0,
859 PT64_ROOT_LEVEL, 0, 0, NULL); 883 PT64_ROOT_LEVEL, 0, 0, NULL);
860 root = page->page_hpa; 884 root = __pa(page->spt);
861 ++page->root_count; 885 ++page->root_count;
862 vcpu->mmu.root_hpa = root; 886 vcpu->mmu.root_hpa = root;
863 return; 887 return;
@@ -878,7 +902,7 @@ static void mmu_alloc_roots(struct kvm_vcpu *vcpu)
878 page = kvm_mmu_get_page(vcpu, root_gfn, i << 30, 902 page = kvm_mmu_get_page(vcpu, root_gfn, i << 30,
879 PT32_ROOT_LEVEL, !is_paging(vcpu), 903 PT32_ROOT_LEVEL, !is_paging(vcpu),
880 0, NULL); 904 0, NULL);
881 root = page->page_hpa; 905 root = __pa(page->spt);
882 ++page->root_count; 906 ++page->root_count;
883 vcpu->mmu.pae_root[i] = root | PT_PRESENT_MASK; 907 vcpu->mmu.pae_root[i] = root | PT_PRESENT_MASK;
884 } 908 }
@@ -928,9 +952,7 @@ static int nonpaging_init_context(struct kvm_vcpu *vcpu)
928 context->free = nonpaging_free; 952 context->free = nonpaging_free;
929 context->root_level = 0; 953 context->root_level = 0;
930 context->shadow_root_level = PT32E_ROOT_LEVEL; 954 context->shadow_root_level = PT32E_ROOT_LEVEL;
931 mmu_alloc_roots(vcpu); 955 context->root_hpa = INVALID_PAGE;
932 ASSERT(VALID_PAGE(context->root_hpa));
933 kvm_arch_ops->set_cr3(vcpu, context->root_hpa);
934 return 0; 956 return 0;
935} 957}
936 958
@@ -944,59 +966,6 @@ static void paging_new_cr3(struct kvm_vcpu *vcpu)
944{ 966{
945 pgprintk("%s: cr3 %lx\n", __FUNCTION__, vcpu->cr3); 967 pgprintk("%s: cr3 %lx\n", __FUNCTION__, vcpu->cr3);
946 mmu_free_roots(vcpu); 968 mmu_free_roots(vcpu);
947 if (unlikely(vcpu->kvm->n_free_mmu_pages < KVM_MIN_FREE_MMU_PAGES))
948 kvm_mmu_free_some_pages(vcpu);
949 mmu_alloc_roots(vcpu);
950 kvm_mmu_flush_tlb(vcpu);
951 kvm_arch_ops->set_cr3(vcpu, vcpu->mmu.root_hpa);
952}
953
954static inline void set_pte_common(struct kvm_vcpu *vcpu,
955 u64 *shadow_pte,
956 gpa_t gaddr,
957 int dirty,
958 u64 access_bits,
959 gfn_t gfn)
960{
961 hpa_t paddr;
962
963 *shadow_pte |= access_bits << PT_SHADOW_BITS_OFFSET;
964 if (!dirty)
965 access_bits &= ~PT_WRITABLE_MASK;
966
967 paddr = gpa_to_hpa(vcpu, gaddr & PT64_BASE_ADDR_MASK);
968
969 *shadow_pte |= access_bits;
970
971 if (is_error_hpa(paddr)) {
972 *shadow_pte |= gaddr;
973 *shadow_pte |= PT_SHADOW_IO_MARK;
974 *shadow_pte &= ~PT_PRESENT_MASK;
975 return;
976 }
977
978 *shadow_pte |= paddr;
979
980 if (access_bits & PT_WRITABLE_MASK) {
981 struct kvm_mmu_page *shadow;
982
983 shadow = kvm_mmu_lookup_page(vcpu, gfn);
984 if (shadow) {
985 pgprintk("%s: found shadow page for %lx, marking ro\n",
986 __FUNCTION__, gfn);
987 access_bits &= ~PT_WRITABLE_MASK;
988 if (is_writeble_pte(*shadow_pte)) {
989 *shadow_pte &= ~PT_WRITABLE_MASK;
990 kvm_arch_ops->tlb_flush(vcpu);
991 }
992 }
993 }
994
995 if (access_bits & PT_WRITABLE_MASK)
996 mark_page_dirty(vcpu->kvm, gaddr >> PAGE_SHIFT);
997
998 page_header_update_slot(vcpu->kvm, shadow_pte, gaddr);
999 rmap_add(vcpu, shadow_pte);
1000} 969}
1001 970
1002static void inject_page_fault(struct kvm_vcpu *vcpu, 971static void inject_page_fault(struct kvm_vcpu *vcpu,
@@ -1006,23 +975,6 @@ static void inject_page_fault(struct kvm_vcpu *vcpu,
1006 kvm_arch_ops->inject_page_fault(vcpu, addr, err_code); 975 kvm_arch_ops->inject_page_fault(vcpu, addr, err_code);
1007} 976}
1008 977
1009static inline int fix_read_pf(u64 *shadow_ent)
1010{
1011 if ((*shadow_ent & PT_SHADOW_USER_MASK) &&
1012 !(*shadow_ent & PT_USER_MASK)) {
1013 /*
1014 * If supervisor write protect is disabled, we shadow kernel
1015 * pages as user pages so we can trap the write access.
1016 */
1017 *shadow_ent |= PT_USER_MASK;
1018 *shadow_ent &= ~PT_WRITABLE_MASK;
1019
1020 return 1;
1021
1022 }
1023 return 0;
1024}
1025
1026static void paging_free(struct kvm_vcpu *vcpu) 978static void paging_free(struct kvm_vcpu *vcpu)
1027{ 979{
1028 nonpaging_free(vcpu); 980 nonpaging_free(vcpu);
@@ -1047,10 +999,7 @@ static int paging64_init_context_common(struct kvm_vcpu *vcpu, int level)
1047 context->free = paging_free; 999 context->free = paging_free;
1048 context->root_level = level; 1000 context->root_level = level;
1049 context->shadow_root_level = level; 1001 context->shadow_root_level = level;
1050 mmu_alloc_roots(vcpu); 1002 context->root_hpa = INVALID_PAGE;
1051 ASSERT(VALID_PAGE(context->root_hpa));
1052 kvm_arch_ops->set_cr3(vcpu, context->root_hpa |
1053 (vcpu->cr3 & (CR3_PCD_MASK | CR3_WPT_MASK)));
1054 return 0; 1003 return 0;
1055} 1004}
1056 1005
@@ -1069,10 +1018,7 @@ static int paging32_init_context(struct kvm_vcpu *vcpu)
1069 context->free = paging_free; 1018 context->free = paging_free;
1070 context->root_level = PT32_ROOT_LEVEL; 1019 context->root_level = PT32_ROOT_LEVEL;
1071 context->shadow_root_level = PT32E_ROOT_LEVEL; 1020 context->shadow_root_level = PT32E_ROOT_LEVEL;
1072 mmu_alloc_roots(vcpu); 1021 context->root_hpa = INVALID_PAGE;
1073 ASSERT(VALID_PAGE(context->root_hpa));
1074 kvm_arch_ops->set_cr3(vcpu, context->root_hpa |
1075 (vcpu->cr3 & (CR3_PCD_MASK | CR3_WPT_MASK)));
1076 return 0; 1022 return 0;
1077} 1023}
1078 1024
@@ -1107,18 +1053,33 @@ static void destroy_kvm_mmu(struct kvm_vcpu *vcpu)
1107 1053
1108int kvm_mmu_reset_context(struct kvm_vcpu *vcpu) 1054int kvm_mmu_reset_context(struct kvm_vcpu *vcpu)
1109{ 1055{
1056 destroy_kvm_mmu(vcpu);
1057 return init_kvm_mmu(vcpu);
1058}
1059
1060int kvm_mmu_load(struct kvm_vcpu *vcpu)
1061{
1110 int r; 1062 int r;
1111 1063
1112 destroy_kvm_mmu(vcpu); 1064 spin_lock(&vcpu->kvm->lock);
1113 r = init_kvm_mmu(vcpu);
1114 if (r < 0)
1115 goto out;
1116 r = mmu_topup_memory_caches(vcpu); 1065 r = mmu_topup_memory_caches(vcpu);
1066 if (r)
1067 goto out;
1068 mmu_alloc_roots(vcpu);
1069 kvm_arch_ops->set_cr3(vcpu, vcpu->mmu.root_hpa);
1070 kvm_mmu_flush_tlb(vcpu);
1117out: 1071out:
1072 spin_unlock(&vcpu->kvm->lock);
1118 return r; 1073 return r;
1119} 1074}
1075EXPORT_SYMBOL_GPL(kvm_mmu_load);
1076
1077void kvm_mmu_unload(struct kvm_vcpu *vcpu)
1078{
1079 mmu_free_roots(vcpu);
1080}
1120 1081
1121static void mmu_pre_write_zap_pte(struct kvm_vcpu *vcpu, 1082static void mmu_pte_write_zap_pte(struct kvm_vcpu *vcpu,
1122 struct kvm_mmu_page *page, 1083 struct kvm_mmu_page *page,
1123 u64 *spte) 1084 u64 *spte)
1124{ 1085{
@@ -1135,9 +1096,25 @@ static void mmu_pre_write_zap_pte(struct kvm_vcpu *vcpu,
1135 } 1096 }
1136 } 1097 }
1137 *spte = 0; 1098 *spte = 0;
1099 kvm_flush_remote_tlbs(vcpu->kvm);
1100}
1101
1102static void mmu_pte_write_new_pte(struct kvm_vcpu *vcpu,
1103 struct kvm_mmu_page *page,
1104 u64 *spte,
1105 const void *new, int bytes)
1106{
1107 if (page->role.level != PT_PAGE_TABLE_LEVEL)
1108 return;
1109
1110 if (page->role.glevels == PT32_ROOT_LEVEL)
1111 paging32_update_pte(vcpu, page, spte, new, bytes);
1112 else
1113 paging64_update_pte(vcpu, page, spte, new, bytes);
1138} 1114}
1139 1115
1140void kvm_mmu_pre_write(struct kvm_vcpu *vcpu, gpa_t gpa, int bytes) 1116void kvm_mmu_pte_write(struct kvm_vcpu *vcpu, gpa_t gpa,
1117 const u8 *old, const u8 *new, int bytes)
1141{ 1118{
1142 gfn_t gfn = gpa >> PAGE_SHIFT; 1119 gfn_t gfn = gpa >> PAGE_SHIFT;
1143 struct kvm_mmu_page *page; 1120 struct kvm_mmu_page *page;
@@ -1149,6 +1126,7 @@ void kvm_mmu_pre_write(struct kvm_vcpu *vcpu, gpa_t gpa, int bytes)
1149 unsigned pte_size; 1126 unsigned pte_size;
1150 unsigned page_offset; 1127 unsigned page_offset;
1151 unsigned misaligned; 1128 unsigned misaligned;
1129 unsigned quadrant;
1152 int level; 1130 int level;
1153 int flooded = 0; 1131 int flooded = 0;
1154 int npte; 1132 int npte;
@@ -1169,6 +1147,7 @@ void kvm_mmu_pre_write(struct kvm_vcpu *vcpu, gpa_t gpa, int bytes)
1169 continue; 1147 continue;
1170 pte_size = page->role.glevels == PT32_ROOT_LEVEL ? 4 : 8; 1148 pte_size = page->role.glevels == PT32_ROOT_LEVEL ? 4 : 8;
1171 misaligned = (offset ^ (offset + bytes - 1)) & ~(pte_size - 1); 1149 misaligned = (offset ^ (offset + bytes - 1)) & ~(pte_size - 1);
1150 misaligned |= bytes < 4;
1172 if (misaligned || flooded) { 1151 if (misaligned || flooded) {
1173 /* 1152 /*
1174 * Misaligned accesses are too much trouble to fix 1153 * Misaligned accesses are too much trouble to fix
@@ -1200,21 +1179,20 @@ void kvm_mmu_pre_write(struct kvm_vcpu *vcpu, gpa_t gpa, int bytes)
1200 page_offset <<= 1; 1179 page_offset <<= 1;
1201 npte = 2; 1180 npte = 2;
1202 } 1181 }
1182 quadrant = page_offset >> PAGE_SHIFT;
1203 page_offset &= ~PAGE_MASK; 1183 page_offset &= ~PAGE_MASK;
1184 if (quadrant != page->role.quadrant)
1185 continue;
1204 } 1186 }
1205 spte = __va(page->page_hpa); 1187 spte = &page->spt[page_offset / sizeof(*spte)];
1206 spte += page_offset / sizeof(*spte);
1207 while (npte--) { 1188 while (npte--) {
1208 mmu_pre_write_zap_pte(vcpu, page, spte); 1189 mmu_pte_write_zap_pte(vcpu, page, spte);
1190 mmu_pte_write_new_pte(vcpu, page, spte, new, bytes);
1209 ++spte; 1191 ++spte;
1210 } 1192 }
1211 } 1193 }
1212} 1194}
1213 1195
1214void kvm_mmu_post_write(struct kvm_vcpu *vcpu, gpa_t gpa, int bytes)
1215{
1216}
1217
1218int kvm_mmu_unprotect_page_virt(struct kvm_vcpu *vcpu, gva_t gva) 1196int kvm_mmu_unprotect_page_virt(struct kvm_vcpu *vcpu, gva_t gva)
1219{ 1197{
1220 gpa_t gpa = vcpu->mmu.gva_to_gpa(vcpu, gva); 1198 gpa_t gpa = vcpu->mmu.gva_to_gpa(vcpu, gva);
@@ -1243,13 +1221,6 @@ static void free_mmu_pages(struct kvm_vcpu *vcpu)
1243 struct kvm_mmu_page, link); 1221 struct kvm_mmu_page, link);
1244 kvm_mmu_zap_page(vcpu, page); 1222 kvm_mmu_zap_page(vcpu, page);
1245 } 1223 }
1246 while (!list_empty(&vcpu->free_pages)) {
1247 page = list_entry(vcpu->free_pages.next,
1248 struct kvm_mmu_page, link);
1249 list_del(&page->link);
1250 __free_page(pfn_to_page(page->page_hpa >> PAGE_SHIFT));
1251 page->page_hpa = INVALID_PAGE;
1252 }
1253 free_page((unsigned long)vcpu->mmu.pae_root); 1224 free_page((unsigned long)vcpu->mmu.pae_root);
1254} 1225}
1255 1226
@@ -1260,18 +1231,7 @@ static int alloc_mmu_pages(struct kvm_vcpu *vcpu)
1260 1231
1261 ASSERT(vcpu); 1232 ASSERT(vcpu);
1262 1233
1263 for (i = 0; i < KVM_NUM_MMU_PAGES; i++) { 1234 vcpu->kvm->n_free_mmu_pages = KVM_NUM_MMU_PAGES;
1264 struct kvm_mmu_page *page_header = &vcpu->page_header_buf[i];
1265
1266 INIT_LIST_HEAD(&page_header->link);
1267 if ((page = alloc_page(GFP_KERNEL)) == NULL)
1268 goto error_1;
1269 set_page_private(page, (unsigned long)page_header);
1270 page_header->page_hpa = (hpa_t)page_to_pfn(page) << PAGE_SHIFT;
1271 memset(__va(page_header->page_hpa), 0, PAGE_SIZE);
1272 list_add(&page_header->link, &vcpu->free_pages);
1273 ++vcpu->kvm->n_free_mmu_pages;
1274 }
1275 1235
1276 /* 1236 /*
1277 * When emulating 32-bit mode, cr3 is only 32 bits even on x86_64. 1237 * When emulating 32-bit mode, cr3 is only 32 bits even on x86_64.
@@ -1296,7 +1256,6 @@ int kvm_mmu_create(struct kvm_vcpu *vcpu)
1296{ 1256{
1297 ASSERT(vcpu); 1257 ASSERT(vcpu);
1298 ASSERT(!VALID_PAGE(vcpu->mmu.root_hpa)); 1258 ASSERT(!VALID_PAGE(vcpu->mmu.root_hpa));
1299 ASSERT(list_empty(&vcpu->free_pages));
1300 1259
1301 return alloc_mmu_pages(vcpu); 1260 return alloc_mmu_pages(vcpu);
1302} 1261}
@@ -1305,7 +1264,6 @@ int kvm_mmu_setup(struct kvm_vcpu *vcpu)
1305{ 1264{
1306 ASSERT(vcpu); 1265 ASSERT(vcpu);
1307 ASSERT(!VALID_PAGE(vcpu->mmu.root_hpa)); 1266 ASSERT(!VALID_PAGE(vcpu->mmu.root_hpa));
1308 ASSERT(!list_empty(&vcpu->free_pages));
1309 1267
1310 return init_kvm_mmu(vcpu); 1268 return init_kvm_mmu(vcpu);
1311} 1269}
@@ -1331,7 +1289,7 @@ void kvm_mmu_slot_remove_write_access(struct kvm_vcpu *vcpu, int slot)
1331 if (!test_bit(slot, &page->slot_bitmap)) 1289 if (!test_bit(slot, &page->slot_bitmap))
1332 continue; 1290 continue;
1333 1291
1334 pt = __va(page->page_hpa); 1292 pt = page->spt;
1335 for (i = 0; i < PT64_ENT_PER_PAGE; ++i) 1293 for (i = 0; i < PT64_ENT_PER_PAGE; ++i)
1336 /* avoid RMW */ 1294 /* avoid RMW */
1337 if (pt[i] & PT_WRITABLE_MASK) { 1295 if (pt[i] & PT_WRITABLE_MASK) {
@@ -1354,7 +1312,7 @@ void kvm_mmu_zap_all(struct kvm_vcpu *vcpu)
1354 } 1312 }
1355 1313
1356 mmu_free_memory_caches(vcpu); 1314 mmu_free_memory_caches(vcpu);
1357 kvm_arch_ops->tlb_flush(vcpu); 1315 kvm_flush_remote_tlbs(vcpu->kvm);
1358 init_kvm_mmu(vcpu); 1316 init_kvm_mmu(vcpu);
1359} 1317}
1360 1318
@@ -1364,6 +1322,10 @@ void kvm_mmu_module_exit(void)
1364 kmem_cache_destroy(pte_chain_cache); 1322 kmem_cache_destroy(pte_chain_cache);
1365 if (rmap_desc_cache) 1323 if (rmap_desc_cache)
1366 kmem_cache_destroy(rmap_desc_cache); 1324 kmem_cache_destroy(rmap_desc_cache);
1325 if (mmu_page_cache)
1326 kmem_cache_destroy(mmu_page_cache);
1327 if (mmu_page_header_cache)
1328 kmem_cache_destroy(mmu_page_header_cache);
1367} 1329}
1368 1330
1369int kvm_mmu_module_init(void) 1331int kvm_mmu_module_init(void)
@@ -1379,6 +1341,18 @@ int kvm_mmu_module_init(void)
1379 if (!rmap_desc_cache) 1341 if (!rmap_desc_cache)
1380 goto nomem; 1342 goto nomem;
1381 1343
1344 mmu_page_cache = kmem_cache_create("kvm_mmu_page",
1345 PAGE_SIZE,
1346 PAGE_SIZE, 0, NULL, NULL);
1347 if (!mmu_page_cache)
1348 goto nomem;
1349
1350 mmu_page_header_cache = kmem_cache_create("kvm_mmu_page_header",
1351 sizeof(struct kvm_mmu_page),
1352 0, 0, NULL, NULL);
1353 if (!mmu_page_header_cache)
1354 goto nomem;
1355
1382 return 0; 1356 return 0;
1383 1357
1384nomem: 1358nomem:
@@ -1482,7 +1456,7 @@ static int count_writable_mappings(struct kvm_vcpu *vcpu)
1482 int i; 1456 int i;
1483 1457
1484 list_for_each_entry(page, &vcpu->kvm->active_mmu_pages, link) { 1458 list_for_each_entry(page, &vcpu->kvm->active_mmu_pages, link) {
1485 u64 *pt = __va(page->page_hpa); 1459 u64 *pt = page->spt;
1486 1460
1487 if (page->role.level != PT_PAGE_TABLE_LEVEL) 1461 if (page->role.level != PT_PAGE_TABLE_LEVEL)
1488 continue; 1462 continue;
diff --git a/drivers/kvm/paging_tmpl.h b/drivers/kvm/paging_tmpl.h
index 73ffbffb1097..a7c5cb0319ea 100644
--- a/drivers/kvm/paging_tmpl.h
+++ b/drivers/kvm/paging_tmpl.h
@@ -31,7 +31,6 @@
31 #define PT_INDEX(addr, level) PT64_INDEX(addr, level) 31 #define PT_INDEX(addr, level) PT64_INDEX(addr, level)
32 #define SHADOW_PT_INDEX(addr, level) PT64_INDEX(addr, level) 32 #define SHADOW_PT_INDEX(addr, level) PT64_INDEX(addr, level)
33 #define PT_LEVEL_MASK(level) PT64_LEVEL_MASK(level) 33 #define PT_LEVEL_MASK(level) PT64_LEVEL_MASK(level)
34 #define PT_PTE_COPY_MASK PT64_PTE_COPY_MASK
35 #ifdef CONFIG_X86_64 34 #ifdef CONFIG_X86_64
36 #define PT_MAX_FULL_LEVELS 4 35 #define PT_MAX_FULL_LEVELS 4
37 #else 36 #else
@@ -46,7 +45,6 @@
46 #define PT_INDEX(addr, level) PT32_INDEX(addr, level) 45 #define PT_INDEX(addr, level) PT32_INDEX(addr, level)
47 #define SHADOW_PT_INDEX(addr, level) PT64_INDEX(addr, level) 46 #define SHADOW_PT_INDEX(addr, level) PT64_INDEX(addr, level)
48 #define PT_LEVEL_MASK(level) PT32_LEVEL_MASK(level) 47 #define PT_LEVEL_MASK(level) PT32_LEVEL_MASK(level)
49 #define PT_PTE_COPY_MASK PT32_PTE_COPY_MASK
50 #define PT_MAX_FULL_LEVELS 2 48 #define PT_MAX_FULL_LEVELS 2
51#else 49#else
52 #error Invalid PTTYPE value 50 #error Invalid PTTYPE value
@@ -192,40 +190,143 @@ static void FNAME(mark_pagetable_dirty)(struct kvm *kvm,
192 mark_page_dirty(kvm, walker->table_gfn[walker->level - 1]); 190 mark_page_dirty(kvm, walker->table_gfn[walker->level - 1]);
193} 191}
194 192
195static void FNAME(set_pte)(struct kvm_vcpu *vcpu, u64 guest_pte, 193static void FNAME(set_pte_common)(struct kvm_vcpu *vcpu,
196 u64 *shadow_pte, u64 access_bits, gfn_t gfn) 194 u64 *shadow_pte,
195 gpa_t gaddr,
196 pt_element_t *gpte,
197 u64 access_bits,
198 int user_fault,
199 int write_fault,
200 int *ptwrite,
201 struct guest_walker *walker,
202 gfn_t gfn)
197{ 203{
198 ASSERT(*shadow_pte == 0); 204 hpa_t paddr;
199 access_bits &= guest_pte; 205 int dirty = *gpte & PT_DIRTY_MASK;
200 *shadow_pte = (guest_pte & PT_PTE_COPY_MASK); 206 u64 spte = *shadow_pte;
201 set_pte_common(vcpu, shadow_pte, guest_pte & PT_BASE_ADDR_MASK, 207 int was_rmapped = is_rmap_pte(spte);
202 guest_pte & PT_DIRTY_MASK, access_bits, gfn); 208
209 pgprintk("%s: spte %llx gpte %llx access %llx write_fault %d"
210 " user_fault %d gfn %lx\n",
211 __FUNCTION__, spte, (u64)*gpte, access_bits,
212 write_fault, user_fault, gfn);
213
214 if (write_fault && !dirty) {
215 *gpte |= PT_DIRTY_MASK;
216 dirty = 1;
217 FNAME(mark_pagetable_dirty)(vcpu->kvm, walker);
218 }
219
220 spte |= PT_PRESENT_MASK | PT_ACCESSED_MASK | PT_DIRTY_MASK;
221 spte |= *gpte & PT64_NX_MASK;
222 if (!dirty)
223 access_bits &= ~PT_WRITABLE_MASK;
224
225 paddr = gpa_to_hpa(vcpu, gaddr & PT64_BASE_ADDR_MASK);
226
227 spte |= PT_PRESENT_MASK;
228 if (access_bits & PT_USER_MASK)
229 spte |= PT_USER_MASK;
230
231 if (is_error_hpa(paddr)) {
232 spte |= gaddr;
233 spte |= PT_SHADOW_IO_MARK;
234 spte &= ~PT_PRESENT_MASK;
235 set_shadow_pte(shadow_pte, spte);
236 return;
237 }
238
239 spte |= paddr;
240
241 if ((access_bits & PT_WRITABLE_MASK)
242 || (write_fault && !is_write_protection(vcpu) && !user_fault)) {
243 struct kvm_mmu_page *shadow;
244
245 spte |= PT_WRITABLE_MASK;
246 if (user_fault) {
247 mmu_unshadow(vcpu, gfn);
248 goto unshadowed;
249 }
250
251 shadow = kvm_mmu_lookup_page(vcpu, gfn);
252 if (shadow) {
253 pgprintk("%s: found shadow page for %lx, marking ro\n",
254 __FUNCTION__, gfn);
255 access_bits &= ~PT_WRITABLE_MASK;
256 if (is_writeble_pte(spte)) {
257 spte &= ~PT_WRITABLE_MASK;
258 kvm_arch_ops->tlb_flush(vcpu);
259 }
260 if (write_fault)
261 *ptwrite = 1;
262 }
263 }
264
265unshadowed:
266
267 if (access_bits & PT_WRITABLE_MASK)
268 mark_page_dirty(vcpu->kvm, gaddr >> PAGE_SHIFT);
269
270 set_shadow_pte(shadow_pte, spte);
271 page_header_update_slot(vcpu->kvm, shadow_pte, gaddr);
272 if (!was_rmapped)
273 rmap_add(vcpu, shadow_pte);
203} 274}
204 275
205static void FNAME(set_pde)(struct kvm_vcpu *vcpu, u64 guest_pde, 276static void FNAME(set_pte)(struct kvm_vcpu *vcpu, pt_element_t *gpte,
206 u64 *shadow_pte, u64 access_bits, gfn_t gfn) 277 u64 *shadow_pte, u64 access_bits,
278 int user_fault, int write_fault, int *ptwrite,
279 struct guest_walker *walker, gfn_t gfn)
280{
281 access_bits &= *gpte;
282 FNAME(set_pte_common)(vcpu, shadow_pte, *gpte & PT_BASE_ADDR_MASK,
283 gpte, access_bits, user_fault, write_fault,
284 ptwrite, walker, gfn);
285}
286
287static void FNAME(update_pte)(struct kvm_vcpu *vcpu, struct kvm_mmu_page *page,
288 u64 *spte, const void *pte, int bytes)
289{
290 pt_element_t gpte;
291
292 if (bytes < sizeof(pt_element_t))
293 return;
294 gpte = *(const pt_element_t *)pte;
295 if (~gpte & (PT_PRESENT_MASK | PT_ACCESSED_MASK))
296 return;
297 pgprintk("%s: gpte %llx spte %p\n", __FUNCTION__, (u64)gpte, spte);
298 FNAME(set_pte)(vcpu, &gpte, spte, PT_USER_MASK | PT_WRITABLE_MASK, 0,
299 0, NULL, NULL,
300 (gpte & PT_BASE_ADDR_MASK) >> PAGE_SHIFT);
301}
302
303static void FNAME(set_pde)(struct kvm_vcpu *vcpu, pt_element_t *gpde,
304 u64 *shadow_pte, u64 access_bits,
305 int user_fault, int write_fault, int *ptwrite,
306 struct guest_walker *walker, gfn_t gfn)
207{ 307{
208 gpa_t gaddr; 308 gpa_t gaddr;
209 309
210 ASSERT(*shadow_pte == 0); 310 access_bits &= *gpde;
211 access_bits &= guest_pde;
212 gaddr = (gpa_t)gfn << PAGE_SHIFT; 311 gaddr = (gpa_t)gfn << PAGE_SHIFT;
213 if (PTTYPE == 32 && is_cpuid_PSE36()) 312 if (PTTYPE == 32 && is_cpuid_PSE36())
214 gaddr |= (guest_pde & PT32_DIR_PSE36_MASK) << 313 gaddr |= (*gpde & PT32_DIR_PSE36_MASK) <<
215 (32 - PT32_DIR_PSE36_SHIFT); 314 (32 - PT32_DIR_PSE36_SHIFT);
216 *shadow_pte = guest_pde & PT_PTE_COPY_MASK; 315 FNAME(set_pte_common)(vcpu, shadow_pte, gaddr,
217 set_pte_common(vcpu, shadow_pte, gaddr, 316 gpde, access_bits, user_fault, write_fault,
218 guest_pde & PT_DIRTY_MASK, access_bits, gfn); 317 ptwrite, walker, gfn);
219} 318}
220 319
221/* 320/*
222 * Fetch a shadow pte for a specific level in the paging hierarchy. 321 * Fetch a shadow pte for a specific level in the paging hierarchy.
223 */ 322 */
224static u64 *FNAME(fetch)(struct kvm_vcpu *vcpu, gva_t addr, 323static u64 *FNAME(fetch)(struct kvm_vcpu *vcpu, gva_t addr,
225 struct guest_walker *walker) 324 struct guest_walker *walker,
325 int user_fault, int write_fault, int *ptwrite)
226{ 326{
227 hpa_t shadow_addr; 327 hpa_t shadow_addr;
228 int level; 328 int level;
329 u64 *shadow_ent;
229 u64 *prev_shadow_ent = NULL; 330 u64 *prev_shadow_ent = NULL;
230 pt_element_t *guest_ent = walker->ptep; 331 pt_element_t *guest_ent = walker->ptep;
231 332
@@ -242,37 +343,23 @@ static u64 *FNAME(fetch)(struct kvm_vcpu *vcpu, gva_t addr,
242 343
243 for (; ; level--) { 344 for (; ; level--) {
244 u32 index = SHADOW_PT_INDEX(addr, level); 345 u32 index = SHADOW_PT_INDEX(addr, level);
245 u64 *shadow_ent = ((u64 *)__va(shadow_addr)) + index;
246 struct kvm_mmu_page *shadow_page; 346 struct kvm_mmu_page *shadow_page;
247 u64 shadow_pte; 347 u64 shadow_pte;
248 int metaphysical; 348 int metaphysical;
249 gfn_t table_gfn; 349 gfn_t table_gfn;
250 unsigned hugepage_access = 0; 350 unsigned hugepage_access = 0;
251 351
352 shadow_ent = ((u64 *)__va(shadow_addr)) + index;
252 if (is_present_pte(*shadow_ent) || is_io_pte(*shadow_ent)) { 353 if (is_present_pte(*shadow_ent) || is_io_pte(*shadow_ent)) {
253 if (level == PT_PAGE_TABLE_LEVEL) 354 if (level == PT_PAGE_TABLE_LEVEL)
254 return shadow_ent; 355 break;
255 shadow_addr = *shadow_ent & PT64_BASE_ADDR_MASK; 356 shadow_addr = *shadow_ent & PT64_BASE_ADDR_MASK;
256 prev_shadow_ent = shadow_ent; 357 prev_shadow_ent = shadow_ent;
257 continue; 358 continue;
258 } 359 }
259 360
260 if (level == PT_PAGE_TABLE_LEVEL) { 361 if (level == PT_PAGE_TABLE_LEVEL)
261 362 break;
262 if (walker->level == PT_DIRECTORY_LEVEL) {
263 if (prev_shadow_ent)
264 *prev_shadow_ent |= PT_SHADOW_PS_MARK;
265 FNAME(set_pde)(vcpu, *guest_ent, shadow_ent,
266 walker->inherited_ar,
267 walker->gfn);
268 } else {
269 ASSERT(walker->level == PT_PAGE_TABLE_LEVEL);
270 FNAME(set_pte)(vcpu, *guest_ent, shadow_ent,
271 walker->inherited_ar,
272 walker->gfn);
273 }
274 return shadow_ent;
275 }
276 363
277 if (level - 1 == PT_PAGE_TABLE_LEVEL 364 if (level - 1 == PT_PAGE_TABLE_LEVEL
278 && walker->level == PT_DIRECTORY_LEVEL) { 365 && walker->level == PT_DIRECTORY_LEVEL) {
@@ -289,90 +376,24 @@ static u64 *FNAME(fetch)(struct kvm_vcpu *vcpu, gva_t addr,
289 shadow_page = kvm_mmu_get_page(vcpu, table_gfn, addr, level-1, 376 shadow_page = kvm_mmu_get_page(vcpu, table_gfn, addr, level-1,
290 metaphysical, hugepage_access, 377 metaphysical, hugepage_access,
291 shadow_ent); 378 shadow_ent);
292 shadow_addr = shadow_page->page_hpa; 379 shadow_addr = __pa(shadow_page->spt);
293 shadow_pte = shadow_addr | PT_PRESENT_MASK | PT_ACCESSED_MASK 380 shadow_pte = shadow_addr | PT_PRESENT_MASK | PT_ACCESSED_MASK
294 | PT_WRITABLE_MASK | PT_USER_MASK; 381 | PT_WRITABLE_MASK | PT_USER_MASK;
295 *shadow_ent = shadow_pte; 382 *shadow_ent = shadow_pte;
296 prev_shadow_ent = shadow_ent; 383 prev_shadow_ent = shadow_ent;
297 } 384 }
298}
299 385
300/* 386 if (walker->level == PT_DIRECTORY_LEVEL) {
301 * The guest faulted for write. We need to 387 FNAME(set_pde)(vcpu, guest_ent, shadow_ent,
302 * 388 walker->inherited_ar, user_fault, write_fault,
303 * - check write permissions 389 ptwrite, walker, walker->gfn);
304 * - update the guest pte dirty bit 390 } else {
305 * - update our own dirty page tracking structures 391 ASSERT(walker->level == PT_PAGE_TABLE_LEVEL);
306 */ 392 FNAME(set_pte)(vcpu, guest_ent, shadow_ent,
307static int FNAME(fix_write_pf)(struct kvm_vcpu *vcpu, 393 walker->inherited_ar, user_fault, write_fault,
308 u64 *shadow_ent, 394 ptwrite, walker, walker->gfn);
309 struct guest_walker *walker,
310 gva_t addr,
311 int user,
312 int *write_pt)
313{
314 pt_element_t *guest_ent;
315 int writable_shadow;
316 gfn_t gfn;
317 struct kvm_mmu_page *page;
318
319 if (is_writeble_pte(*shadow_ent))
320 return !user || (*shadow_ent & PT_USER_MASK);
321
322 writable_shadow = *shadow_ent & PT_SHADOW_WRITABLE_MASK;
323 if (user) {
324 /*
325 * User mode access. Fail if it's a kernel page or a read-only
326 * page.
327 */
328 if (!(*shadow_ent & PT_SHADOW_USER_MASK) || !writable_shadow)
329 return 0;
330 ASSERT(*shadow_ent & PT_USER_MASK);
331 } else
332 /*
333 * Kernel mode access. Fail if it's a read-only page and
334 * supervisor write protection is enabled.
335 */
336 if (!writable_shadow) {
337 if (is_write_protection(vcpu))
338 return 0;
339 *shadow_ent &= ~PT_USER_MASK;
340 }
341
342 guest_ent = walker->ptep;
343
344 if (!is_present_pte(*guest_ent)) {
345 *shadow_ent = 0;
346 return 0;
347 } 395 }
348 396 return shadow_ent;
349 gfn = walker->gfn;
350
351 if (user) {
352 /*
353 * Usermode page faults won't be for page table updates.
354 */
355 while ((page = kvm_mmu_lookup_page(vcpu, gfn)) != NULL) {
356 pgprintk("%s: zap %lx %x\n",
357 __FUNCTION__, gfn, page->role.word);
358 kvm_mmu_zap_page(vcpu, page);
359 }
360 } else if (kvm_mmu_lookup_page(vcpu, gfn)) {
361 pgprintk("%s: found shadow page for %lx, marking ro\n",
362 __FUNCTION__, gfn);
363 mark_page_dirty(vcpu->kvm, gfn);
364 FNAME(mark_pagetable_dirty)(vcpu->kvm, walker);
365 *guest_ent |= PT_DIRTY_MASK;
366 *write_pt = 1;
367 return 0;
368 }
369 mark_page_dirty(vcpu->kvm, gfn);
370 *shadow_ent |= PT_WRITABLE_MASK;
371 FNAME(mark_pagetable_dirty)(vcpu->kvm, walker);
372 *guest_ent |= PT_DIRTY_MASK;
373 rmap_add(vcpu, shadow_ent);
374
375 return 1;
376} 397}
377 398
378/* 399/*
@@ -397,7 +418,6 @@ static int FNAME(page_fault)(struct kvm_vcpu *vcpu, gva_t addr,
397 int fetch_fault = error_code & PFERR_FETCH_MASK; 418 int fetch_fault = error_code & PFERR_FETCH_MASK;
398 struct guest_walker walker; 419 struct guest_walker walker;
399 u64 *shadow_pte; 420 u64 *shadow_pte;
400 int fixed;
401 int write_pt = 0; 421 int write_pt = 0;
402 int r; 422 int r;
403 423
@@ -421,27 +441,20 @@ static int FNAME(page_fault)(struct kvm_vcpu *vcpu, gva_t addr,
421 pgprintk("%s: guest page fault\n", __FUNCTION__); 441 pgprintk("%s: guest page fault\n", __FUNCTION__);
422 inject_page_fault(vcpu, addr, walker.error_code); 442 inject_page_fault(vcpu, addr, walker.error_code);
423 FNAME(release_walker)(&walker); 443 FNAME(release_walker)(&walker);
444 vcpu->last_pt_write_count = 0; /* reset fork detector */
424 return 0; 445 return 0;
425 } 446 }
426 447
427 shadow_pte = FNAME(fetch)(vcpu, addr, &walker); 448 shadow_pte = FNAME(fetch)(vcpu, addr, &walker, user_fault, write_fault,
428 pgprintk("%s: shadow pte %p %llx\n", __FUNCTION__, 449 &write_pt);
429 shadow_pte, *shadow_pte); 450 pgprintk("%s: shadow pte %p %llx ptwrite %d\n", __FUNCTION__,
430 451 shadow_pte, *shadow_pte, write_pt);
431 /*
432 * Update the shadow pte.
433 */
434 if (write_fault)
435 fixed = FNAME(fix_write_pf)(vcpu, shadow_pte, &walker, addr,
436 user_fault, &write_pt);
437 else
438 fixed = fix_read_pf(shadow_pte);
439
440 pgprintk("%s: updated shadow pte %p %llx\n", __FUNCTION__,
441 shadow_pte, *shadow_pte);
442 452
443 FNAME(release_walker)(&walker); 453 FNAME(release_walker)(&walker);
444 454
455 if (!write_pt)
456 vcpu->last_pt_write_count = 0; /* reset fork detector */
457
445 /* 458 /*
446 * mmio: emulate if accessible, otherwise its a guest fault. 459 * mmio: emulate if accessible, otherwise its a guest fault.
447 */ 460 */
@@ -478,7 +491,5 @@ static gpa_t FNAME(gva_to_gpa)(struct kvm_vcpu *vcpu, gva_t vaddr)
478#undef PT_INDEX 491#undef PT_INDEX
479#undef SHADOW_PT_INDEX 492#undef SHADOW_PT_INDEX
480#undef PT_LEVEL_MASK 493#undef PT_LEVEL_MASK
481#undef PT_PTE_COPY_MASK
482#undef PT_NON_PTE_COPY_MASK
483#undef PT_DIR_BASE_ADDR_MASK 494#undef PT_DIR_BASE_ADDR_MASK
484#undef PT_MAX_FULL_LEVELS 495#undef PT_MAX_FULL_LEVELS
diff --git a/drivers/kvm/svm.c b/drivers/kvm/svm.c
index fa17d6d4f0cb..bc818cc126e3 100644
--- a/drivers/kvm/svm.c
+++ b/drivers/kvm/svm.c
@@ -14,16 +14,17 @@
14 * 14 *
15 */ 15 */
16 16
17#include "kvm_svm.h"
18#include "x86_emulate.h"
19
17#include <linux/module.h> 20#include <linux/module.h>
18#include <linux/kernel.h> 21#include <linux/kernel.h>
19#include <linux/vmalloc.h> 22#include <linux/vmalloc.h>
20#include <linux/highmem.h> 23#include <linux/highmem.h>
21#include <linux/profile.h> 24#include <linux/profile.h>
22#include <linux/sched.h> 25#include <linux/sched.h>
23#include <asm/desc.h>
24 26
25#include "kvm_svm.h" 27#include <asm/desc.h>
26#include "x86_emulate.h"
27 28
28MODULE_AUTHOR("Qumranet"); 29MODULE_AUTHOR("Qumranet");
29MODULE_LICENSE("GPL"); 30MODULE_LICENSE("GPL");
@@ -378,7 +379,7 @@ static __init int svm_hardware_setup(void)
378 int cpu; 379 int cpu;
379 struct page *iopm_pages; 380 struct page *iopm_pages;
380 struct page *msrpm_pages; 381 struct page *msrpm_pages;
381 void *msrpm_va; 382 void *iopm_va, *msrpm_va;
382 int r; 383 int r;
383 384
384 kvm_emulator_want_group7_invlpg(); 385 kvm_emulator_want_group7_invlpg();
@@ -387,8 +388,10 @@ static __init int svm_hardware_setup(void)
387 388
388 if (!iopm_pages) 389 if (!iopm_pages)
389 return -ENOMEM; 390 return -ENOMEM;
390 memset(page_address(iopm_pages), 0xff, 391
391 PAGE_SIZE * (1 << IOPM_ALLOC_ORDER)); 392 iopm_va = page_address(iopm_pages);
393 memset(iopm_va, 0xff, PAGE_SIZE * (1 << IOPM_ALLOC_ORDER));
394 clear_bit(0x80, iopm_va); /* allow direct access to PC debug port */
392 iopm_base = page_to_pfn(iopm_pages) << PAGE_SHIFT; 395 iopm_base = page_to_pfn(iopm_pages) << PAGE_SHIFT;
393 396
394 397
@@ -579,7 +582,7 @@ static int svm_create_vcpu(struct kvm_vcpu *vcpu)
579 goto out2; 582 goto out2;
580 583
581 vcpu->svm->vmcb = page_address(page); 584 vcpu->svm->vmcb = page_address(page);
582 memset(vcpu->svm->vmcb, 0, PAGE_SIZE); 585 clear_page(vcpu->svm->vmcb);
583 vcpu->svm->vmcb_pa = page_to_pfn(page) << PAGE_SHIFT; 586 vcpu->svm->vmcb_pa = page_to_pfn(page) << PAGE_SHIFT;
584 vcpu->svm->asid_generation = 0; 587 vcpu->svm->asid_generation = 0;
585 memset(vcpu->svm->db_regs, 0, sizeof(vcpu->svm->db_regs)); 588 memset(vcpu->svm->db_regs, 0, sizeof(vcpu->svm->db_regs));
@@ -587,9 +590,9 @@ static int svm_create_vcpu(struct kvm_vcpu *vcpu)
587 590
588 fx_init(vcpu); 591 fx_init(vcpu);
589 vcpu->fpu_active = 1; 592 vcpu->fpu_active = 1;
590 vcpu->apic_base = 0xfee00000 | 593 vcpu->apic_base = 0xfee00000 | MSR_IA32_APICBASE_ENABLE;
591 /*for vcpu 0*/ MSR_IA32_APICBASE_BSP | 594 if (vcpu == &vcpu->kvm->vcpus[0])
592 MSR_IA32_APICBASE_ENABLE; 595 vcpu->apic_base |= MSR_IA32_APICBASE_BSP;
593 596
594 return 0; 597 return 0;
595 598
@@ -955,7 +958,7 @@ static int shutdown_interception(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
955 * VMCB is undefined after a SHUTDOWN intercept 958 * VMCB is undefined after a SHUTDOWN intercept
956 * so reinitialize it. 959 * so reinitialize it.
957 */ 960 */
958 memset(vcpu->svm->vmcb, 0, PAGE_SIZE); 961 clear_page(vcpu->svm->vmcb);
959 init_vmcb(vcpu->svm->vmcb); 962 init_vmcb(vcpu->svm->vmcb);
960 963
961 kvm_run->exit_reason = KVM_EXIT_SHUTDOWN; 964 kvm_run->exit_reason = KVM_EXIT_SHUTDOWN;
@@ -1113,12 +1116,7 @@ static int halt_interception(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
1113{ 1116{
1114 vcpu->svm->next_rip = vcpu->svm->vmcb->save.rip + 1; 1117 vcpu->svm->next_rip = vcpu->svm->vmcb->save.rip + 1;
1115 skip_emulated_instruction(vcpu); 1118 skip_emulated_instruction(vcpu);
1116 if (vcpu->irq_summary) 1119 return kvm_emulate_halt(vcpu);
1117 return 1;
1118
1119 kvm_run->exit_reason = KVM_EXIT_HLT;
1120 ++vcpu->stat.halt_exits;
1121 return 0;
1122} 1120}
1123 1121
1124static int vmmcall_interception(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run) 1122static int vmmcall_interception(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
@@ -1473,6 +1471,11 @@ static void load_db_regs(unsigned long *db_regs)
1473 asm volatile ("mov %0, %%dr3" : : "r"(db_regs[3])); 1471 asm volatile ("mov %0, %%dr3" : : "r"(db_regs[3]));
1474} 1472}
1475 1473
1474static void svm_flush_tlb(struct kvm_vcpu *vcpu)
1475{
1476 force_new_asid(vcpu);
1477}
1478
1476static int svm_vcpu_run(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run) 1479static int svm_vcpu_run(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
1477{ 1480{
1478 u16 fs_selector; 1481 u16 fs_selector;
@@ -1481,11 +1484,20 @@ static int svm_vcpu_run(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
1481 int r; 1484 int r;
1482 1485
1483again: 1486again:
1487 r = kvm_mmu_reload(vcpu);
1488 if (unlikely(r))
1489 return r;
1490
1484 if (!vcpu->mmio_read_completed) 1491 if (!vcpu->mmio_read_completed)
1485 do_interrupt_requests(vcpu, kvm_run); 1492 do_interrupt_requests(vcpu, kvm_run);
1486 1493
1487 clgi(); 1494 clgi();
1488 1495
1496 vcpu->guest_mode = 1;
1497 if (vcpu->requests)
1498 if (test_and_clear_bit(KVM_TLB_FLUSH, &vcpu->requests))
1499 svm_flush_tlb(vcpu);
1500
1489 pre_svm_run(vcpu); 1501 pre_svm_run(vcpu);
1490 1502
1491 save_host_msrs(vcpu); 1503 save_host_msrs(vcpu);
@@ -1617,6 +1629,8 @@ again:
1617#endif 1629#endif
1618 : "cc", "memory" ); 1630 : "cc", "memory" );
1619 1631
1632 vcpu->guest_mode = 0;
1633
1620 if (vcpu->fpu_active) { 1634 if (vcpu->fpu_active) {
1621 fx_save(vcpu->guest_fx_image); 1635 fx_save(vcpu->guest_fx_image);
1622 fx_restore(vcpu->host_fx_image); 1636 fx_restore(vcpu->host_fx_image);
@@ -1681,11 +1695,6 @@ again:
1681 return r; 1695 return r;
1682} 1696}
1683 1697
1684static void svm_flush_tlb(struct kvm_vcpu *vcpu)
1685{
1686 force_new_asid(vcpu);
1687}
1688
1689static void svm_set_cr3(struct kvm_vcpu *vcpu, unsigned long root) 1698static void svm_set_cr3(struct kvm_vcpu *vcpu, unsigned long root)
1690{ 1699{
1691 vcpu->svm->vmcb->save.cr3 = root; 1700 vcpu->svm->vmcb->save.cr3 = root;
@@ -1727,6 +1736,12 @@ static void svm_inject_page_fault(struct kvm_vcpu *vcpu,
1727 1736
1728static int is_disabled(void) 1737static int is_disabled(void)
1729{ 1738{
1739 u64 vm_cr;
1740
1741 rdmsrl(MSR_VM_CR, vm_cr);
1742 if (vm_cr & (1 << SVM_VM_CR_SVM_DISABLE))
1743 return 1;
1744
1730 return 0; 1745 return 0;
1731} 1746}
1732 1747
diff --git a/drivers/kvm/svm.h b/drivers/kvm/svm.h
index 5e93814400ce..3b1b0f35b6cb 100644
--- a/drivers/kvm/svm.h
+++ b/drivers/kvm/svm.h
@@ -175,8 +175,11 @@ struct __attribute__ ((__packed__)) vmcb {
175#define SVM_CPUID_FUNC 0x8000000a 175#define SVM_CPUID_FUNC 0x8000000a
176 176
177#define MSR_EFER_SVME_MASK (1ULL << 12) 177#define MSR_EFER_SVME_MASK (1ULL << 12)
178#define MSR_VM_CR 0xc0010114
178#define MSR_VM_HSAVE_PA 0xc0010117ULL 179#define MSR_VM_HSAVE_PA 0xc0010117ULL
179 180
181#define SVM_VM_CR_SVM_DISABLE 4
182
180#define SVM_SELECTOR_S_SHIFT 4 183#define SVM_SELECTOR_S_SHIFT 4
181#define SVM_SELECTOR_DPL_SHIFT 5 184#define SVM_SELECTOR_DPL_SHIFT 5
182#define SVM_SELECTOR_P_SHIFT 7 185#define SVM_SELECTOR_P_SHIFT 7
diff --git a/drivers/kvm/vmx.c b/drivers/kvm/vmx.c
index c1ac106ace8c..80628f69916d 100644
--- a/drivers/kvm/vmx.c
+++ b/drivers/kvm/vmx.c
@@ -17,28 +17,35 @@
17 17
18#include "kvm.h" 18#include "kvm.h"
19#include "vmx.h" 19#include "vmx.h"
20#include "segment_descriptor.h"
21
20#include <linux/module.h> 22#include <linux/module.h>
21#include <linux/kernel.h> 23#include <linux/kernel.h>
22#include <linux/mm.h> 24#include <linux/mm.h>
23#include <linux/highmem.h> 25#include <linux/highmem.h>
24#include <linux/profile.h> 26#include <linux/profile.h>
25#include <linux/sched.h> 27#include <linux/sched.h>
28
26#include <asm/io.h> 29#include <asm/io.h>
27#include <asm/desc.h> 30#include <asm/desc.h>
28 31
29#include "segment_descriptor.h"
30
31MODULE_AUTHOR("Qumranet"); 32MODULE_AUTHOR("Qumranet");
32MODULE_LICENSE("GPL"); 33MODULE_LICENSE("GPL");
33 34
35static int init_rmode_tss(struct kvm *kvm);
36
34static DEFINE_PER_CPU(struct vmcs *, vmxarea); 37static DEFINE_PER_CPU(struct vmcs *, vmxarea);
35static DEFINE_PER_CPU(struct vmcs *, current_vmcs); 38static DEFINE_PER_CPU(struct vmcs *, current_vmcs);
36 39
40static struct page *vmx_io_bitmap_a;
41static struct page *vmx_io_bitmap_b;
42
37#ifdef CONFIG_X86_64 43#ifdef CONFIG_X86_64
38#define HOST_IS_64 1 44#define HOST_IS_64 1
39#else 45#else
40#define HOST_IS_64 0 46#define HOST_IS_64 0
41#endif 47#endif
48#define EFER_SAVE_RESTORE_BITS ((u64)EFER_SCE)
42 49
43static struct vmcs_descriptor { 50static struct vmcs_descriptor {
44 int size; 51 int size;
@@ -82,18 +89,17 @@ static const u32 vmx_msr_index[] = {
82}; 89};
83#define NR_VMX_MSR ARRAY_SIZE(vmx_msr_index) 90#define NR_VMX_MSR ARRAY_SIZE(vmx_msr_index)
84 91
85#ifdef CONFIG_X86_64 92static inline u64 msr_efer_save_restore_bits(struct vmx_msr_entry msr)
86static unsigned msr_offset_kernel_gs_base; 93{
87#define NR_64BIT_MSRS 4 94 return (u64)msr.data & EFER_SAVE_RESTORE_BITS;
88/* 95}
89 * avoid save/load MSR_SYSCALL_MASK and MSR_LSTAR by std vt 96
90 * mechanism (cpu bug AA24) 97static inline int msr_efer_need_save_restore(struct kvm_vcpu *vcpu)
91 */ 98{
92#define NR_BAD_MSRS 2 99 int efer_offset = vcpu->msr_offset_efer;
93#else 100 return msr_efer_save_restore_bits(vcpu->host_msrs[efer_offset]) !=
94#define NR_64BIT_MSRS 0 101 msr_efer_save_restore_bits(vcpu->guest_msrs[efer_offset]);
95#define NR_BAD_MSRS 0 102}
96#endif
97 103
98static inline int is_page_fault(u32 intr_info) 104static inline int is_page_fault(u32 intr_info)
99{ 105{
@@ -115,13 +121,23 @@ static inline int is_external_interrupt(u32 intr_info)
115 == (INTR_TYPE_EXT_INTR | INTR_INFO_VALID_MASK); 121 == (INTR_TYPE_EXT_INTR | INTR_INFO_VALID_MASK);
116} 122}
117 123
118static struct vmx_msr_entry *find_msr_entry(struct kvm_vcpu *vcpu, u32 msr) 124static int __find_msr_index(struct kvm_vcpu *vcpu, u32 msr)
119{ 125{
120 int i; 126 int i;
121 127
122 for (i = 0; i < vcpu->nmsrs; ++i) 128 for (i = 0; i < vcpu->nmsrs; ++i)
123 if (vcpu->guest_msrs[i].index == msr) 129 if (vcpu->guest_msrs[i].index == msr)
124 return &vcpu->guest_msrs[i]; 130 return i;
131 return -1;
132}
133
134static struct vmx_msr_entry *find_msr_entry(struct kvm_vcpu *vcpu, u32 msr)
135{
136 int i;
137
138 i = __find_msr_index(vcpu, msr);
139 if (i >= 0)
140 return &vcpu->guest_msrs[i];
125 return NULL; 141 return NULL;
126} 142}
127 143
@@ -147,6 +163,7 @@ static void __vcpu_clear(void *arg)
147 vmcs_clear(vcpu->vmcs); 163 vmcs_clear(vcpu->vmcs);
148 if (per_cpu(current_vmcs, cpu) == vcpu->vmcs) 164 if (per_cpu(current_vmcs, cpu) == vcpu->vmcs)
149 per_cpu(current_vmcs, cpu) = NULL; 165 per_cpu(current_vmcs, cpu) = NULL;
166 rdtscll(vcpu->host_tsc);
150} 167}
151 168
152static void vcpu_clear(struct kvm_vcpu *vcpu) 169static void vcpu_clear(struct kvm_vcpu *vcpu)
@@ -234,6 +251,127 @@ static void vmcs_set_bits(unsigned long field, u32 mask)
234 vmcs_writel(field, vmcs_readl(field) | mask); 251 vmcs_writel(field, vmcs_readl(field) | mask);
235} 252}
236 253
254static void update_exception_bitmap(struct kvm_vcpu *vcpu)
255{
256 u32 eb;
257
258 eb = 1u << PF_VECTOR;
259 if (!vcpu->fpu_active)
260 eb |= 1u << NM_VECTOR;
261 if (vcpu->guest_debug.enabled)
262 eb |= 1u << 1;
263 if (vcpu->rmode.active)
264 eb = ~0;
265 vmcs_write32(EXCEPTION_BITMAP, eb);
266}
267
268static void reload_tss(void)
269{
270#ifndef CONFIG_X86_64
271
272 /*
273 * VT restores TR but not its size. Useless.
274 */
275 struct descriptor_table gdt;
276 struct segment_descriptor *descs;
277
278 get_gdt(&gdt);
279 descs = (void *)gdt.base;
280 descs[GDT_ENTRY_TSS].type = 9; /* available TSS */
281 load_TR_desc();
282#endif
283}
284
285static void load_transition_efer(struct kvm_vcpu *vcpu)
286{
287 u64 trans_efer;
288 int efer_offset = vcpu->msr_offset_efer;
289
290 trans_efer = vcpu->host_msrs[efer_offset].data;
291 trans_efer &= ~EFER_SAVE_RESTORE_BITS;
292 trans_efer |= msr_efer_save_restore_bits(
293 vcpu->guest_msrs[efer_offset]);
294 wrmsrl(MSR_EFER, trans_efer);
295 vcpu->stat.efer_reload++;
296}
297
298static void vmx_save_host_state(struct kvm_vcpu *vcpu)
299{
300 struct vmx_host_state *hs = &vcpu->vmx_host_state;
301
302 if (hs->loaded)
303 return;
304
305 hs->loaded = 1;
306 /*
307 * Set host fs and gs selectors. Unfortunately, 22.2.3 does not
308 * allow segment selectors with cpl > 0 or ti == 1.
309 */
310 hs->ldt_sel = read_ldt();
311 hs->fs_gs_ldt_reload_needed = hs->ldt_sel;
312 hs->fs_sel = read_fs();
313 if (!(hs->fs_sel & 7))
314 vmcs_write16(HOST_FS_SELECTOR, hs->fs_sel);
315 else {
316 vmcs_write16(HOST_FS_SELECTOR, 0);
317 hs->fs_gs_ldt_reload_needed = 1;
318 }
319 hs->gs_sel = read_gs();
320 if (!(hs->gs_sel & 7))
321 vmcs_write16(HOST_GS_SELECTOR, hs->gs_sel);
322 else {
323 vmcs_write16(HOST_GS_SELECTOR, 0);
324 hs->fs_gs_ldt_reload_needed = 1;
325 }
326
327#ifdef CONFIG_X86_64
328 vmcs_writel(HOST_FS_BASE, read_msr(MSR_FS_BASE));
329 vmcs_writel(HOST_GS_BASE, read_msr(MSR_GS_BASE));
330#else
331 vmcs_writel(HOST_FS_BASE, segment_base(hs->fs_sel));
332 vmcs_writel(HOST_GS_BASE, segment_base(hs->gs_sel));
333#endif
334
335#ifdef CONFIG_X86_64
336 if (is_long_mode(vcpu)) {
337 save_msrs(vcpu->host_msrs + vcpu->msr_offset_kernel_gs_base, 1);
338 }
339#endif
340 load_msrs(vcpu->guest_msrs, vcpu->save_nmsrs);
341 if (msr_efer_need_save_restore(vcpu))
342 load_transition_efer(vcpu);
343}
344
345static void vmx_load_host_state(struct kvm_vcpu *vcpu)
346{
347 struct vmx_host_state *hs = &vcpu->vmx_host_state;
348
349 if (!hs->loaded)
350 return;
351
352 hs->loaded = 0;
353 if (hs->fs_gs_ldt_reload_needed) {
354 load_ldt(hs->ldt_sel);
355 load_fs(hs->fs_sel);
356 /*
357 * If we have to reload gs, we must take care to
358 * preserve our gs base.
359 */
360 local_irq_disable();
361 load_gs(hs->gs_sel);
362#ifdef CONFIG_X86_64
363 wrmsrl(MSR_GS_BASE, vmcs_readl(HOST_GS_BASE));
364#endif
365 local_irq_enable();
366
367 reload_tss();
368 }
369 save_msrs(vcpu->guest_msrs, vcpu->save_nmsrs);
370 load_msrs(vcpu->host_msrs, vcpu->save_nmsrs);
371 if (msr_efer_need_save_restore(vcpu))
372 load_msrs(vcpu->host_msrs + vcpu->msr_offset_efer, 1);
373}
374
237/* 375/*
238 * Switches to specified vcpu, until a matching vcpu_put(), but assumes 376 * Switches to specified vcpu, until a matching vcpu_put(), but assumes
239 * vcpu mutex is already taken. 377 * vcpu mutex is already taken.
@@ -242,6 +380,7 @@ static void vmx_vcpu_load(struct kvm_vcpu *vcpu)
242{ 380{
243 u64 phys_addr = __pa(vcpu->vmcs); 381 u64 phys_addr = __pa(vcpu->vmcs);
244 int cpu; 382 int cpu;
383 u64 tsc_this, delta;
245 384
246 cpu = get_cpu(); 385 cpu = get_cpu();
247 386
@@ -275,15 +414,43 @@ static void vmx_vcpu_load(struct kvm_vcpu *vcpu)
275 414
276 rdmsrl(MSR_IA32_SYSENTER_ESP, sysenter_esp); 415 rdmsrl(MSR_IA32_SYSENTER_ESP, sysenter_esp);
277 vmcs_writel(HOST_IA32_SYSENTER_ESP, sysenter_esp); /* 22.2.3 */ 416 vmcs_writel(HOST_IA32_SYSENTER_ESP, sysenter_esp); /* 22.2.3 */
417
418 /*
419 * Make sure the time stamp counter is monotonous.
420 */
421 rdtscll(tsc_this);
422 delta = vcpu->host_tsc - tsc_this;
423 vmcs_write64(TSC_OFFSET, vmcs_read64(TSC_OFFSET) + delta);
278 } 424 }
279} 425}
280 426
281static void vmx_vcpu_put(struct kvm_vcpu *vcpu) 427static void vmx_vcpu_put(struct kvm_vcpu *vcpu)
282{ 428{
429 vmx_load_host_state(vcpu);
283 kvm_put_guest_fpu(vcpu); 430 kvm_put_guest_fpu(vcpu);
284 put_cpu(); 431 put_cpu();
285} 432}
286 433
434static void vmx_fpu_activate(struct kvm_vcpu *vcpu)
435{
436 if (vcpu->fpu_active)
437 return;
438 vcpu->fpu_active = 1;
439 vmcs_clear_bits(GUEST_CR0, CR0_TS_MASK);
440 if (vcpu->cr0 & CR0_TS_MASK)
441 vmcs_set_bits(GUEST_CR0, CR0_TS_MASK);
442 update_exception_bitmap(vcpu);
443}
444
445static void vmx_fpu_deactivate(struct kvm_vcpu *vcpu)
446{
447 if (!vcpu->fpu_active)
448 return;
449 vcpu->fpu_active = 0;
450 vmcs_set_bits(GUEST_CR0, CR0_TS_MASK);
451 update_exception_bitmap(vcpu);
452}
453
287static void vmx_vcpu_decache(struct kvm_vcpu *vcpu) 454static void vmx_vcpu_decache(struct kvm_vcpu *vcpu)
288{ 455{
289 vcpu_clear(vcpu); 456 vcpu_clear(vcpu);
@@ -332,41 +499,61 @@ static void vmx_inject_gp(struct kvm_vcpu *vcpu, unsigned error_code)
332} 499}
333 500
334/* 501/*
502 * Swap MSR entry in host/guest MSR entry array.
503 */
504void move_msr_up(struct kvm_vcpu *vcpu, int from, int to)
505{
506 struct vmx_msr_entry tmp;
507 tmp = vcpu->guest_msrs[to];
508 vcpu->guest_msrs[to] = vcpu->guest_msrs[from];
509 vcpu->guest_msrs[from] = tmp;
510 tmp = vcpu->host_msrs[to];
511 vcpu->host_msrs[to] = vcpu->host_msrs[from];
512 vcpu->host_msrs[from] = tmp;
513}
514
515/*
335 * Set up the vmcs to automatically save and restore system 516 * Set up the vmcs to automatically save and restore system
336 * msrs. Don't touch the 64-bit msrs if the guest is in legacy 517 * msrs. Don't touch the 64-bit msrs if the guest is in legacy
337 * mode, as fiddling with msrs is very expensive. 518 * mode, as fiddling with msrs is very expensive.
338 */ 519 */
339static void setup_msrs(struct kvm_vcpu *vcpu) 520static void setup_msrs(struct kvm_vcpu *vcpu)
340{ 521{
341 int nr_skip, nr_good_msrs; 522 int save_nmsrs;
342
343 if (is_long_mode(vcpu))
344 nr_skip = NR_BAD_MSRS;
345 else
346 nr_skip = NR_64BIT_MSRS;
347 nr_good_msrs = vcpu->nmsrs - nr_skip;
348 523
349 /* 524 save_nmsrs = 0;
350 * MSR_K6_STAR is only needed on long mode guests, and only
351 * if efer.sce is enabled.
352 */
353 if (find_msr_entry(vcpu, MSR_K6_STAR)) {
354 --nr_good_msrs;
355#ifdef CONFIG_X86_64 525#ifdef CONFIG_X86_64
356 if (is_long_mode(vcpu) && (vcpu->shadow_efer & EFER_SCE)) 526 if (is_long_mode(vcpu)) {
357 ++nr_good_msrs; 527 int index;
358#endif 528
529 index = __find_msr_index(vcpu, MSR_SYSCALL_MASK);
530 if (index >= 0)
531 move_msr_up(vcpu, index, save_nmsrs++);
532 index = __find_msr_index(vcpu, MSR_LSTAR);
533 if (index >= 0)
534 move_msr_up(vcpu, index, save_nmsrs++);
535 index = __find_msr_index(vcpu, MSR_CSTAR);
536 if (index >= 0)
537 move_msr_up(vcpu, index, save_nmsrs++);
538 index = __find_msr_index(vcpu, MSR_KERNEL_GS_BASE);
539 if (index >= 0)
540 move_msr_up(vcpu, index, save_nmsrs++);
541 /*
542 * MSR_K6_STAR is only needed on long mode guests, and only
543 * if efer.sce is enabled.
544 */
545 index = __find_msr_index(vcpu, MSR_K6_STAR);
546 if ((index >= 0) && (vcpu->shadow_efer & EFER_SCE))
547 move_msr_up(vcpu, index, save_nmsrs++);
359 } 548 }
549#endif
550 vcpu->save_nmsrs = save_nmsrs;
360 551
361 vmcs_writel(VM_ENTRY_MSR_LOAD_ADDR, 552#ifdef CONFIG_X86_64
362 virt_to_phys(vcpu->guest_msrs + nr_skip)); 553 vcpu->msr_offset_kernel_gs_base =
363 vmcs_writel(VM_EXIT_MSR_STORE_ADDR, 554 __find_msr_index(vcpu, MSR_KERNEL_GS_BASE);
364 virt_to_phys(vcpu->guest_msrs + nr_skip)); 555#endif
365 vmcs_writel(VM_EXIT_MSR_LOAD_ADDR, 556 vcpu->msr_offset_efer = __find_msr_index(vcpu, MSR_EFER);
366 virt_to_phys(vcpu->host_msrs + nr_skip));
367 vmcs_write32(VM_EXIT_MSR_STORE_COUNT, nr_good_msrs); /* 22.2.2 */
368 vmcs_write32(VM_EXIT_MSR_LOAD_COUNT, nr_good_msrs); /* 22.2.2 */
369 vmcs_write32(VM_ENTRY_MSR_LOAD_COUNT, nr_good_msrs); /* 22.2.2 */
370} 557}
371 558
372/* 559/*
@@ -394,23 +581,6 @@ static void guest_write_tsc(u64 guest_tsc)
394 vmcs_write64(TSC_OFFSET, guest_tsc - host_tsc); 581 vmcs_write64(TSC_OFFSET, guest_tsc - host_tsc);
395} 582}
396 583
397static void reload_tss(void)
398{
399#ifndef CONFIG_X86_64
400
401 /*
402 * VT restores TR but not its size. Useless.
403 */
404 struct descriptor_table gdt;
405 struct segment_descriptor *descs;
406
407 get_gdt(&gdt);
408 descs = (void *)gdt.base;
409 descs[GDT_ENTRY_TSS].type = 9; /* available TSS */
410 load_TR_desc();
411#endif
412}
413
414/* 584/*
415 * Reads an msr value (of 'msr_index') into 'pdata'. 585 * Reads an msr value (of 'msr_index') into 'pdata'.
416 * Returns 0 on success, non-0 otherwise. 586 * Returns 0 on success, non-0 otherwise.
@@ -470,10 +640,15 @@ static int vmx_get_msr(struct kvm_vcpu *vcpu, u32 msr_index, u64 *pdata)
470static int vmx_set_msr(struct kvm_vcpu *vcpu, u32 msr_index, u64 data) 640static int vmx_set_msr(struct kvm_vcpu *vcpu, u32 msr_index, u64 data)
471{ 641{
472 struct vmx_msr_entry *msr; 642 struct vmx_msr_entry *msr;
643 int ret = 0;
644
473 switch (msr_index) { 645 switch (msr_index) {
474#ifdef CONFIG_X86_64 646#ifdef CONFIG_X86_64
475 case MSR_EFER: 647 case MSR_EFER:
476 return kvm_set_msr_common(vcpu, msr_index, data); 648 ret = kvm_set_msr_common(vcpu, msr_index, data);
649 if (vcpu->vmx_host_state.loaded)
650 load_transition_efer(vcpu);
651 break;
477 case MSR_FS_BASE: 652 case MSR_FS_BASE:
478 vmcs_writel(GUEST_FS_BASE, data); 653 vmcs_writel(GUEST_FS_BASE, data);
479 break; 654 break;
@@ -497,14 +672,14 @@ static int vmx_set_msr(struct kvm_vcpu *vcpu, u32 msr_index, u64 data)
497 msr = find_msr_entry(vcpu, msr_index); 672 msr = find_msr_entry(vcpu, msr_index);
498 if (msr) { 673 if (msr) {
499 msr->data = data; 674 msr->data = data;
675 if (vcpu->vmx_host_state.loaded)
676 load_msrs(vcpu->guest_msrs, vcpu->save_nmsrs);
500 break; 677 break;
501 } 678 }
502 return kvm_set_msr_common(vcpu, msr_index, data); 679 ret = kvm_set_msr_common(vcpu, msr_index, data);
503 msr->data = data;
504 break;
505 } 680 }
506 681
507 return 0; 682 return ret;
508} 683}
509 684
510/* 685/*
@@ -530,10 +705,8 @@ static void vcpu_put_rsp_rip(struct kvm_vcpu *vcpu)
530static int set_guest_debug(struct kvm_vcpu *vcpu, struct kvm_debug_guest *dbg) 705static int set_guest_debug(struct kvm_vcpu *vcpu, struct kvm_debug_guest *dbg)
531{ 706{
532 unsigned long dr7 = 0x400; 707 unsigned long dr7 = 0x400;
533 u32 exception_bitmap;
534 int old_singlestep; 708 int old_singlestep;
535 709
536 exception_bitmap = vmcs_read32(EXCEPTION_BITMAP);
537 old_singlestep = vcpu->guest_debug.singlestep; 710 old_singlestep = vcpu->guest_debug.singlestep;
538 711
539 vcpu->guest_debug.enabled = dbg->enabled; 712 vcpu->guest_debug.enabled = dbg->enabled;
@@ -549,13 +722,9 @@ static int set_guest_debug(struct kvm_vcpu *vcpu, struct kvm_debug_guest *dbg)
549 dr7 |= 0 << (i*4+16); /* execution breakpoint */ 722 dr7 |= 0 << (i*4+16); /* execution breakpoint */
550 } 723 }
551 724
552 exception_bitmap |= (1u << 1); /* Trap debug exceptions */
553
554 vcpu->guest_debug.singlestep = dbg->singlestep; 725 vcpu->guest_debug.singlestep = dbg->singlestep;
555 } else { 726 } else
556 exception_bitmap &= ~(1u << 1); /* Ignore debug exceptions */
557 vcpu->guest_debug.singlestep = 0; 727 vcpu->guest_debug.singlestep = 0;
558 }
559 728
560 if (old_singlestep && !vcpu->guest_debug.singlestep) { 729 if (old_singlestep && !vcpu->guest_debug.singlestep) {
561 unsigned long flags; 730 unsigned long flags;
@@ -565,7 +734,7 @@ static int set_guest_debug(struct kvm_vcpu *vcpu, struct kvm_debug_guest *dbg)
565 vmcs_writel(GUEST_RFLAGS, flags); 734 vmcs_writel(GUEST_RFLAGS, flags);
566 } 735 }
567 736
568 vmcs_write32(EXCEPTION_BITMAP, exception_bitmap); 737 update_exception_bitmap(vcpu);
569 vmcs_writel(GUEST_DR7, dr7); 738 vmcs_writel(GUEST_DR7, dr7);
570 739
571 return 0; 740 return 0;
@@ -679,14 +848,6 @@ static __exit void hardware_unsetup(void)
679 free_kvm_area(); 848 free_kvm_area();
680} 849}
681 850
682static void update_exception_bitmap(struct kvm_vcpu *vcpu)
683{
684 if (vcpu->rmode.active)
685 vmcs_write32(EXCEPTION_BITMAP, ~0);
686 else
687 vmcs_write32(EXCEPTION_BITMAP, 1 << PF_VECTOR);
688}
689
690static void fix_pmode_dataseg(int seg, struct kvm_save_segment *save) 851static void fix_pmode_dataseg(int seg, struct kvm_save_segment *save)
691{ 852{
692 struct kvm_vmx_segment_field *sf = &kvm_vmx_segment_fields[seg]; 853 struct kvm_vmx_segment_field *sf = &kvm_vmx_segment_fields[seg];
@@ -793,6 +954,8 @@ static void enter_rmode(struct kvm_vcpu *vcpu)
793 fix_rmode_seg(VCPU_SREG_DS, &vcpu->rmode.ds); 954 fix_rmode_seg(VCPU_SREG_DS, &vcpu->rmode.ds);
794 fix_rmode_seg(VCPU_SREG_GS, &vcpu->rmode.gs); 955 fix_rmode_seg(VCPU_SREG_GS, &vcpu->rmode.gs);
795 fix_rmode_seg(VCPU_SREG_FS, &vcpu->rmode.fs); 956 fix_rmode_seg(VCPU_SREG_FS, &vcpu->rmode.fs);
957
958 init_rmode_tss(vcpu->kvm);
796} 959}
797 960
798#ifdef CONFIG_X86_64 961#ifdef CONFIG_X86_64
@@ -837,6 +1000,8 @@ static void vmx_decache_cr4_guest_bits(struct kvm_vcpu *vcpu)
837 1000
838static void vmx_set_cr0(struct kvm_vcpu *vcpu, unsigned long cr0) 1001static void vmx_set_cr0(struct kvm_vcpu *vcpu, unsigned long cr0)
839{ 1002{
1003 vmx_fpu_deactivate(vcpu);
1004
840 if (vcpu->rmode.active && (cr0 & CR0_PE_MASK)) 1005 if (vcpu->rmode.active && (cr0 & CR0_PE_MASK))
841 enter_pmode(vcpu); 1006 enter_pmode(vcpu);
842 1007
@@ -852,26 +1017,20 @@ static void vmx_set_cr0(struct kvm_vcpu *vcpu, unsigned long cr0)
852 } 1017 }
853#endif 1018#endif
854 1019
855 if (!(cr0 & CR0_TS_MASK)) {
856 vcpu->fpu_active = 1;
857 vmcs_clear_bits(EXCEPTION_BITMAP, CR0_TS_MASK);
858 }
859
860 vmcs_writel(CR0_READ_SHADOW, cr0); 1020 vmcs_writel(CR0_READ_SHADOW, cr0);
861 vmcs_writel(GUEST_CR0, 1021 vmcs_writel(GUEST_CR0,
862 (cr0 & ~KVM_GUEST_CR0_MASK) | KVM_VM_CR0_ALWAYS_ON); 1022 (cr0 & ~KVM_GUEST_CR0_MASK) | KVM_VM_CR0_ALWAYS_ON);
863 vcpu->cr0 = cr0; 1023 vcpu->cr0 = cr0;
1024
1025 if (!(cr0 & CR0_TS_MASK) || !(cr0 & CR0_PE_MASK))
1026 vmx_fpu_activate(vcpu);
864} 1027}
865 1028
866static void vmx_set_cr3(struct kvm_vcpu *vcpu, unsigned long cr3) 1029static void vmx_set_cr3(struct kvm_vcpu *vcpu, unsigned long cr3)
867{ 1030{
868 vmcs_writel(GUEST_CR3, cr3); 1031 vmcs_writel(GUEST_CR3, cr3);
869 1032 if (vcpu->cr0 & CR0_PE_MASK)
870 if (!(vcpu->cr0 & CR0_TS_MASK)) { 1033 vmx_fpu_deactivate(vcpu);
871 vcpu->fpu_active = 0;
872 vmcs_set_bits(GUEST_CR0, CR0_TS_MASK);
873 vmcs_set_bits(EXCEPTION_BITMAP, 1 << NM_VECTOR);
874 }
875} 1034}
876 1035
877static void vmx_set_cr4(struct kvm_vcpu *vcpu, unsigned long cr4) 1036static void vmx_set_cr4(struct kvm_vcpu *vcpu, unsigned long cr4)
@@ -937,23 +1096,11 @@ static void vmx_get_segment(struct kvm_vcpu *vcpu,
937 var->unusable = (ar >> 16) & 1; 1096 var->unusable = (ar >> 16) & 1;
938} 1097}
939 1098
940static void vmx_set_segment(struct kvm_vcpu *vcpu, 1099static u32 vmx_segment_access_rights(struct kvm_segment *var)
941 struct kvm_segment *var, int seg)
942{ 1100{
943 struct kvm_vmx_segment_field *sf = &kvm_vmx_segment_fields[seg];
944 u32 ar; 1101 u32 ar;
945 1102
946 vmcs_writel(sf->base, var->base); 1103 if (var->unusable)
947 vmcs_write32(sf->limit, var->limit);
948 vmcs_write16(sf->selector, var->selector);
949 if (vcpu->rmode.active && var->s) {
950 /*
951 * Hack real-mode segments into vm86 compatibility.
952 */
953 if (var->base == 0xffff0000 && var->selector == 0xf000)
954 vmcs_writel(sf->base, 0xf0000);
955 ar = 0xf3;
956 } else if (var->unusable)
957 ar = 1 << 16; 1104 ar = 1 << 16;
958 else { 1105 else {
959 ar = var->type & 15; 1106 ar = var->type & 15;
@@ -967,6 +1114,35 @@ static void vmx_set_segment(struct kvm_vcpu *vcpu,
967 } 1114 }
968 if (ar == 0) /* a 0 value means unusable */ 1115 if (ar == 0) /* a 0 value means unusable */
969 ar = AR_UNUSABLE_MASK; 1116 ar = AR_UNUSABLE_MASK;
1117
1118 return ar;
1119}
1120
1121static void vmx_set_segment(struct kvm_vcpu *vcpu,
1122 struct kvm_segment *var, int seg)
1123{
1124 struct kvm_vmx_segment_field *sf = &kvm_vmx_segment_fields[seg];
1125 u32 ar;
1126
1127 if (vcpu->rmode.active && seg == VCPU_SREG_TR) {
1128 vcpu->rmode.tr.selector = var->selector;
1129 vcpu->rmode.tr.base = var->base;
1130 vcpu->rmode.tr.limit = var->limit;
1131 vcpu->rmode.tr.ar = vmx_segment_access_rights(var);
1132 return;
1133 }
1134 vmcs_writel(sf->base, var->base);
1135 vmcs_write32(sf->limit, var->limit);
1136 vmcs_write16(sf->selector, var->selector);
1137 if (vcpu->rmode.active && var->s) {
1138 /*
1139 * Hack real-mode segments into vm86 compatibility.
1140 */
1141 if (var->base == 0xffff0000 && var->selector == 0xf000)
1142 vmcs_writel(sf->base, 0xf0000);
1143 ar = 0xf3;
1144 } else
1145 ar = vmx_segment_access_rights(var);
970 vmcs_write32(sf->ar_bytes, ar); 1146 vmcs_write32(sf->ar_bytes, ar);
971} 1147}
972 1148
@@ -1018,16 +1194,16 @@ static int init_rmode_tss(struct kvm* kvm)
1018 } 1194 }
1019 1195
1020 page = kmap_atomic(p1, KM_USER0); 1196 page = kmap_atomic(p1, KM_USER0);
1021 memset(page, 0, PAGE_SIZE); 1197 clear_page(page);
1022 *(u16*)(page + 0x66) = TSS_BASE_SIZE + TSS_REDIRECTION_SIZE; 1198 *(u16*)(page + 0x66) = TSS_BASE_SIZE + TSS_REDIRECTION_SIZE;
1023 kunmap_atomic(page, KM_USER0); 1199 kunmap_atomic(page, KM_USER0);
1024 1200
1025 page = kmap_atomic(p2, KM_USER0); 1201 page = kmap_atomic(p2, KM_USER0);
1026 memset(page, 0, PAGE_SIZE); 1202 clear_page(page);
1027 kunmap_atomic(page, KM_USER0); 1203 kunmap_atomic(page, KM_USER0);
1028 1204
1029 page = kmap_atomic(p3, KM_USER0); 1205 page = kmap_atomic(p3, KM_USER0);
1030 memset(page, 0, PAGE_SIZE); 1206 clear_page(page);
1031 *(page + RMODE_TSS_SIZE - 2 * PAGE_SIZE - 1) = ~0; 1207 *(page + RMODE_TSS_SIZE - 2 * PAGE_SIZE - 1) = ~0;
1032 kunmap_atomic(page, KM_USER0); 1208 kunmap_atomic(page, KM_USER0);
1033 1209
@@ -1066,7 +1242,7 @@ static int vmx_vcpu_setup(struct kvm_vcpu *vcpu)
1066 struct descriptor_table dt; 1242 struct descriptor_table dt;
1067 int i; 1243 int i;
1068 int ret = 0; 1244 int ret = 0;
1069 extern asmlinkage void kvm_vmx_return(void); 1245 unsigned long kvm_vmx_return;
1070 1246
1071 if (!init_rmode_tss(vcpu->kvm)) { 1247 if (!init_rmode_tss(vcpu->kvm)) {
1072 ret = -ENOMEM; 1248 ret = -ENOMEM;
@@ -1076,9 +1252,9 @@ static int vmx_vcpu_setup(struct kvm_vcpu *vcpu)
1076 memset(vcpu->regs, 0, sizeof(vcpu->regs)); 1252 memset(vcpu->regs, 0, sizeof(vcpu->regs));
1077 vcpu->regs[VCPU_REGS_RDX] = get_rdx_init_val(); 1253 vcpu->regs[VCPU_REGS_RDX] = get_rdx_init_val();
1078 vcpu->cr8 = 0; 1254 vcpu->cr8 = 0;
1079 vcpu->apic_base = 0xfee00000 | 1255 vcpu->apic_base = 0xfee00000 | MSR_IA32_APICBASE_ENABLE;
1080 /*for vcpu 0*/ MSR_IA32_APICBASE_BSP | 1256 if (vcpu == &vcpu->kvm->vcpus[0])
1081 MSR_IA32_APICBASE_ENABLE; 1257 vcpu->apic_base |= MSR_IA32_APICBASE_BSP;
1082 1258
1083 fx_init(vcpu); 1259 fx_init(vcpu);
1084 1260
@@ -1129,8 +1305,8 @@ static int vmx_vcpu_setup(struct kvm_vcpu *vcpu)
1129 vmcs_write32(GUEST_PENDING_DBG_EXCEPTIONS, 0); 1305 vmcs_write32(GUEST_PENDING_DBG_EXCEPTIONS, 0);
1130 1306
1131 /* I/O */ 1307 /* I/O */
1132 vmcs_write64(IO_BITMAP_A, 0); 1308 vmcs_write64(IO_BITMAP_A, page_to_phys(vmx_io_bitmap_a));
1133 vmcs_write64(IO_BITMAP_B, 0); 1309 vmcs_write64(IO_BITMAP_B, page_to_phys(vmx_io_bitmap_b));
1134 1310
1135 guest_write_tsc(0); 1311 guest_write_tsc(0);
1136 1312
@@ -1150,12 +1326,11 @@ static int vmx_vcpu_setup(struct kvm_vcpu *vcpu)
1150 CPU_BASED_HLT_EXITING /* 20.6.2 */ 1326 CPU_BASED_HLT_EXITING /* 20.6.2 */
1151 | CPU_BASED_CR8_LOAD_EXITING /* 20.6.2 */ 1327 | CPU_BASED_CR8_LOAD_EXITING /* 20.6.2 */
1152 | CPU_BASED_CR8_STORE_EXITING /* 20.6.2 */ 1328 | CPU_BASED_CR8_STORE_EXITING /* 20.6.2 */
1153 | CPU_BASED_UNCOND_IO_EXITING /* 20.6.2 */ 1329 | CPU_BASED_ACTIVATE_IO_BITMAP /* 20.6.2 */
1154 | CPU_BASED_MOV_DR_EXITING 1330 | CPU_BASED_MOV_DR_EXITING
1155 | CPU_BASED_USE_TSC_OFFSETING /* 21.3 */ 1331 | CPU_BASED_USE_TSC_OFFSETING /* 21.3 */
1156 ); 1332 );
1157 1333
1158 vmcs_write32(EXCEPTION_BITMAP, 1 << PF_VECTOR);
1159 vmcs_write32(PAGE_FAULT_ERROR_CODE_MASK, 0); 1334 vmcs_write32(PAGE_FAULT_ERROR_CODE_MASK, 0);
1160 vmcs_write32(PAGE_FAULT_ERROR_CODE_MATCH, 0); 1335 vmcs_write32(PAGE_FAULT_ERROR_CODE_MATCH, 0);
1161 vmcs_write32(CR3_TARGET_COUNT, 0); /* 22.2.1 */ 1336 vmcs_write32(CR3_TARGET_COUNT, 0); /* 22.2.1 */
@@ -1185,8 +1360,11 @@ static int vmx_vcpu_setup(struct kvm_vcpu *vcpu)
1185 get_idt(&dt); 1360 get_idt(&dt);
1186 vmcs_writel(HOST_IDTR_BASE, dt.base); /* 22.2.4 */ 1361 vmcs_writel(HOST_IDTR_BASE, dt.base); /* 22.2.4 */
1187 1362
1188 1363 asm ("mov $.Lkvm_vmx_return, %0" : "=r"(kvm_vmx_return));
1189 vmcs_writel(HOST_RIP, (unsigned long)kvm_vmx_return); /* 22.2.5 */ 1364 vmcs_writel(HOST_RIP, kvm_vmx_return); /* 22.2.5 */
1365 vmcs_write32(VM_EXIT_MSR_STORE_COUNT, 0);
1366 vmcs_write32(VM_EXIT_MSR_LOAD_COUNT, 0);
1367 vmcs_write32(VM_ENTRY_MSR_LOAD_COUNT, 0);
1190 1368
1191 rdmsr(MSR_IA32_SYSENTER_CS, host_sysenter_cs, junk); 1369 rdmsr(MSR_IA32_SYSENTER_CS, host_sysenter_cs, junk);
1192 vmcs_write32(HOST_IA32_SYSENTER_CS, host_sysenter_cs); 1370 vmcs_write32(HOST_IA32_SYSENTER_CS, host_sysenter_cs);
@@ -1210,10 +1388,6 @@ static int vmx_vcpu_setup(struct kvm_vcpu *vcpu)
1210 vcpu->host_msrs[j].reserved = 0; 1388 vcpu->host_msrs[j].reserved = 0;
1211 vcpu->host_msrs[j].data = data; 1389 vcpu->host_msrs[j].data = data;
1212 vcpu->guest_msrs[j] = vcpu->host_msrs[j]; 1390 vcpu->guest_msrs[j] = vcpu->host_msrs[j];
1213#ifdef CONFIG_X86_64
1214 if (index == MSR_KERNEL_GS_BASE)
1215 msr_offset_kernel_gs_base = j;
1216#endif
1217 ++vcpu->nmsrs; 1391 ++vcpu->nmsrs;
1218 } 1392 }
1219 1393
@@ -1241,6 +1415,8 @@ static int vmx_vcpu_setup(struct kvm_vcpu *vcpu)
1241#ifdef CONFIG_X86_64 1415#ifdef CONFIG_X86_64
1242 vmx_set_efer(vcpu, 0); 1416 vmx_set_efer(vcpu, 0);
1243#endif 1417#endif
1418 vmx_fpu_activate(vcpu);
1419 update_exception_bitmap(vcpu);
1244 1420
1245 return 0; 1421 return 0;
1246 1422
@@ -1365,7 +1541,11 @@ static int handle_rmode_exception(struct kvm_vcpu *vcpu,
1365 if (!vcpu->rmode.active) 1541 if (!vcpu->rmode.active)
1366 return 0; 1542 return 0;
1367 1543
1368 if (vec == GP_VECTOR && err_code == 0) 1544 /*
1545 * Instruction with address size override prefix opcode 0x67
1546 * Cause the #SS fault with 0 error code in VM86 mode.
1547 */
1548 if (((vec == GP_VECTOR) || (vec == SS_VECTOR)) && err_code == 0)
1369 if (emulate_instruction(vcpu, NULL, 0, 0) == EMULATE_DONE) 1549 if (emulate_instruction(vcpu, NULL, 0, 0) == EMULATE_DONE)
1370 return 1; 1550 return 1;
1371 return 0; 1551 return 0;
@@ -1400,10 +1580,7 @@ static int handle_exception(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
1400 } 1580 }
1401 1581
1402 if (is_no_device(intr_info)) { 1582 if (is_no_device(intr_info)) {
1403 vcpu->fpu_active = 1; 1583 vmx_fpu_activate(vcpu);
1404 vmcs_clear_bits(EXCEPTION_BITMAP, 1 << NM_VECTOR);
1405 if (!(vcpu->cr0 & CR0_TS_MASK))
1406 vmcs_clear_bits(GUEST_CR0, CR0_TS_MASK);
1407 return 1; 1584 return 1;
1408 } 1585 }
1409 1586
@@ -1445,8 +1622,13 @@ static int handle_exception(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
1445 1622
1446 if (vcpu->rmode.active && 1623 if (vcpu->rmode.active &&
1447 handle_rmode_exception(vcpu, intr_info & INTR_INFO_VECTOR_MASK, 1624 handle_rmode_exception(vcpu, intr_info & INTR_INFO_VECTOR_MASK,
1448 error_code)) 1625 error_code)) {
1626 if (vcpu->halt_request) {
1627 vcpu->halt_request = 0;
1628 return kvm_emulate_halt(vcpu);
1629 }
1449 return 1; 1630 return 1;
1631 }
1450 1632
1451 if ((intr_info & (INTR_INFO_INTR_TYPE_MASK | INTR_INFO_VECTOR_MASK)) == (INTR_TYPE_EXCEPTION | 1)) { 1633 if ((intr_info & (INTR_INFO_INTR_TYPE_MASK | INTR_INFO_VECTOR_MASK)) == (INTR_TYPE_EXCEPTION | 1)) {
1452 kvm_run->exit_reason = KVM_EXIT_DEBUG; 1634 kvm_run->exit_reason = KVM_EXIT_DEBUG;
@@ -1595,11 +1777,10 @@ static int handle_cr(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
1595 break; 1777 break;
1596 case 2: /* clts */ 1778 case 2: /* clts */
1597 vcpu_load_rsp_rip(vcpu); 1779 vcpu_load_rsp_rip(vcpu);
1598 vcpu->fpu_active = 1; 1780 vmx_fpu_deactivate(vcpu);
1599 vmcs_clear_bits(EXCEPTION_BITMAP, 1 << NM_VECTOR);
1600 vmcs_clear_bits(GUEST_CR0, CR0_TS_MASK);
1601 vcpu->cr0 &= ~CR0_TS_MASK; 1781 vcpu->cr0 &= ~CR0_TS_MASK;
1602 vmcs_writel(CR0_READ_SHADOW, vcpu->cr0); 1782 vmcs_writel(CR0_READ_SHADOW, vcpu->cr0);
1783 vmx_fpu_activate(vcpu);
1603 skip_emulated_instruction(vcpu); 1784 skip_emulated_instruction(vcpu);
1604 return 1; 1785 return 1;
1605 case 1: /*mov from cr*/ 1786 case 1: /*mov from cr*/
@@ -1734,12 +1915,7 @@ static int handle_interrupt_window(struct kvm_vcpu *vcpu,
1734static int handle_halt(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run) 1915static int handle_halt(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
1735{ 1916{
1736 skip_emulated_instruction(vcpu); 1917 skip_emulated_instruction(vcpu);
1737 if (vcpu->irq_summary) 1918 return kvm_emulate_halt(vcpu);
1738 return 1;
1739
1740 kvm_run->exit_reason = KVM_EXIT_HLT;
1741 ++vcpu->stat.halt_exits;
1742 return 0;
1743} 1919}
1744 1920
1745static int handle_vmcall(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run) 1921static int handle_vmcall(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
@@ -1770,7 +1946,7 @@ static int (*kvm_vmx_exit_handlers[])(struct kvm_vcpu *vcpu,
1770}; 1946};
1771 1947
1772static const int kvm_vmx_max_exit_handlers = 1948static const int kvm_vmx_max_exit_handlers =
1773 sizeof(kvm_vmx_exit_handlers) / sizeof(*kvm_vmx_exit_handlers); 1949 ARRAY_SIZE(kvm_vmx_exit_handlers);
1774 1950
1775/* 1951/*
1776 * The guest has exited. See if we can fix it or if we need userspace 1952 * The guest has exited. See if we can fix it or if we need userspace
@@ -1810,61 +1986,44 @@ static int dm_request_for_irq_injection(struct kvm_vcpu *vcpu,
1810 (vmcs_readl(GUEST_RFLAGS) & X86_EFLAGS_IF)); 1986 (vmcs_readl(GUEST_RFLAGS) & X86_EFLAGS_IF));
1811} 1987}
1812 1988
1989static void vmx_flush_tlb(struct kvm_vcpu *vcpu)
1990{
1991}
1992
1813static int vmx_vcpu_run(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run) 1993static int vmx_vcpu_run(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
1814{ 1994{
1815 u8 fail; 1995 u8 fail;
1816 u16 fs_sel, gs_sel, ldt_sel;
1817 int fs_gs_ldt_reload_needed;
1818 int r; 1996 int r;
1819 1997
1820again: 1998preempted:
1821 /* 1999 if (vcpu->guest_debug.enabled)
1822 * Set host fs and gs selectors. Unfortunately, 22.2.3 does not 2000 kvm_guest_debug_pre(vcpu);
1823 * allow segment selectors with cpl > 0 or ti == 1.
1824 */
1825 fs_sel = read_fs();
1826 gs_sel = read_gs();
1827 ldt_sel = read_ldt();
1828 fs_gs_ldt_reload_needed = (fs_sel & 7) | (gs_sel & 7) | ldt_sel;
1829 if (!fs_gs_ldt_reload_needed) {
1830 vmcs_write16(HOST_FS_SELECTOR, fs_sel);
1831 vmcs_write16(HOST_GS_SELECTOR, gs_sel);
1832 } else {
1833 vmcs_write16(HOST_FS_SELECTOR, 0);
1834 vmcs_write16(HOST_GS_SELECTOR, 0);
1835 }
1836
1837#ifdef CONFIG_X86_64
1838 vmcs_writel(HOST_FS_BASE, read_msr(MSR_FS_BASE));
1839 vmcs_writel(HOST_GS_BASE, read_msr(MSR_GS_BASE));
1840#else
1841 vmcs_writel(HOST_FS_BASE, segment_base(fs_sel));
1842 vmcs_writel(HOST_GS_BASE, segment_base(gs_sel));
1843#endif
1844 2001
2002again:
1845 if (!vcpu->mmio_read_completed) 2003 if (!vcpu->mmio_read_completed)
1846 do_interrupt_requests(vcpu, kvm_run); 2004 do_interrupt_requests(vcpu, kvm_run);
1847 2005
1848 if (vcpu->guest_debug.enabled) 2006 vmx_save_host_state(vcpu);
1849 kvm_guest_debug_pre(vcpu);
1850
1851 kvm_load_guest_fpu(vcpu); 2007 kvm_load_guest_fpu(vcpu);
1852 2008
2009 r = kvm_mmu_reload(vcpu);
2010 if (unlikely(r))
2011 goto out;
2012
1853 /* 2013 /*
1854 * Loading guest fpu may have cleared host cr0.ts 2014 * Loading guest fpu may have cleared host cr0.ts
1855 */ 2015 */
1856 vmcs_writel(HOST_CR0, read_cr0()); 2016 vmcs_writel(HOST_CR0, read_cr0());
1857 2017
1858#ifdef CONFIG_X86_64 2018 local_irq_disable();
1859 if (is_long_mode(vcpu)) { 2019
1860 save_msrs(vcpu->host_msrs + msr_offset_kernel_gs_base, 1); 2020 vcpu->guest_mode = 1;
1861 load_msrs(vcpu->guest_msrs, NR_BAD_MSRS); 2021 if (vcpu->requests)
1862 } 2022 if (test_and_clear_bit(KVM_TLB_FLUSH, &vcpu->requests))
1863#endif 2023 vmx_flush_tlb(vcpu);
1864 2024
1865 asm ( 2025 asm (
1866 /* Store host registers */ 2026 /* Store host registers */
1867 "pushf \n\t"
1868#ifdef CONFIG_X86_64 2027#ifdef CONFIG_X86_64
1869 "push %%rax; push %%rbx; push %%rdx;" 2028 "push %%rax; push %%rbx; push %%rdx;"
1870 "push %%rsi; push %%rdi; push %%rbp;" 2029 "push %%rsi; push %%rdi; push %%rbp;"
@@ -1909,12 +2068,11 @@ again:
1909 "mov %c[rcx](%3), %%ecx \n\t" /* kills %3 (ecx) */ 2068 "mov %c[rcx](%3), %%ecx \n\t" /* kills %3 (ecx) */
1910#endif 2069#endif
1911 /* Enter guest mode */ 2070 /* Enter guest mode */
1912 "jne launched \n\t" 2071 "jne .Llaunched \n\t"
1913 ASM_VMX_VMLAUNCH "\n\t" 2072 ASM_VMX_VMLAUNCH "\n\t"
1914 "jmp kvm_vmx_return \n\t" 2073 "jmp .Lkvm_vmx_return \n\t"
1915 "launched: " ASM_VMX_VMRESUME "\n\t" 2074 ".Llaunched: " ASM_VMX_VMRESUME "\n\t"
1916 ".globl kvm_vmx_return \n\t" 2075 ".Lkvm_vmx_return: "
1917 "kvm_vmx_return: "
1918 /* Save guest registers, load host registers, keep flags */ 2076 /* Save guest registers, load host registers, keep flags */
1919#ifdef CONFIG_X86_64 2077#ifdef CONFIG_X86_64
1920 "xchg %3, (%%rsp) \n\t" 2078 "xchg %3, (%%rsp) \n\t"
@@ -1957,7 +2115,6 @@ again:
1957 "pop %%ecx; popa \n\t" 2115 "pop %%ecx; popa \n\t"
1958#endif 2116#endif
1959 "setbe %0 \n\t" 2117 "setbe %0 \n\t"
1960 "popf \n\t"
1961 : "=q" (fail) 2118 : "=q" (fail)
1962 : "r"(vcpu->launched), "d"((unsigned long)HOST_RSP), 2119 : "r"(vcpu->launched), "d"((unsigned long)HOST_RSP),
1963 "c"(vcpu), 2120 "c"(vcpu),
@@ -1981,84 +2138,61 @@ again:
1981 [cr2]"i"(offsetof(struct kvm_vcpu, cr2)) 2138 [cr2]"i"(offsetof(struct kvm_vcpu, cr2))
1982 : "cc", "memory" ); 2139 : "cc", "memory" );
1983 2140
1984 /* 2141 vcpu->guest_mode = 0;
1985 * Reload segment selectors ASAP. (it's needed for a functional 2142 local_irq_enable();
1986 * kernel: x86 relies on having __KERNEL_PDA in %fs and x86_64
1987 * relies on having 0 in %gs for the CPU PDA to work.)
1988 */
1989 if (fs_gs_ldt_reload_needed) {
1990 load_ldt(ldt_sel);
1991 load_fs(fs_sel);
1992 /*
1993 * If we have to reload gs, we must take care to
1994 * preserve our gs base.
1995 */
1996 local_irq_disable();
1997 load_gs(gs_sel);
1998#ifdef CONFIG_X86_64
1999 wrmsrl(MSR_GS_BASE, vmcs_readl(HOST_GS_BASE));
2000#endif
2001 local_irq_enable();
2002 2143
2003 reload_tss();
2004 }
2005 ++vcpu->stat.exits; 2144 ++vcpu->stat.exits;
2006 2145
2007#ifdef CONFIG_X86_64
2008 if (is_long_mode(vcpu)) {
2009 save_msrs(vcpu->guest_msrs, NR_BAD_MSRS);
2010 load_msrs(vcpu->host_msrs, NR_BAD_MSRS);
2011 }
2012#endif
2013
2014 vcpu->interrupt_window_open = (vmcs_read32(GUEST_INTERRUPTIBILITY_INFO) & 3) == 0; 2146 vcpu->interrupt_window_open = (vmcs_read32(GUEST_INTERRUPTIBILITY_INFO) & 3) == 0;
2015 2147
2016 asm ("mov %0, %%ds; mov %0, %%es" : : "r"(__USER_DS)); 2148 asm ("mov %0, %%ds; mov %0, %%es" : : "r"(__USER_DS));
2017 2149
2018 if (fail) { 2150 if (unlikely(fail)) {
2019 kvm_run->exit_reason = KVM_EXIT_FAIL_ENTRY; 2151 kvm_run->exit_reason = KVM_EXIT_FAIL_ENTRY;
2020 kvm_run->fail_entry.hardware_entry_failure_reason 2152 kvm_run->fail_entry.hardware_entry_failure_reason
2021 = vmcs_read32(VM_INSTRUCTION_ERROR); 2153 = vmcs_read32(VM_INSTRUCTION_ERROR);
2022 r = 0; 2154 r = 0;
2023 } else { 2155 goto out;
2024 /* 2156 }
2025 * Profile KVM exit RIPs: 2157 /*
2026 */ 2158 * Profile KVM exit RIPs:
2027 if (unlikely(prof_on == KVM_PROFILING)) 2159 */
2028 profile_hit(KVM_PROFILING, (void *)vmcs_readl(GUEST_RIP)); 2160 if (unlikely(prof_on == KVM_PROFILING))
2029 2161 profile_hit(KVM_PROFILING, (void *)vmcs_readl(GUEST_RIP));
2030 vcpu->launched = 1; 2162
2031 r = kvm_handle_exit(kvm_run, vcpu); 2163 vcpu->launched = 1;
2032 if (r > 0) { 2164 r = kvm_handle_exit(kvm_run, vcpu);
2033 /* Give scheduler a change to reschedule. */ 2165 if (r > 0) {
2034 if (signal_pending(current)) { 2166 /* Give scheduler a change to reschedule. */
2035 ++vcpu->stat.signal_exits; 2167 if (signal_pending(current)) {
2036 post_kvm_run_save(vcpu, kvm_run); 2168 r = -EINTR;
2037 kvm_run->exit_reason = KVM_EXIT_INTR; 2169 kvm_run->exit_reason = KVM_EXIT_INTR;
2038 return -EINTR; 2170 ++vcpu->stat.signal_exits;
2039 } 2171 goto out;
2040 2172 }
2041 if (dm_request_for_irq_injection(vcpu, kvm_run)) { 2173
2042 ++vcpu->stat.request_irq_exits; 2174 if (dm_request_for_irq_injection(vcpu, kvm_run)) {
2043 post_kvm_run_save(vcpu, kvm_run); 2175 r = -EINTR;
2044 kvm_run->exit_reason = KVM_EXIT_INTR; 2176 kvm_run->exit_reason = KVM_EXIT_INTR;
2045 return -EINTR; 2177 ++vcpu->stat.request_irq_exits;
2046 } 2178 goto out;
2047 2179 }
2048 kvm_resched(vcpu); 2180 if (!need_resched()) {
2181 ++vcpu->stat.light_exits;
2049 goto again; 2182 goto again;
2050 } 2183 }
2051 } 2184 }
2052 2185
2186out:
2187 if (r > 0) {
2188 kvm_resched(vcpu);
2189 goto preempted;
2190 }
2191
2053 post_kvm_run_save(vcpu, kvm_run); 2192 post_kvm_run_save(vcpu, kvm_run);
2054 return r; 2193 return r;
2055} 2194}
2056 2195
2057static void vmx_flush_tlb(struct kvm_vcpu *vcpu)
2058{
2059 vmcs_writel(GUEST_CR3, vmcs_readl(GUEST_CR3));
2060}
2061
2062static void vmx_inject_page_fault(struct kvm_vcpu *vcpu, 2196static void vmx_inject_page_fault(struct kvm_vcpu *vcpu,
2063 unsigned long addr, 2197 unsigned long addr,
2064 u32 err_code) 2198 u32 err_code)
@@ -2122,7 +2256,6 @@ static int vmx_create_vcpu(struct kvm_vcpu *vcpu)
2122 vmcs_clear(vmcs); 2256 vmcs_clear(vmcs);
2123 vcpu->vmcs = vmcs; 2257 vcpu->vmcs = vmcs;
2124 vcpu->launched = 0; 2258 vcpu->launched = 0;
2125 vcpu->fpu_active = 1;
2126 2259
2127 return 0; 2260 return 0;
2128 2261
@@ -2188,11 +2321,50 @@ static struct kvm_arch_ops vmx_arch_ops = {
2188 2321
2189static int __init vmx_init(void) 2322static int __init vmx_init(void)
2190{ 2323{
2191 return kvm_init_arch(&vmx_arch_ops, THIS_MODULE); 2324 void *iova;
2325 int r;
2326
2327 vmx_io_bitmap_a = alloc_page(GFP_KERNEL | __GFP_HIGHMEM);
2328 if (!vmx_io_bitmap_a)
2329 return -ENOMEM;
2330
2331 vmx_io_bitmap_b = alloc_page(GFP_KERNEL | __GFP_HIGHMEM);
2332 if (!vmx_io_bitmap_b) {
2333 r = -ENOMEM;
2334 goto out;
2335 }
2336
2337 /*
2338 * Allow direct access to the PC debug port (it is often used for I/O
2339 * delays, but the vmexits simply slow things down).
2340 */
2341 iova = kmap(vmx_io_bitmap_a);
2342 memset(iova, 0xff, PAGE_SIZE);
2343 clear_bit(0x80, iova);
2344 kunmap(vmx_io_bitmap_a);
2345
2346 iova = kmap(vmx_io_bitmap_b);
2347 memset(iova, 0xff, PAGE_SIZE);
2348 kunmap(vmx_io_bitmap_b);
2349
2350 r = kvm_init_arch(&vmx_arch_ops, THIS_MODULE);
2351 if (r)
2352 goto out1;
2353
2354 return 0;
2355
2356out1:
2357 __free_page(vmx_io_bitmap_b);
2358out:
2359 __free_page(vmx_io_bitmap_a);
2360 return r;
2192} 2361}
2193 2362
2194static void __exit vmx_exit(void) 2363static void __exit vmx_exit(void)
2195{ 2364{
2365 __free_page(vmx_io_bitmap_b);
2366 __free_page(vmx_io_bitmap_a);
2367
2196 kvm_exit_arch(); 2368 kvm_exit_arch();
2197} 2369}
2198 2370
diff --git a/drivers/kvm/x86_emulate.c b/drivers/kvm/x86_emulate.c
index 7ade09086aa5..f60012d62610 100644
--- a/drivers/kvm/x86_emulate.c
+++ b/drivers/kvm/x86_emulate.c
@@ -98,8 +98,11 @@ static u8 opcode_table[256] = {
98 0, 0, 0, 0, 98 0, 0, 0, 0,
99 /* 0x40 - 0x4F */ 99 /* 0x40 - 0x4F */
100 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 100 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
101 /* 0x50 - 0x5F */ 101 /* 0x50 - 0x57 */
102 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 102 0, 0, 0, 0, 0, 0, 0, 0,
103 /* 0x58 - 0x5F */
104 ImplicitOps, ImplicitOps, ImplicitOps, ImplicitOps,
105 ImplicitOps, ImplicitOps, ImplicitOps, ImplicitOps,
103 /* 0x60 - 0x6F */ 106 /* 0x60 - 0x6F */
104 0, 0, 0, DstReg | SrcMem32 | ModRM | Mov /* movsxd (x86/64) */ , 107 0, 0, 0, DstReg | SrcMem32 | ModRM | Mov /* movsxd (x86/64) */ ,
105 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 108 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
@@ -128,9 +131,9 @@ static u8 opcode_table[256] = {
128 /* 0xB0 - 0xBF */ 131 /* 0xB0 - 0xBF */
129 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 132 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
130 /* 0xC0 - 0xC7 */ 133 /* 0xC0 - 0xC7 */
131 ByteOp | DstMem | SrcImm | ModRM, DstMem | SrcImmByte | ModRM, 0, 0, 134 ByteOp | DstMem | SrcImm | ModRM, DstMem | SrcImmByte | ModRM,
132 0, 0, ByteOp | DstMem | SrcImm | ModRM | Mov, 135 0, ImplicitOps, 0, 0,
133 DstMem | SrcImm | ModRM | Mov, 136 ByteOp | DstMem | SrcImm | ModRM | Mov, DstMem | SrcImm | ModRM | Mov,
134 /* 0xC8 - 0xCF */ 137 /* 0xC8 - 0xCF */
135 0, 0, 0, 0, 0, 0, 0, 0, 138 0, 0, 0, 0, 0, 0, 0, 0,
136 /* 0xD0 - 0xD7 */ 139 /* 0xD0 - 0xD7 */
@@ -143,7 +146,8 @@ static u8 opcode_table[256] = {
143 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 146 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
144 /* 0xF0 - 0xF7 */ 147 /* 0xF0 - 0xF7 */
145 0, 0, 0, 0, 148 0, 0, 0, 0,
146 0, 0, ByteOp | DstMem | SrcNone | ModRM, DstMem | SrcNone | ModRM, 149 ImplicitOps, 0,
150 ByteOp | DstMem | SrcNone | ModRM, DstMem | SrcNone | ModRM,
147 /* 0xF8 - 0xFF */ 151 /* 0xF8 - 0xFF */
148 0, 0, 0, 0, 152 0, 0, 0, 0,
149 0, 0, ByteOp | DstMem | SrcNone | ModRM, DstMem | SrcNone | ModRM 153 0, 0, ByteOp | DstMem | SrcNone | ModRM, DstMem | SrcNone | ModRM
@@ -152,7 +156,7 @@ static u8 opcode_table[256] = {
152static u16 twobyte_table[256] = { 156static u16 twobyte_table[256] = {
153 /* 0x00 - 0x0F */ 157 /* 0x00 - 0x0F */
154 0, SrcMem | ModRM | DstReg, 0, 0, 0, 0, ImplicitOps, 0, 158 0, SrcMem | ModRM | DstReg, 0, 0, 0, 0, ImplicitOps, 0,
155 0, 0, 0, 0, 0, ImplicitOps | ModRM, 0, 0, 159 0, ImplicitOps, 0, 0, 0, ImplicitOps | ModRM, 0, 0,
156 /* 0x10 - 0x1F */ 160 /* 0x10 - 0x1F */
157 0, 0, 0, 0, 0, 0, 0, 0, ImplicitOps | ModRM, 0, 0, 0, 0, 0, 0, 0, 161 0, 0, 0, 0, 0, 0, 0, 0, ImplicitOps | ModRM, 0, 0, 0, 0, 0, 0, 0,
158 /* 0x20 - 0x2F */ 162 /* 0x20 - 0x2F */
@@ -481,6 +485,7 @@ x86_emulate_memop(struct x86_emulate_ctxt *ctxt, struct x86_emulate_ops *ops)
481 int mode = ctxt->mode; 485 int mode = ctxt->mode;
482 unsigned long modrm_ea; 486 unsigned long modrm_ea;
483 int use_modrm_ea, index_reg = 0, base_reg = 0, scale, rip_relative = 0; 487 int use_modrm_ea, index_reg = 0, base_reg = 0, scale, rip_relative = 0;
488 int no_wb = 0;
484 489
485 /* Shadow copy of register state. Committed on successful emulation. */ 490 /* Shadow copy of register state. Committed on successful emulation. */
486 unsigned long _regs[NR_VCPU_REGS]; 491 unsigned long _regs[NR_VCPU_REGS];
@@ -1047,7 +1052,7 @@ done_prefixes:
1047 _regs[VCPU_REGS_RSP]), 1052 _regs[VCPU_REGS_RSP]),
1048 &dst.val, dst.bytes, ctxt)) != 0) 1053 &dst.val, dst.bytes, ctxt)) != 0)
1049 goto done; 1054 goto done;
1050 dst.val = dst.orig_val; /* skanky: disable writeback */ 1055 no_wb = 1;
1051 break; 1056 break;
1052 default: 1057 default:
1053 goto cannot_emulate; 1058 goto cannot_emulate;
@@ -1056,7 +1061,7 @@ done_prefixes:
1056 } 1061 }
1057 1062
1058writeback: 1063writeback:
1059 if ((d & Mov) || (dst.orig_val != dst.val)) { 1064 if (!no_wb) {
1060 switch (dst.type) { 1065 switch (dst.type) {
1061 case OP_REG: 1066 case OP_REG:
1062 /* The 4-byte case *is* correct: in 64-bit mode we zero-extend. */ 1067 /* The 4-byte case *is* correct: in 64-bit mode we zero-extend. */
@@ -1149,6 +1154,23 @@ special_insn:
1149 case 0xae ... 0xaf: /* scas */ 1154 case 0xae ... 0xaf: /* scas */
1150 DPRINTF("Urk! I don't handle SCAS.\n"); 1155 DPRINTF("Urk! I don't handle SCAS.\n");
1151 goto cannot_emulate; 1156 goto cannot_emulate;
1157 case 0xf4: /* hlt */
1158 ctxt->vcpu->halt_request = 1;
1159 goto done;
1160 case 0xc3: /* ret */
1161 dst.ptr = &_eip;
1162 goto pop_instruction;
1163 case 0x58 ... 0x5f: /* pop reg */
1164 dst.ptr = (unsigned long *)&_regs[b & 0x7];
1165
1166pop_instruction:
1167 if ((rc = ops->read_std(register_address(ctxt->ss_base,
1168 _regs[VCPU_REGS_RSP]), dst.ptr, op_bytes, ctxt)) != 0)
1169 goto done;
1170
1171 register_address_increment(_regs[VCPU_REGS_RSP], op_bytes);
1172 no_wb = 1; /* Disable writeback. */
1173 break;
1152 } 1174 }
1153 goto writeback; 1175 goto writeback;
1154 1176
@@ -1302,8 +1324,10 @@ twobyte_insn:
1302 1324
1303twobyte_special_insn: 1325twobyte_special_insn:
1304 /* Disable writeback. */ 1326 /* Disable writeback. */
1305 dst.orig_val = dst.val; 1327 no_wb = 1;
1306 switch (b) { 1328 switch (b) {
1329 case 0x09: /* wbinvd */
1330 break;
1307 case 0x0d: /* GrpP (prefetch) */ 1331 case 0x0d: /* GrpP (prefetch) */
1308 case 0x18: /* Grp16 (prefetch/nop) */ 1332 case 0x18: /* Grp16 (prefetch/nop) */
1309 break; 1333 break;
diff --git a/drivers/macintosh/therm_adt746x.c b/drivers/macintosh/therm_adt746x.c
index bd55e6ab99fc..f25685b9b7cf 100644
--- a/drivers/macintosh/therm_adt746x.c
+++ b/drivers/macintosh/therm_adt746x.c
@@ -335,6 +335,7 @@ static int monitor_task(void *arg)
335{ 335{
336 struct thermostat* th = arg; 336 struct thermostat* th = arg;
337 337
338 set_freezable();
338 while(!kthread_should_stop()) { 339 while(!kthread_should_stop()) {
339 try_to_freeze(); 340 try_to_freeze();
340 msleep_interruptible(2000); 341 msleep_interruptible(2000);
diff --git a/drivers/macintosh/windfarm_core.c b/drivers/macintosh/windfarm_core.c
index 4fcb245ba184..e18d265d5d33 100644
--- a/drivers/macintosh/windfarm_core.c
+++ b/drivers/macintosh/windfarm_core.c
@@ -92,6 +92,7 @@ static int wf_thread_func(void *data)
92 92
93 DBG("wf: thread started\n"); 93 DBG("wf: thread started\n");
94 94
95 set_freezable();
95 while(!kthread_should_stop()) { 96 while(!kthread_should_stop()) {
96 if (time_after_eq(jiffies, next)) { 97 if (time_after_eq(jiffies, next)) {
97 wf_notify(WF_EVENT_TICK, NULL); 98 wf_notify(WF_EVENT_TICK, NULL);
diff --git a/drivers/md/Kconfig b/drivers/md/Kconfig
index 64bf3a81db93..531d4d17d011 100644
--- a/drivers/md/Kconfig
+++ b/drivers/md/Kconfig
@@ -2,19 +2,17 @@
2# Block device driver configuration 2# Block device driver configuration
3# 3#
4 4
5if BLOCK 5menuconfig MD
6
7menu "Multi-device support (RAID and LVM)"
8
9config MD
10 bool "Multiple devices driver support (RAID and LVM)" 6 bool "Multiple devices driver support (RAID and LVM)"
7 depends on BLOCK
11 help 8 help
12 Support multiple physical spindles through a single logical device. 9 Support multiple physical spindles through a single logical device.
13 Required for RAID and logical volume management. 10 Required for RAID and logical volume management.
14 11
12if MD
13
15config BLK_DEV_MD 14config BLK_DEV_MD
16 tristate "RAID support" 15 tristate "RAID support"
17 depends on MD
18 ---help--- 16 ---help---
19 This driver lets you combine several hard disk partitions into one 17 This driver lets you combine several hard disk partitions into one
20 logical block device. This can be used to simply append one 18 logical block device. This can be used to simply append one
@@ -191,7 +189,6 @@ config MD_FAULTY
191 189
192config BLK_DEV_DM 190config BLK_DEV_DM
193 tristate "Device mapper support" 191 tristate "Device mapper support"
194 depends on MD
195 ---help--- 192 ---help---
196 Device-mapper is a low level volume manager. It works by allowing 193 Device-mapper is a low level volume manager. It works by allowing
197 people to specify mappings for ranges of logical sectors. Various 194 people to specify mappings for ranges of logical sectors. Various
@@ -279,6 +276,4 @@ config DM_DELAY
279 276
280 If unsure, say N. 277 If unsure, say N.
281 278
282endmenu 279endif # MD
283
284endif
diff --git a/drivers/md/bitmap.c b/drivers/md/bitmap.c
index 9620d452d030..927cb34c4805 100644
--- a/drivers/md/bitmap.c
+++ b/drivers/md/bitmap.c
@@ -268,6 +268,31 @@ static int write_sb_page(struct bitmap *bitmap, struct page *page, int wait)
268 if (page->index == bitmap->file_pages-1) 268 if (page->index == bitmap->file_pages-1)
269 size = roundup(bitmap->last_page_size, 269 size = roundup(bitmap->last_page_size,
270 bdev_hardsect_size(rdev->bdev)); 270 bdev_hardsect_size(rdev->bdev));
271 /* Just make sure we aren't corrupting data or
272 * metadata
273 */
274 if (bitmap->offset < 0) {
275 /* DATA BITMAP METADATA */
276 if (bitmap->offset
277 + page->index * (PAGE_SIZE/512)
278 + size/512 > 0)
279 /* bitmap runs in to metadata */
280 return -EINVAL;
281 if (rdev->data_offset + mddev->size*2
282 > rdev->sb_offset*2 + bitmap->offset)
283 /* data runs in to bitmap */
284 return -EINVAL;
285 } else if (rdev->sb_offset*2 < rdev->data_offset) {
286 /* METADATA BITMAP DATA */
287 if (rdev->sb_offset*2
288 + bitmap->offset
289 + page->index*(PAGE_SIZE/512) + size/512
290 > rdev->data_offset)
291 /* bitmap runs in to data */
292 return -EINVAL;
293 } else {
294 /* DATA METADATA BITMAP - no problems */
295 }
271 md_super_write(mddev, rdev, 296 md_super_write(mddev, rdev,
272 (rdev->sb_offset<<1) + bitmap->offset 297 (rdev->sb_offset<<1) + bitmap->offset
273 + page->index * (PAGE_SIZE/512), 298 + page->index * (PAGE_SIZE/512),
@@ -280,32 +305,38 @@ static int write_sb_page(struct bitmap *bitmap, struct page *page, int wait)
280 return 0; 305 return 0;
281} 306}
282 307
308static void bitmap_file_kick(struct bitmap *bitmap);
283/* 309/*
284 * write out a page to a file 310 * write out a page to a file
285 */ 311 */
286static int write_page(struct bitmap *bitmap, struct page *page, int wait) 312static void write_page(struct bitmap *bitmap, struct page *page, int wait)
287{ 313{
288 struct buffer_head *bh; 314 struct buffer_head *bh;
289 315
290 if (bitmap->file == NULL) 316 if (bitmap->file == NULL) {
291 return write_sb_page(bitmap, page, wait); 317 switch (write_sb_page(bitmap, page, wait)) {
318 case -EINVAL:
319 bitmap->flags |= BITMAP_WRITE_ERROR;
320 }
321 } else {
292 322
293 bh = page_buffers(page); 323 bh = page_buffers(page);
294 324
295 while (bh && bh->b_blocknr) { 325 while (bh && bh->b_blocknr) {
296 atomic_inc(&bitmap->pending_writes); 326 atomic_inc(&bitmap->pending_writes);
297 set_buffer_locked(bh); 327 set_buffer_locked(bh);
298 set_buffer_mapped(bh); 328 set_buffer_mapped(bh);
299 submit_bh(WRITE, bh); 329 submit_bh(WRITE, bh);
300 bh = bh->b_this_page; 330 bh = bh->b_this_page;
301 } 331 }
302 332
303 if (wait) { 333 if (wait) {
304 wait_event(bitmap->write_wait, 334 wait_event(bitmap->write_wait,
305 atomic_read(&bitmap->pending_writes)==0); 335 atomic_read(&bitmap->pending_writes)==0);
306 return (bitmap->flags & BITMAP_WRITE_ERROR) ? -EIO : 0; 336 }
307 } 337 }
308 return 0; 338 if (bitmap->flags & BITMAP_WRITE_ERROR)
339 bitmap_file_kick(bitmap);
309} 340}
310 341
311static void end_bitmap_write(struct buffer_head *bh, int uptodate) 342static void end_bitmap_write(struct buffer_head *bh, int uptodate)
@@ -425,17 +456,17 @@ out:
425 */ 456 */
426 457
427/* update the event counter and sync the superblock to disk */ 458/* update the event counter and sync the superblock to disk */
428int bitmap_update_sb(struct bitmap *bitmap) 459void bitmap_update_sb(struct bitmap *bitmap)
429{ 460{
430 bitmap_super_t *sb; 461 bitmap_super_t *sb;
431 unsigned long flags; 462 unsigned long flags;
432 463
433 if (!bitmap || !bitmap->mddev) /* no bitmap for this array */ 464 if (!bitmap || !bitmap->mddev) /* no bitmap for this array */
434 return 0; 465 return;
435 spin_lock_irqsave(&bitmap->lock, flags); 466 spin_lock_irqsave(&bitmap->lock, flags);
436 if (!bitmap->sb_page) { /* no superblock */ 467 if (!bitmap->sb_page) { /* no superblock */
437 spin_unlock_irqrestore(&bitmap->lock, flags); 468 spin_unlock_irqrestore(&bitmap->lock, flags);
438 return 0; 469 return;
439 } 470 }
440 spin_unlock_irqrestore(&bitmap->lock, flags); 471 spin_unlock_irqrestore(&bitmap->lock, flags);
441 sb = (bitmap_super_t *)kmap_atomic(bitmap->sb_page, KM_USER0); 472 sb = (bitmap_super_t *)kmap_atomic(bitmap->sb_page, KM_USER0);
@@ -443,7 +474,7 @@ int bitmap_update_sb(struct bitmap *bitmap)
443 if (!bitmap->mddev->degraded) 474 if (!bitmap->mddev->degraded)
444 sb->events_cleared = cpu_to_le64(bitmap->mddev->events); 475 sb->events_cleared = cpu_to_le64(bitmap->mddev->events);
445 kunmap_atomic(sb, KM_USER0); 476 kunmap_atomic(sb, KM_USER0);
446 return write_page(bitmap, bitmap->sb_page, 1); 477 write_page(bitmap, bitmap->sb_page, 1);
447} 478}
448 479
449/* print out the bitmap file superblock */ 480/* print out the bitmap file superblock */
@@ -572,20 +603,22 @@ enum bitmap_mask_op {
572 MASK_UNSET 603 MASK_UNSET
573}; 604};
574 605
575/* record the state of the bitmap in the superblock */ 606/* record the state of the bitmap in the superblock. Return the old value */
576static void bitmap_mask_state(struct bitmap *bitmap, enum bitmap_state bits, 607static int bitmap_mask_state(struct bitmap *bitmap, enum bitmap_state bits,
577 enum bitmap_mask_op op) 608 enum bitmap_mask_op op)
578{ 609{
579 bitmap_super_t *sb; 610 bitmap_super_t *sb;
580 unsigned long flags; 611 unsigned long flags;
612 int old;
581 613
582 spin_lock_irqsave(&bitmap->lock, flags); 614 spin_lock_irqsave(&bitmap->lock, flags);
583 if (!bitmap->sb_page) { /* can't set the state */ 615 if (!bitmap->sb_page) { /* can't set the state */
584 spin_unlock_irqrestore(&bitmap->lock, flags); 616 spin_unlock_irqrestore(&bitmap->lock, flags);
585 return; 617 return 0;
586 } 618 }
587 spin_unlock_irqrestore(&bitmap->lock, flags); 619 spin_unlock_irqrestore(&bitmap->lock, flags);
588 sb = (bitmap_super_t *)kmap_atomic(bitmap->sb_page, KM_USER0); 620 sb = (bitmap_super_t *)kmap_atomic(bitmap->sb_page, KM_USER0);
621 old = le32_to_cpu(sb->state) & bits;
589 switch (op) { 622 switch (op) {
590 case MASK_SET: sb->state |= cpu_to_le32(bits); 623 case MASK_SET: sb->state |= cpu_to_le32(bits);
591 break; 624 break;
@@ -594,6 +627,7 @@ static void bitmap_mask_state(struct bitmap *bitmap, enum bitmap_state bits,
594 default: BUG(); 627 default: BUG();
595 } 628 }
596 kunmap_atomic(sb, KM_USER0); 629 kunmap_atomic(sb, KM_USER0);
630 return old;
597} 631}
598 632
599/* 633/*
@@ -687,18 +721,23 @@ static void bitmap_file_kick(struct bitmap *bitmap)
687{ 721{
688 char *path, *ptr = NULL; 722 char *path, *ptr = NULL;
689 723
690 bitmap_mask_state(bitmap, BITMAP_STALE, MASK_SET); 724 if (bitmap_mask_state(bitmap, BITMAP_STALE, MASK_SET) == 0) {
691 bitmap_update_sb(bitmap); 725 bitmap_update_sb(bitmap);
692 726
693 if (bitmap->file) { 727 if (bitmap->file) {
694 path = kmalloc(PAGE_SIZE, GFP_KERNEL); 728 path = kmalloc(PAGE_SIZE, GFP_KERNEL);
695 if (path) 729 if (path)
696 ptr = file_path(bitmap->file, path, PAGE_SIZE); 730 ptr = file_path(bitmap->file, path, PAGE_SIZE);
697 731
698 printk(KERN_ALERT "%s: kicking failed bitmap file %s from array!\n", 732 printk(KERN_ALERT
699 bmname(bitmap), ptr ? ptr : ""); 733 "%s: kicking failed bitmap file %s from array!\n",
734 bmname(bitmap), ptr ? ptr : "");
700 735
701 kfree(path); 736 kfree(path);
737 } else
738 printk(KERN_ALERT
739 "%s: disabling internal bitmap due to errors\n",
740 bmname(bitmap));
702 } 741 }
703 742
704 bitmap_file_put(bitmap); 743 bitmap_file_put(bitmap);
@@ -769,16 +808,15 @@ static void bitmap_file_set_bit(struct bitmap *bitmap, sector_t block)
769/* this gets called when the md device is ready to unplug its underlying 808/* this gets called when the md device is ready to unplug its underlying
770 * (slave) device queues -- before we let any writes go down, we need to 809 * (slave) device queues -- before we let any writes go down, we need to
771 * sync the dirty pages of the bitmap file to disk */ 810 * sync the dirty pages of the bitmap file to disk */
772int bitmap_unplug(struct bitmap *bitmap) 811void bitmap_unplug(struct bitmap *bitmap)
773{ 812{
774 unsigned long i, flags; 813 unsigned long i, flags;
775 int dirty, need_write; 814 int dirty, need_write;
776 struct page *page; 815 struct page *page;
777 int wait = 0; 816 int wait = 0;
778 int err;
779 817
780 if (!bitmap) 818 if (!bitmap)
781 return 0; 819 return;
782 820
783 /* look at each page to see if there are any set bits that need to be 821 /* look at each page to see if there are any set bits that need to be
784 * flushed out to disk */ 822 * flushed out to disk */
@@ -786,7 +824,7 @@ int bitmap_unplug(struct bitmap *bitmap)
786 spin_lock_irqsave(&bitmap->lock, flags); 824 spin_lock_irqsave(&bitmap->lock, flags);
787 if (!bitmap->filemap) { 825 if (!bitmap->filemap) {
788 spin_unlock_irqrestore(&bitmap->lock, flags); 826 spin_unlock_irqrestore(&bitmap->lock, flags);
789 return 0; 827 return;
790 } 828 }
791 page = bitmap->filemap[i]; 829 page = bitmap->filemap[i];
792 dirty = test_page_attr(bitmap, page, BITMAP_PAGE_DIRTY); 830 dirty = test_page_attr(bitmap, page, BITMAP_PAGE_DIRTY);
@@ -798,7 +836,7 @@ int bitmap_unplug(struct bitmap *bitmap)
798 spin_unlock_irqrestore(&bitmap->lock, flags); 836 spin_unlock_irqrestore(&bitmap->lock, flags);
799 837
800 if (dirty | need_write) 838 if (dirty | need_write)
801 err = write_page(bitmap, page, 0); 839 write_page(bitmap, page, 0);
802 } 840 }
803 if (wait) { /* if any writes were performed, we need to wait on them */ 841 if (wait) { /* if any writes were performed, we need to wait on them */
804 if (bitmap->file) 842 if (bitmap->file)
@@ -809,7 +847,6 @@ int bitmap_unplug(struct bitmap *bitmap)
809 } 847 }
810 if (bitmap->flags & BITMAP_WRITE_ERROR) 848 if (bitmap->flags & BITMAP_WRITE_ERROR)
811 bitmap_file_kick(bitmap); 849 bitmap_file_kick(bitmap);
812 return 0;
813} 850}
814 851
815static void bitmap_set_memory_bits(struct bitmap *bitmap, sector_t offset, int needed); 852static void bitmap_set_memory_bits(struct bitmap *bitmap, sector_t offset, int needed);
@@ -858,21 +895,21 @@ static int bitmap_init_from_disk(struct bitmap *bitmap, sector_t start)
858 bmname(bitmap), 895 bmname(bitmap),
859 (unsigned long) i_size_read(file->f_mapping->host), 896 (unsigned long) i_size_read(file->f_mapping->host),
860 bytes + sizeof(bitmap_super_t)); 897 bytes + sizeof(bitmap_super_t));
861 goto out; 898 goto err;
862 } 899 }
863 900
864 ret = -ENOMEM; 901 ret = -ENOMEM;
865 902
866 bitmap->filemap = kmalloc(sizeof(struct page *) * num_pages, GFP_KERNEL); 903 bitmap->filemap = kmalloc(sizeof(struct page *) * num_pages, GFP_KERNEL);
867 if (!bitmap->filemap) 904 if (!bitmap->filemap)
868 goto out; 905 goto err;
869 906
870 /* We need 4 bits per page, rounded up to a multiple of sizeof(unsigned long) */ 907 /* We need 4 bits per page, rounded up to a multiple of sizeof(unsigned long) */
871 bitmap->filemap_attr = kzalloc( 908 bitmap->filemap_attr = kzalloc(
872 roundup( DIV_ROUND_UP(num_pages*4, 8), sizeof(unsigned long)), 909 roundup( DIV_ROUND_UP(num_pages*4, 8), sizeof(unsigned long)),
873 GFP_KERNEL); 910 GFP_KERNEL);
874 if (!bitmap->filemap_attr) 911 if (!bitmap->filemap_attr)
875 goto out; 912 goto err;
876 913
877 oldindex = ~0L; 914 oldindex = ~0L;
878 915
@@ -905,7 +942,7 @@ static int bitmap_init_from_disk(struct bitmap *bitmap, sector_t start)
905 } 942 }
906 if (IS_ERR(page)) { /* read error */ 943 if (IS_ERR(page)) { /* read error */
907 ret = PTR_ERR(page); 944 ret = PTR_ERR(page);
908 goto out; 945 goto err;
909 } 946 }
910 947
911 oldindex = index; 948 oldindex = index;
@@ -920,11 +957,13 @@ static int bitmap_init_from_disk(struct bitmap *bitmap, sector_t start)
920 memset(paddr + offset, 0xff, 957 memset(paddr + offset, 0xff,
921 PAGE_SIZE - offset); 958 PAGE_SIZE - offset);
922 kunmap_atomic(paddr, KM_USER0); 959 kunmap_atomic(paddr, KM_USER0);
923 ret = write_page(bitmap, page, 1); 960 write_page(bitmap, page, 1);
924 if (ret) { 961
962 ret = -EIO;
963 if (bitmap->flags & BITMAP_WRITE_ERROR) {
925 /* release, page not in filemap yet */ 964 /* release, page not in filemap yet */
926 put_page(page); 965 put_page(page);
927 goto out; 966 goto err;
928 } 967 }
929 } 968 }
930 969
@@ -956,11 +995,15 @@ static int bitmap_init_from_disk(struct bitmap *bitmap, sector_t start)
956 md_wakeup_thread(bitmap->mddev->thread); 995 md_wakeup_thread(bitmap->mddev->thread);
957 } 996 }
958 997
959out:
960 printk(KERN_INFO "%s: bitmap initialized from disk: " 998 printk(KERN_INFO "%s: bitmap initialized from disk: "
961 "read %lu/%lu pages, set %lu bits, status: %d\n", 999 "read %lu/%lu pages, set %lu bits\n",
962 bmname(bitmap), bitmap->file_pages, num_pages, bit_cnt, ret); 1000 bmname(bitmap), bitmap->file_pages, num_pages, bit_cnt);
1001
1002 return 0;
963 1003
1004 err:
1005 printk(KERN_INFO "%s: bitmap initialisation failed: %d\n",
1006 bmname(bitmap), ret);
964 return ret; 1007 return ret;
965} 1008}
966 1009
@@ -997,19 +1040,18 @@ static bitmap_counter_t *bitmap_get_counter(struct bitmap *bitmap,
997 * out to disk 1040 * out to disk
998 */ 1041 */
999 1042
1000int bitmap_daemon_work(struct bitmap *bitmap) 1043void bitmap_daemon_work(struct bitmap *bitmap)
1001{ 1044{
1002 unsigned long j; 1045 unsigned long j;
1003 unsigned long flags; 1046 unsigned long flags;
1004 struct page *page = NULL, *lastpage = NULL; 1047 struct page *page = NULL, *lastpage = NULL;
1005 int err = 0;
1006 int blocks; 1048 int blocks;
1007 void *paddr; 1049 void *paddr;
1008 1050
1009 if (bitmap == NULL) 1051 if (bitmap == NULL)
1010 return 0; 1052 return;
1011 if (time_before(jiffies, bitmap->daemon_lastrun + bitmap->daemon_sleep*HZ)) 1053 if (time_before(jiffies, bitmap->daemon_lastrun + bitmap->daemon_sleep*HZ))
1012 return 0; 1054 return;
1013 bitmap->daemon_lastrun = jiffies; 1055 bitmap->daemon_lastrun = jiffies;
1014 1056
1015 for (j = 0; j < bitmap->chunks; j++) { 1057 for (j = 0; j < bitmap->chunks; j++) {
@@ -1032,14 +1074,8 @@ int bitmap_daemon_work(struct bitmap *bitmap)
1032 clear_page_attr(bitmap, page, BITMAP_PAGE_NEEDWRITE); 1074 clear_page_attr(bitmap, page, BITMAP_PAGE_NEEDWRITE);
1033 1075
1034 spin_unlock_irqrestore(&bitmap->lock, flags); 1076 spin_unlock_irqrestore(&bitmap->lock, flags);
1035 if (need_write) { 1077 if (need_write)
1036 switch (write_page(bitmap, page, 0)) { 1078 write_page(bitmap, page, 0);
1037 case 0:
1038 break;
1039 default:
1040 bitmap_file_kick(bitmap);
1041 }
1042 }
1043 continue; 1079 continue;
1044 } 1080 }
1045 1081
@@ -1048,13 +1084,11 @@ int bitmap_daemon_work(struct bitmap *bitmap)
1048 if (test_page_attr(bitmap, lastpage, BITMAP_PAGE_NEEDWRITE)) { 1084 if (test_page_attr(bitmap, lastpage, BITMAP_PAGE_NEEDWRITE)) {
1049 clear_page_attr(bitmap, lastpage, BITMAP_PAGE_NEEDWRITE); 1085 clear_page_attr(bitmap, lastpage, BITMAP_PAGE_NEEDWRITE);
1050 spin_unlock_irqrestore(&bitmap->lock, flags); 1086 spin_unlock_irqrestore(&bitmap->lock, flags);
1051 err = write_page(bitmap, lastpage, 0); 1087 write_page(bitmap, lastpage, 0);
1052 } else { 1088 } else {
1053 set_page_attr(bitmap, lastpage, BITMAP_PAGE_NEEDWRITE); 1089 set_page_attr(bitmap, lastpage, BITMAP_PAGE_NEEDWRITE);
1054 spin_unlock_irqrestore(&bitmap->lock, flags); 1090 spin_unlock_irqrestore(&bitmap->lock, flags);
1055 } 1091 }
1056 if (err)
1057 bitmap_file_kick(bitmap);
1058 } else 1092 } else
1059 spin_unlock_irqrestore(&bitmap->lock, flags); 1093 spin_unlock_irqrestore(&bitmap->lock, flags);
1060 lastpage = page; 1094 lastpage = page;
@@ -1097,14 +1131,13 @@ int bitmap_daemon_work(struct bitmap *bitmap)
1097 if (test_page_attr(bitmap, lastpage, BITMAP_PAGE_NEEDWRITE)) { 1131 if (test_page_attr(bitmap, lastpage, BITMAP_PAGE_NEEDWRITE)) {
1098 clear_page_attr(bitmap, lastpage, BITMAP_PAGE_NEEDWRITE); 1132 clear_page_attr(bitmap, lastpage, BITMAP_PAGE_NEEDWRITE);
1099 spin_unlock_irqrestore(&bitmap->lock, flags); 1133 spin_unlock_irqrestore(&bitmap->lock, flags);
1100 err = write_page(bitmap, lastpage, 0); 1134 write_page(bitmap, lastpage, 0);
1101 } else { 1135 } else {
1102 set_page_attr(bitmap, lastpage, BITMAP_PAGE_NEEDWRITE); 1136 set_page_attr(bitmap, lastpage, BITMAP_PAGE_NEEDWRITE);
1103 spin_unlock_irqrestore(&bitmap->lock, flags); 1137 spin_unlock_irqrestore(&bitmap->lock, flags);
1104 } 1138 }
1105 } 1139 }
1106 1140
1107 return err;
1108} 1141}
1109 1142
1110static bitmap_counter_t *bitmap_get_counter(struct bitmap *bitmap, 1143static bitmap_counter_t *bitmap_get_counter(struct bitmap *bitmap,
@@ -1517,7 +1550,9 @@ int bitmap_create(mddev_t *mddev)
1517 1550
1518 mddev->thread->timeout = bitmap->daemon_sleep * HZ; 1551 mddev->thread->timeout = bitmap->daemon_sleep * HZ;
1519 1552
1520 return bitmap_update_sb(bitmap); 1553 bitmap_update_sb(bitmap);
1554
1555 return (bitmap->flags & BITMAP_WRITE_ERROR) ? -EIO : 0;
1521 1556
1522 error: 1557 error:
1523 bitmap_free(bitmap); 1558 bitmap_free(bitmap);
diff --git a/drivers/md/dm.c b/drivers/md/dm.c
index f4f7d35561ab..846614e676c6 100644
--- a/drivers/md/dm.c
+++ b/drivers/md/dm.c
@@ -161,9 +161,7 @@ static void local_exit(void)
161{ 161{
162 kmem_cache_destroy(_tio_cache); 162 kmem_cache_destroy(_tio_cache);
163 kmem_cache_destroy(_io_cache); 163 kmem_cache_destroy(_io_cache);
164 164 unregister_blkdev(_major, _name);
165 if (unregister_blkdev(_major, _name) < 0)
166 DMERR("unregister_blkdev failed");
167 165
168 _major = 0; 166 _major = 0;
169 167
diff --git a/drivers/md/md.c b/drivers/md/md.c
index 33beaa7da085..65ddc887dfd7 100644
--- a/drivers/md/md.c
+++ b/drivers/md/md.c
@@ -1640,7 +1640,6 @@ static void sync_sbs(mddev_t * mddev, int nospares)
1640 1640
1641static void md_update_sb(mddev_t * mddev, int force_change) 1641static void md_update_sb(mddev_t * mddev, int force_change)
1642{ 1642{
1643 int err;
1644 struct list_head *tmp; 1643 struct list_head *tmp;
1645 mdk_rdev_t *rdev; 1644 mdk_rdev_t *rdev;
1646 int sync_req; 1645 int sync_req;
@@ -1727,7 +1726,7 @@ repeat:
1727 "md: updating %s RAID superblock on device (in sync %d)\n", 1726 "md: updating %s RAID superblock on device (in sync %d)\n",
1728 mdname(mddev),mddev->in_sync); 1727 mdname(mddev),mddev->in_sync);
1729 1728
1730 err = bitmap_update_sb(mddev->bitmap); 1729 bitmap_update_sb(mddev->bitmap);
1731 ITERATE_RDEV(mddev,rdev,tmp) { 1730 ITERATE_RDEV(mddev,rdev,tmp) {
1732 char b[BDEVNAME_SIZE]; 1731 char b[BDEVNAME_SIZE];
1733 dprintk(KERN_INFO "md: "); 1732 dprintk(KERN_INFO "md: ");
@@ -2073,9 +2072,11 @@ static mdk_rdev_t *md_import_device(dev_t newdev, int super_format, int super_mi
2073 err = super_types[super_format]. 2072 err = super_types[super_format].
2074 load_super(rdev, NULL, super_minor); 2073 load_super(rdev, NULL, super_minor);
2075 if (err == -EINVAL) { 2074 if (err == -EINVAL) {
2076 printk(KERN_WARNING 2075 printk(KERN_WARNING
2077 "md: %s has invalid sb, not importing!\n", 2076 "md: %s does not have a valid v%d.%d "
2078 bdevname(rdev->bdev,b)); 2077 "superblock, not importing!\n",
2078 bdevname(rdev->bdev,b),
2079 super_format, super_minor);
2079 goto abort_free; 2080 goto abort_free;
2080 } 2081 }
2081 if (err < 0) { 2082 if (err < 0) {
@@ -3174,13 +3175,33 @@ static int do_md_run(mddev_t * mddev)
3174 * Drop all container device buffers, from now on 3175 * Drop all container device buffers, from now on
3175 * the only valid external interface is through the md 3176 * the only valid external interface is through the md
3176 * device. 3177 * device.
3177 * Also find largest hardsector size
3178 */ 3178 */
3179 ITERATE_RDEV(mddev,rdev,tmp) { 3179 ITERATE_RDEV(mddev,rdev,tmp) {
3180 if (test_bit(Faulty, &rdev->flags)) 3180 if (test_bit(Faulty, &rdev->flags))
3181 continue; 3181 continue;
3182 sync_blockdev(rdev->bdev); 3182 sync_blockdev(rdev->bdev);
3183 invalidate_bdev(rdev->bdev); 3183 invalidate_bdev(rdev->bdev);
3184
3185 /* perform some consistency tests on the device.
3186 * We don't want the data to overlap the metadata,
3187 * Internal Bitmap issues has handled elsewhere.
3188 */
3189 if (rdev->data_offset < rdev->sb_offset) {
3190 if (mddev->size &&
3191 rdev->data_offset + mddev->size*2
3192 > rdev->sb_offset*2) {
3193 printk("md: %s: data overlaps metadata\n",
3194 mdname(mddev));
3195 return -EINVAL;
3196 }
3197 } else {
3198 if (rdev->sb_offset*2 + rdev->sb_size/512
3199 > rdev->data_offset) {
3200 printk("md: %s: metadata overlaps data\n",
3201 mdname(mddev));
3202 return -EINVAL;
3203 }
3204 }
3184 } 3205 }
3185 3206
3186 md_probe(mddev->unit, NULL, NULL); 3207 md_probe(mddev->unit, NULL, NULL);
@@ -4642,7 +4663,6 @@ static int md_thread(void * arg)
4642 * many dirty RAID5 blocks. 4663 * many dirty RAID5 blocks.
4643 */ 4664 */
4644 4665
4645 current->flags |= PF_NOFREEZE;
4646 allow_signal(SIGKILL); 4666 allow_signal(SIGKILL);
4647 while (!kthread_should_stop()) { 4667 while (!kthread_should_stop()) {
4648 4668
@@ -5090,7 +5110,7 @@ static int is_mddev_idle(mddev_t *mddev)
5090 mdk_rdev_t * rdev; 5110 mdk_rdev_t * rdev;
5091 struct list_head *tmp; 5111 struct list_head *tmp;
5092 int idle; 5112 int idle;
5093 unsigned long curr_events; 5113 long curr_events;
5094 5114
5095 idle = 1; 5115 idle = 1;
5096 ITERATE_RDEV(mddev,rdev,tmp) { 5116 ITERATE_RDEV(mddev,rdev,tmp) {
@@ -5098,20 +5118,29 @@ static int is_mddev_idle(mddev_t *mddev)
5098 curr_events = disk_stat_read(disk, sectors[0]) + 5118 curr_events = disk_stat_read(disk, sectors[0]) +
5099 disk_stat_read(disk, sectors[1]) - 5119 disk_stat_read(disk, sectors[1]) -
5100 atomic_read(&disk->sync_io); 5120 atomic_read(&disk->sync_io);
5101 /* The difference between curr_events and last_events 5121 /* sync IO will cause sync_io to increase before the disk_stats
5102 * will be affected by any new non-sync IO (making 5122 * as sync_io is counted when a request starts, and
5103 * curr_events bigger) and any difference in the amount of 5123 * disk_stats is counted when it completes.
5104 * in-flight syncio (making current_events bigger or smaller) 5124 * So resync activity will cause curr_events to be smaller than
5105 * The amount in-flight is currently limited to 5125 * when there was no such activity.
5106 * 32*64K in raid1/10 and 256*PAGE_SIZE in raid5/6 5126 * non-sync IO will cause disk_stat to increase without
5107 * which is at most 4096 sectors. 5127 * increasing sync_io so curr_events will (eventually)
5108 * These numbers are fairly fragile and should be made 5128 * be larger than it was before. Once it becomes
5109 * more robust, probably by enforcing the 5129 * substantially larger, the test below will cause
5110 * 'window size' that md_do_sync sort-of uses. 5130 * the array to appear non-idle, and resync will slow
5131 * down.
5132 * If there is a lot of outstanding resync activity when
5133 * we set last_event to curr_events, then all that activity
5134 * completing might cause the array to appear non-idle
5135 * and resync will be slowed down even though there might
5136 * not have been non-resync activity. This will only
5137 * happen once though. 'last_events' will soon reflect
5138 * the state where there is little or no outstanding
5139 * resync requests, and further resync activity will
5140 * always make curr_events less than last_events.
5111 * 5141 *
5112 * Note: the following is an unsigned comparison.
5113 */ 5142 */
5114 if ((long)curr_events - (long)rdev->last_events > 4096) { 5143 if (curr_events - rdev->last_events > 4096) {
5115 rdev->last_events = curr_events; 5144 rdev->last_events = curr_events;
5116 idle = 0; 5145 idle = 0;
5117 } 5146 }
@@ -5772,7 +5801,7 @@ static void autostart_arrays(int part)
5772 for (i = 0; i < dev_cnt; i++) { 5801 for (i = 0; i < dev_cnt; i++) {
5773 dev_t dev = detected_devices[i]; 5802 dev_t dev = detected_devices[i];
5774 5803
5775 rdev = md_import_device(dev,0, 0); 5804 rdev = md_import_device(dev,0, 90);
5776 if (IS_ERR(rdev)) 5805 if (IS_ERR(rdev))
5777 continue; 5806 continue;
5778 5807
diff --git a/drivers/md/raid1.c b/drivers/md/raid1.c
index 46677d7d9980..00c78b77b13d 100644
--- a/drivers/md/raid1.c
+++ b/drivers/md/raid1.c
@@ -1526,8 +1526,7 @@ static void raid1d(mddev_t *mddev)
1526 blk_remove_plug(mddev->queue); 1526 blk_remove_plug(mddev->queue);
1527 spin_unlock_irqrestore(&conf->device_lock, flags); 1527 spin_unlock_irqrestore(&conf->device_lock, flags);
1528 /* flush any pending bitmap writes to disk before proceeding w/ I/O */ 1528 /* flush any pending bitmap writes to disk before proceeding w/ I/O */
1529 if (bitmap_unplug(mddev->bitmap) != 0) 1529 bitmap_unplug(mddev->bitmap);
1530 printk("%s: bitmap file write failed!\n", mdname(mddev));
1531 1530
1532 while (bio) { /* submit pending writes */ 1531 while (bio) { /* submit pending writes */
1533 struct bio *next = bio->bi_next; 1532 struct bio *next = bio->bi_next;
diff --git a/drivers/md/raid10.c b/drivers/md/raid10.c
index 9eb66c1b523b..a95ada1cfac4 100644
--- a/drivers/md/raid10.c
+++ b/drivers/md/raid10.c
@@ -1510,8 +1510,7 @@ static void raid10d(mddev_t *mddev)
1510 blk_remove_plug(mddev->queue); 1510 blk_remove_plug(mddev->queue);
1511 spin_unlock_irqrestore(&conf->device_lock, flags); 1511 spin_unlock_irqrestore(&conf->device_lock, flags);
1512 /* flush any pending bitmap writes to disk before proceeding w/ I/O */ 1512 /* flush any pending bitmap writes to disk before proceeding w/ I/O */
1513 if (bitmap_unplug(mddev->bitmap) != 0) 1513 bitmap_unplug(mddev->bitmap);
1514 printk("%s: bitmap file write failed!\n", mdname(mddev));
1515 1514
1516 while (bio) { /* submit pending writes */ 1515 while (bio) { /* submit pending writes */
1517 struct bio *next = bio->bi_next; 1516 struct bio *next = bio->bi_next;
diff --git a/drivers/media/dvb/dvb-core/dvb_frontend.c b/drivers/media/dvb/dvb-core/dvb_frontend.c
index f4e4ca2dcade..b6c7f6610ec5 100644
--- a/drivers/media/dvb/dvb-core/dvb_frontend.c
+++ b/drivers/media/dvb/dvb-core/dvb_frontend.c
@@ -523,6 +523,7 @@ static int dvb_frontend_thread(void *data)
523 523
524 dvb_frontend_init(fe); 524 dvb_frontend_init(fe);
525 525
526 set_freezable();
526 while (1) { 527 while (1) {
527 up(&fepriv->sem); /* is locked when we enter the thread... */ 528 up(&fepriv->sem); /* is locked when we enter the thread... */
528restart: 529restart:
diff --git a/drivers/media/video/cx88/cx88-tvaudio.c b/drivers/media/video/cx88/cx88-tvaudio.c
index 259ea08e784f..1cc2d286a1cb 100644
--- a/drivers/media/video/cx88/cx88-tvaudio.c
+++ b/drivers/media/video/cx88/cx88-tvaudio.c
@@ -906,6 +906,7 @@ int cx88_audio_thread(void *data)
906 u32 mode = 0; 906 u32 mode = 0;
907 907
908 dprintk("cx88: tvaudio thread started\n"); 908 dprintk("cx88: tvaudio thread started\n");
909 set_freezable();
909 for (;;) { 910 for (;;) {
910 msleep_interruptible(1000); 911 msleep_interruptible(1000);
911 if (kthread_should_stop()) 912 if (kthread_should_stop())
diff --git a/drivers/media/video/msp3400-kthreads.c b/drivers/media/video/msp3400-kthreads.c
index e1821eb82fb5..d5ee2629121e 100644
--- a/drivers/media/video/msp3400-kthreads.c
+++ b/drivers/media/video/msp3400-kthreads.c
@@ -23,6 +23,7 @@
23#include <linux/module.h> 23#include <linux/module.h>
24#include <linux/slab.h> 24#include <linux/slab.h>
25#include <linux/i2c.h> 25#include <linux/i2c.h>
26#include <linux/freezer.h>
26#include <linux/videodev.h> 27#include <linux/videodev.h>
27#include <linux/videodev2.h> 28#include <linux/videodev2.h>
28#include <media/v4l2-common.h> 29#include <media/v4l2-common.h>
@@ -468,6 +469,7 @@ int msp3400c_thread(void *data)
468 469
469 470
470 v4l_dbg(1, msp_debug, client, "msp3400 daemon started\n"); 471 v4l_dbg(1, msp_debug, client, "msp3400 daemon started\n");
472 set_freezable();
471 for (;;) { 473 for (;;) {
472 v4l_dbg(2, msp_debug, client, "msp3400 thread: sleep\n"); 474 v4l_dbg(2, msp_debug, client, "msp3400 thread: sleep\n");
473 msp_sleep(state, -1); 475 msp_sleep(state, -1);
@@ -646,7 +648,7 @@ int msp3410d_thread(void *data)
646 int val, i, std, count; 648 int val, i, std, count;
647 649
648 v4l_dbg(1, msp_debug, client, "msp3410 daemon started\n"); 650 v4l_dbg(1, msp_debug, client, "msp3410 daemon started\n");
649 651 set_freezable();
650 for (;;) { 652 for (;;) {
651 v4l_dbg(2, msp_debug, client, "msp3410 thread: sleep\n"); 653 v4l_dbg(2, msp_debug, client, "msp3410 thread: sleep\n");
652 msp_sleep(state,-1); 654 msp_sleep(state,-1);
@@ -940,7 +942,7 @@ int msp34xxg_thread(void *data)
940 int val, i; 942 int val, i;
941 943
942 v4l_dbg(1, msp_debug, client, "msp34xxg daemon started\n"); 944 v4l_dbg(1, msp_debug, client, "msp34xxg daemon started\n");
943 945 set_freezable();
944 for (;;) { 946 for (;;) {
945 v4l_dbg(2, msp_debug, client, "msp34xxg thread: sleep\n"); 947 v4l_dbg(2, msp_debug, client, "msp34xxg thread: sleep\n");
946 msp_sleep(state, -1); 948 msp_sleep(state, -1);
diff --git a/drivers/media/video/tvaudio.c b/drivers/media/video/tvaudio.c
index c9bf9dbc2ea3..9da338dc4f3b 100644
--- a/drivers/media/video/tvaudio.c
+++ b/drivers/media/video/tvaudio.c
@@ -271,7 +271,7 @@ static int chip_thread(void *data)
271 struct CHIPDESC *desc = chiplist + chip->type; 271 struct CHIPDESC *desc = chiplist + chip->type;
272 272
273 v4l_dbg(1, debug, &chip->c, "%s: thread started\n", chip->c.name); 273 v4l_dbg(1, debug, &chip->c, "%s: thread started\n", chip->c.name);
274 274 set_freezable();
275 for (;;) { 275 for (;;) {
276 set_current_state(TASK_INTERRUPTIBLE); 276 set_current_state(TASK_INTERRUPTIBLE);
277 if (!kthread_should_stop()) 277 if (!kthread_should_stop())
diff --git a/drivers/media/video/video-buf-dvb.c b/drivers/media/video/video-buf-dvb.c
index fcc5467e7636..e617925ba31e 100644
--- a/drivers/media/video/video-buf-dvb.c
+++ b/drivers/media/video/video-buf-dvb.c
@@ -47,6 +47,7 @@ static int videobuf_dvb_thread(void *data)
47 int err; 47 int err;
48 48
49 dprintk("dvb thread started\n"); 49 dprintk("dvb thread started\n");
50 set_freezable();
50 videobuf_read_start(&dvb->dvbq); 51 videobuf_read_start(&dvb->dvbq);
51 52
52 for (;;) { 53 for (;;) {
diff --git a/drivers/media/video/vivi.c b/drivers/media/video/vivi.c
index f7e1d1910374..3ef4d0159c33 100644
--- a/drivers/media/video/vivi.c
+++ b/drivers/media/video/vivi.c
@@ -573,6 +573,7 @@ static int vivi_thread(void *data)
573 dprintk(1,"thread started\n"); 573 dprintk(1,"thread started\n");
574 574
575 mod_timer(&dma_q->timeout, jiffies+BUFFER_TIMEOUT); 575 mod_timer(&dma_q->timeout, jiffies+BUFFER_TIMEOUT);
576 set_freezable();
576 577
577 for (;;) { 578 for (;;) {
578 vivi_sleep(dma_q); 579 vivi_sleep(dma_q);
diff --git a/drivers/message/i2o/debug.c b/drivers/message/i2o/debug.c
index 8abe45e49ad7..ce62d8bfe1c8 100644
--- a/drivers/message/i2o/debug.c
+++ b/drivers/message/i2o/debug.c
@@ -24,7 +24,7 @@ void i2o_report_status(const char *severity, const char *str,
24 if (cmd == I2O_CMD_UTIL_EVT_REGISTER) 24 if (cmd == I2O_CMD_UTIL_EVT_REGISTER)
25 return; // No status in this reply 25 return; // No status in this reply
26 26
27 printk(KERN_DEBUG "%s%s: ", severity, str); 27 printk("%s%s: ", severity, str);
28 28
29 if (cmd < 0x1F) // Utility cmd 29 if (cmd < 0x1F) // Utility cmd
30 i2o_report_util_cmd(cmd); 30 i2o_report_util_cmd(cmd);
@@ -32,7 +32,7 @@ void i2o_report_status(const char *severity, const char *str,
32 else if (cmd >= 0xA0 && cmd <= 0xEF) // Executive cmd 32 else if (cmd >= 0xA0 && cmd <= 0xEF) // Executive cmd
33 i2o_report_exec_cmd(cmd); 33 i2o_report_exec_cmd(cmd);
34 else 34 else
35 printk(KERN_DEBUG "Cmd = %0#2x, ", cmd); // Other cmds 35 printk("Cmd = %0#2x, ", cmd); // Other cmds
36 36
37 if (msg[0] & MSG_FAIL) { 37 if (msg[0] & MSG_FAIL) {
38 i2o_report_fail_status(req_status, msg); 38 i2o_report_fail_status(req_status, msg);
@@ -44,7 +44,7 @@ void i2o_report_status(const char *severity, const char *str,
44 if (cmd < 0x1F || (cmd >= 0xA0 && cmd <= 0xEF)) 44 if (cmd < 0x1F || (cmd >= 0xA0 && cmd <= 0xEF))
45 i2o_report_common_dsc(detailed_status); 45 i2o_report_common_dsc(detailed_status);
46 else 46 else
47 printk(KERN_DEBUG " / DetailedStatus = %0#4x.\n", 47 printk(" / DetailedStatus = %0#4x.\n",
48 detailed_status); 48 detailed_status);
49} 49}
50 50
@@ -89,10 +89,10 @@ static void i2o_report_fail_status(u8 req_status, u32 * msg)
89 }; 89 };
90 90
91 if (req_status == I2O_FSC_TRANSPORT_UNKNOWN_FAILURE) 91 if (req_status == I2O_FSC_TRANSPORT_UNKNOWN_FAILURE)
92 printk(KERN_DEBUG "TRANSPORT_UNKNOWN_FAILURE (%0#2x).\n", 92 printk("TRANSPORT_UNKNOWN_FAILURE (%0#2x).\n",
93 req_status); 93 req_status);
94 else 94 else
95 printk(KERN_DEBUG "TRANSPORT_%s.\n", 95 printk("TRANSPORT_%s.\n",
96 FAIL_STATUS[req_status & 0x0F]); 96 FAIL_STATUS[req_status & 0x0F]);
97 97
98 /* Dump some details */ 98 /* Dump some details */
@@ -104,7 +104,7 @@ static void i2o_report_fail_status(u8 req_status, u32 * msg)
104 printk(KERN_ERR " FailingHostUnit = 0x%04X, FailingIOP = 0x%03X\n", 104 printk(KERN_ERR " FailingHostUnit = 0x%04X, FailingIOP = 0x%03X\n",
105 msg[5] >> 16, msg[5] & 0xFFF); 105 msg[5] >> 16, msg[5] & 0xFFF);
106 106
107 printk(KERN_ERR " Severity: 0x%02X ", (msg[4] >> 16) & 0xFF); 107 printk(KERN_ERR " Severity: 0x%02X\n", (msg[4] >> 16) & 0xFF);
108 if (msg[4] & (1 << 16)) 108 if (msg[4] & (1 << 16))
109 printk(KERN_DEBUG "(FormatError), " 109 printk(KERN_DEBUG "(FormatError), "
110 "this msg can never be delivered/processed.\n"); 110 "this msg can never be delivered/processed.\n");
@@ -142,9 +142,9 @@ static void i2o_report_common_status(u8 req_status)
142 }; 142 };
143 143
144 if (req_status >= ARRAY_SIZE(REPLY_STATUS)) 144 if (req_status >= ARRAY_SIZE(REPLY_STATUS))
145 printk(KERN_DEBUG "RequestStatus = %0#2x", req_status); 145 printk("RequestStatus = %0#2x", req_status);
146 else 146 else
147 printk(KERN_DEBUG "%s", REPLY_STATUS[req_status]); 147 printk("%s", REPLY_STATUS[req_status]);
148} 148}
149 149
150/* 150/*
@@ -187,10 +187,10 @@ static void i2o_report_common_dsc(u16 detailed_status)
187 }; 187 };
188 188
189 if (detailed_status > I2O_DSC_DEVICE_NOT_AVAILABLE) 189 if (detailed_status > I2O_DSC_DEVICE_NOT_AVAILABLE)
190 printk(KERN_DEBUG " / DetailedStatus = %0#4x.\n", 190 printk(" / DetailedStatus = %0#4x.\n",
191 detailed_status); 191 detailed_status);
192 else 192 else
193 printk(KERN_DEBUG " / %s.\n", COMMON_DSC[detailed_status]); 193 printk(" / %s.\n", COMMON_DSC[detailed_status]);
194} 194}
195 195
196/* 196/*
@@ -200,49 +200,49 @@ static void i2o_report_util_cmd(u8 cmd)
200{ 200{
201 switch (cmd) { 201 switch (cmd) {
202 case I2O_CMD_UTIL_NOP: 202 case I2O_CMD_UTIL_NOP:
203 printk(KERN_DEBUG "UTIL_NOP, "); 203 printk("UTIL_NOP, ");
204 break; 204 break;
205 case I2O_CMD_UTIL_ABORT: 205 case I2O_CMD_UTIL_ABORT:
206 printk(KERN_DEBUG "UTIL_ABORT, "); 206 printk("UTIL_ABORT, ");
207 break; 207 break;
208 case I2O_CMD_UTIL_CLAIM: 208 case I2O_CMD_UTIL_CLAIM:
209 printk(KERN_DEBUG "UTIL_CLAIM, "); 209 printk("UTIL_CLAIM, ");
210 break; 210 break;
211 case I2O_CMD_UTIL_RELEASE: 211 case I2O_CMD_UTIL_RELEASE:
212 printk(KERN_DEBUG "UTIL_CLAIM_RELEASE, "); 212 printk("UTIL_CLAIM_RELEASE, ");
213 break; 213 break;
214 case I2O_CMD_UTIL_CONFIG_DIALOG: 214 case I2O_CMD_UTIL_CONFIG_DIALOG:
215 printk(KERN_DEBUG "UTIL_CONFIG_DIALOG, "); 215 printk("UTIL_CONFIG_DIALOG, ");
216 break; 216 break;
217 case I2O_CMD_UTIL_DEVICE_RESERVE: 217 case I2O_CMD_UTIL_DEVICE_RESERVE:
218 printk(KERN_DEBUG "UTIL_DEVICE_RESERVE, "); 218 printk("UTIL_DEVICE_RESERVE, ");
219 break; 219 break;
220 case I2O_CMD_UTIL_DEVICE_RELEASE: 220 case I2O_CMD_UTIL_DEVICE_RELEASE:
221 printk(KERN_DEBUG "UTIL_DEVICE_RELEASE, "); 221 printk("UTIL_DEVICE_RELEASE, ");
222 break; 222 break;
223 case I2O_CMD_UTIL_EVT_ACK: 223 case I2O_CMD_UTIL_EVT_ACK:
224 printk(KERN_DEBUG "UTIL_EVENT_ACKNOWLEDGE, "); 224 printk("UTIL_EVENT_ACKNOWLEDGE, ");
225 break; 225 break;
226 case I2O_CMD_UTIL_EVT_REGISTER: 226 case I2O_CMD_UTIL_EVT_REGISTER:
227 printk(KERN_DEBUG "UTIL_EVENT_REGISTER, "); 227 printk("UTIL_EVENT_REGISTER, ");
228 break; 228 break;
229 case I2O_CMD_UTIL_LOCK: 229 case I2O_CMD_UTIL_LOCK:
230 printk(KERN_DEBUG "UTIL_LOCK, "); 230 printk("UTIL_LOCK, ");
231 break; 231 break;
232 case I2O_CMD_UTIL_LOCK_RELEASE: 232 case I2O_CMD_UTIL_LOCK_RELEASE:
233 printk(KERN_DEBUG "UTIL_LOCK_RELEASE, "); 233 printk("UTIL_LOCK_RELEASE, ");
234 break; 234 break;
235 case I2O_CMD_UTIL_PARAMS_GET: 235 case I2O_CMD_UTIL_PARAMS_GET:
236 printk(KERN_DEBUG "UTIL_PARAMS_GET, "); 236 printk("UTIL_PARAMS_GET, ");
237 break; 237 break;
238 case I2O_CMD_UTIL_PARAMS_SET: 238 case I2O_CMD_UTIL_PARAMS_SET:
239 printk(KERN_DEBUG "UTIL_PARAMS_SET, "); 239 printk("UTIL_PARAMS_SET, ");
240 break; 240 break;
241 case I2O_CMD_UTIL_REPLY_FAULT_NOTIFY: 241 case I2O_CMD_UTIL_REPLY_FAULT_NOTIFY:
242 printk(KERN_DEBUG "UTIL_REPLY_FAULT_NOTIFY, "); 242 printk("UTIL_REPLY_FAULT_NOTIFY, ");
243 break; 243 break;
244 default: 244 default:
245 printk(KERN_DEBUG "Cmd = %0#2x, ", cmd); 245 printk("Cmd = %0#2x, ", cmd);
246 } 246 }
247} 247}
248 248
@@ -253,106 +253,106 @@ static void i2o_report_exec_cmd(u8 cmd)
253{ 253{
254 switch (cmd) { 254 switch (cmd) {
255 case I2O_CMD_ADAPTER_ASSIGN: 255 case I2O_CMD_ADAPTER_ASSIGN:
256 printk(KERN_DEBUG "EXEC_ADAPTER_ASSIGN, "); 256 printk("EXEC_ADAPTER_ASSIGN, ");
257 break; 257 break;
258 case I2O_CMD_ADAPTER_READ: 258 case I2O_CMD_ADAPTER_READ:
259 printk(KERN_DEBUG "EXEC_ADAPTER_READ, "); 259 printk("EXEC_ADAPTER_READ, ");
260 break; 260 break;
261 case I2O_CMD_ADAPTER_RELEASE: 261 case I2O_CMD_ADAPTER_RELEASE:
262 printk(KERN_DEBUG "EXEC_ADAPTER_RELEASE, "); 262 printk("EXEC_ADAPTER_RELEASE, ");
263 break; 263 break;
264 case I2O_CMD_BIOS_INFO_SET: 264 case I2O_CMD_BIOS_INFO_SET:
265 printk(KERN_DEBUG "EXEC_BIOS_INFO_SET, "); 265 printk("EXEC_BIOS_INFO_SET, ");
266 break; 266 break;
267 case I2O_CMD_BOOT_DEVICE_SET: 267 case I2O_CMD_BOOT_DEVICE_SET:
268 printk(KERN_DEBUG "EXEC_BOOT_DEVICE_SET, "); 268 printk("EXEC_BOOT_DEVICE_SET, ");
269 break; 269 break;
270 case I2O_CMD_CONFIG_VALIDATE: 270 case I2O_CMD_CONFIG_VALIDATE:
271 printk(KERN_DEBUG "EXEC_CONFIG_VALIDATE, "); 271 printk("EXEC_CONFIG_VALIDATE, ");
272 break; 272 break;
273 case I2O_CMD_CONN_SETUP: 273 case I2O_CMD_CONN_SETUP:
274 printk(KERN_DEBUG "EXEC_CONN_SETUP, "); 274 printk("EXEC_CONN_SETUP, ");
275 break; 275 break;
276 case I2O_CMD_DDM_DESTROY: 276 case I2O_CMD_DDM_DESTROY:
277 printk(KERN_DEBUG "EXEC_DDM_DESTROY, "); 277 printk("EXEC_DDM_DESTROY, ");
278 break; 278 break;
279 case I2O_CMD_DDM_ENABLE: 279 case I2O_CMD_DDM_ENABLE:
280 printk(KERN_DEBUG "EXEC_DDM_ENABLE, "); 280 printk("EXEC_DDM_ENABLE, ");
281 break; 281 break;
282 case I2O_CMD_DDM_QUIESCE: 282 case I2O_CMD_DDM_QUIESCE:
283 printk(KERN_DEBUG "EXEC_DDM_QUIESCE, "); 283 printk("EXEC_DDM_QUIESCE, ");
284 break; 284 break;
285 case I2O_CMD_DDM_RESET: 285 case I2O_CMD_DDM_RESET:
286 printk(KERN_DEBUG "EXEC_DDM_RESET, "); 286 printk("EXEC_DDM_RESET, ");
287 break; 287 break;
288 case I2O_CMD_DDM_SUSPEND: 288 case I2O_CMD_DDM_SUSPEND:
289 printk(KERN_DEBUG "EXEC_DDM_SUSPEND, "); 289 printk("EXEC_DDM_SUSPEND, ");
290 break; 290 break;
291 case I2O_CMD_DEVICE_ASSIGN: 291 case I2O_CMD_DEVICE_ASSIGN:
292 printk(KERN_DEBUG "EXEC_DEVICE_ASSIGN, "); 292 printk("EXEC_DEVICE_ASSIGN, ");
293 break; 293 break;
294 case I2O_CMD_DEVICE_RELEASE: 294 case I2O_CMD_DEVICE_RELEASE:
295 printk(KERN_DEBUG "EXEC_DEVICE_RELEASE, "); 295 printk("EXEC_DEVICE_RELEASE, ");
296 break; 296 break;
297 case I2O_CMD_HRT_GET: 297 case I2O_CMD_HRT_GET:
298 printk(KERN_DEBUG "EXEC_HRT_GET, "); 298 printk("EXEC_HRT_GET, ");
299 break; 299 break;
300 case I2O_CMD_ADAPTER_CLEAR: 300 case I2O_CMD_ADAPTER_CLEAR:
301 printk(KERN_DEBUG "EXEC_IOP_CLEAR, "); 301 printk("EXEC_IOP_CLEAR, ");
302 break; 302 break;
303 case I2O_CMD_ADAPTER_CONNECT: 303 case I2O_CMD_ADAPTER_CONNECT:
304 printk(KERN_DEBUG "EXEC_IOP_CONNECT, "); 304 printk("EXEC_IOP_CONNECT, ");
305 break; 305 break;
306 case I2O_CMD_ADAPTER_RESET: 306 case I2O_CMD_ADAPTER_RESET:
307 printk(KERN_DEBUG "EXEC_IOP_RESET, "); 307 printk("EXEC_IOP_RESET, ");
308 break; 308 break;
309 case I2O_CMD_LCT_NOTIFY: 309 case I2O_CMD_LCT_NOTIFY:
310 printk(KERN_DEBUG "EXEC_LCT_NOTIFY, "); 310 printk("EXEC_LCT_NOTIFY, ");
311 break; 311 break;
312 case I2O_CMD_OUTBOUND_INIT: 312 case I2O_CMD_OUTBOUND_INIT:
313 printk(KERN_DEBUG "EXEC_OUTBOUND_INIT, "); 313 printk("EXEC_OUTBOUND_INIT, ");
314 break; 314 break;
315 case I2O_CMD_PATH_ENABLE: 315 case I2O_CMD_PATH_ENABLE:
316 printk(KERN_DEBUG "EXEC_PATH_ENABLE, "); 316 printk("EXEC_PATH_ENABLE, ");
317 break; 317 break;
318 case I2O_CMD_PATH_QUIESCE: 318 case I2O_CMD_PATH_QUIESCE:
319 printk(KERN_DEBUG "EXEC_PATH_QUIESCE, "); 319 printk("EXEC_PATH_QUIESCE, ");
320 break; 320 break;
321 case I2O_CMD_PATH_RESET: 321 case I2O_CMD_PATH_RESET:
322 printk(KERN_DEBUG "EXEC_PATH_RESET, "); 322 printk("EXEC_PATH_RESET, ");
323 break; 323 break;
324 case I2O_CMD_STATIC_MF_CREATE: 324 case I2O_CMD_STATIC_MF_CREATE:
325 printk(KERN_DEBUG "EXEC_STATIC_MF_CREATE, "); 325 printk("EXEC_STATIC_MF_CREATE, ");
326 break; 326 break;
327 case I2O_CMD_STATIC_MF_RELEASE: 327 case I2O_CMD_STATIC_MF_RELEASE:
328 printk(KERN_DEBUG "EXEC_STATIC_MF_RELEASE, "); 328 printk("EXEC_STATIC_MF_RELEASE, ");
329 break; 329 break;
330 case I2O_CMD_STATUS_GET: 330 case I2O_CMD_STATUS_GET:
331 printk(KERN_DEBUG "EXEC_STATUS_GET, "); 331 printk("EXEC_STATUS_GET, ");
332 break; 332 break;
333 case I2O_CMD_SW_DOWNLOAD: 333 case I2O_CMD_SW_DOWNLOAD:
334 printk(KERN_DEBUG "EXEC_SW_DOWNLOAD, "); 334 printk("EXEC_SW_DOWNLOAD, ");
335 break; 335 break;
336 case I2O_CMD_SW_UPLOAD: 336 case I2O_CMD_SW_UPLOAD:
337 printk(KERN_DEBUG "EXEC_SW_UPLOAD, "); 337 printk("EXEC_SW_UPLOAD, ");
338 break; 338 break;
339 case I2O_CMD_SW_REMOVE: 339 case I2O_CMD_SW_REMOVE:
340 printk(KERN_DEBUG "EXEC_SW_REMOVE, "); 340 printk("EXEC_SW_REMOVE, ");
341 break; 341 break;
342 case I2O_CMD_SYS_ENABLE: 342 case I2O_CMD_SYS_ENABLE:
343 printk(KERN_DEBUG "EXEC_SYS_ENABLE, "); 343 printk("EXEC_SYS_ENABLE, ");
344 break; 344 break;
345 case I2O_CMD_SYS_MODIFY: 345 case I2O_CMD_SYS_MODIFY:
346 printk(KERN_DEBUG "EXEC_SYS_MODIFY, "); 346 printk("EXEC_SYS_MODIFY, ");
347 break; 347 break;
348 case I2O_CMD_SYS_QUIESCE: 348 case I2O_CMD_SYS_QUIESCE:
349 printk(KERN_DEBUG "EXEC_SYS_QUIESCE, "); 349 printk("EXEC_SYS_QUIESCE, ");
350 break; 350 break;
351 case I2O_CMD_SYS_TAB_SET: 351 case I2O_CMD_SYS_TAB_SET:
352 printk(KERN_DEBUG "EXEC_SYS_TAB_SET, "); 352 printk("EXEC_SYS_TAB_SET, ");
353 break; 353 break;
354 default: 354 default:
355 printk(KERN_DEBUG "Cmd = %#02x, ", cmd); 355 printk("Cmd = %#02x, ", cmd);
356 } 356 }
357} 357}
358 358
@@ -361,28 +361,28 @@ void i2o_debug_state(struct i2o_controller *c)
361 printk(KERN_INFO "%s: State = ", c->name); 361 printk(KERN_INFO "%s: State = ", c->name);
362 switch (((i2o_status_block *) c->status_block.virt)->iop_state) { 362 switch (((i2o_status_block *) c->status_block.virt)->iop_state) {
363 case 0x01: 363 case 0x01:
364 printk(KERN_DEBUG "INIT\n"); 364 printk("INIT\n");
365 break; 365 break;
366 case 0x02: 366 case 0x02:
367 printk(KERN_DEBUG "RESET\n"); 367 printk("RESET\n");
368 break; 368 break;
369 case 0x04: 369 case 0x04:
370 printk(KERN_DEBUG "HOLD\n"); 370 printk("HOLD\n");
371 break; 371 break;
372 case 0x05: 372 case 0x05:
373 printk(KERN_DEBUG "READY\n"); 373 printk("READY\n");
374 break; 374 break;
375 case 0x08: 375 case 0x08:
376 printk(KERN_DEBUG "OPERATIONAL\n"); 376 printk("OPERATIONAL\n");
377 break; 377 break;
378 case 0x10: 378 case 0x10:
379 printk(KERN_DEBUG "FAILED\n"); 379 printk("FAILED\n");
380 break; 380 break;
381 case 0x11: 381 case 0x11:
382 printk(KERN_DEBUG "FAULTED\n"); 382 printk("FAULTED\n");
383 break; 383 break;
384 default: 384 default:
385 printk(KERN_DEBUG "%x (unknown !!)\n", 385 printk("%x (unknown !!)\n",
386 ((i2o_status_block *) c->status_block.virt)->iop_state); 386 ((i2o_status_block *) c->status_block.virt)->iop_state);
387 } 387 }
388}; 388};
diff --git a/drivers/message/i2o/exec-osm.c b/drivers/message/i2o/exec-osm.c
index c13b9321e7ab..8c83ee3b0920 100644
--- a/drivers/message/i2o/exec-osm.c
+++ b/drivers/message/i2o/exec-osm.c
@@ -131,8 +131,10 @@ int i2o_msg_post_wait_mem(struct i2o_controller *c, struct i2o_message *msg,
131 int rc = 0; 131 int rc = 0;
132 132
133 wait = i2o_exec_wait_alloc(); 133 wait = i2o_exec_wait_alloc();
134 if (!wait) 134 if (!wait) {
135 i2o_msg_nop(c, msg);
135 return -ENOMEM; 136 return -ENOMEM;
137 }
136 138
137 if (tcntxt == 0xffffffff) 139 if (tcntxt == 0xffffffff)
138 tcntxt = 0x80000000; 140 tcntxt = 0x80000000;
@@ -337,6 +339,8 @@ static int i2o_exec_probe(struct device *dev)
337 rc = device_create_file(dev, &dev_attr_product_id); 339 rc = device_create_file(dev, &dev_attr_product_id);
338 if (rc) goto err_vid; 340 if (rc) goto err_vid;
339 341
342 i2o_dev->iop->exec = i2o_dev;
343
340 return 0; 344 return 0;
341 345
342err_vid: 346err_vid:
diff --git a/drivers/message/i2o/i2o_block.c b/drivers/message/i2o/i2o_block.c
index b17c4b2bc9ef..64a52bd7544a 100644
--- a/drivers/message/i2o/i2o_block.c
+++ b/drivers/message/i2o/i2o_block.c
@@ -215,7 +215,7 @@ static int i2o_block_device_lock(struct i2o_device *dev, u32 media_id)
215 struct i2o_message *msg; 215 struct i2o_message *msg;
216 216
217 msg = i2o_msg_get_wait(dev->iop, I2O_TIMEOUT_MESSAGE_GET); 217 msg = i2o_msg_get_wait(dev->iop, I2O_TIMEOUT_MESSAGE_GET);
218 if (IS_ERR(msg) == I2O_QUEUE_EMPTY) 218 if (IS_ERR(msg))
219 return PTR_ERR(msg); 219 return PTR_ERR(msg);
220 220
221 msg->u.head[0] = cpu_to_le32(FIVE_WORD_MSG_SIZE | SGL_OFFSET_0); 221 msg->u.head[0] = cpu_to_le32(FIVE_WORD_MSG_SIZE | SGL_OFFSET_0);
diff --git a/drivers/message/i2o/i2o_config.c b/drivers/message/i2o/i2o_config.c
index 8ba275a12773..84e046e94f5f 100644
--- a/drivers/message/i2o/i2o_config.c
+++ b/drivers/message/i2o/i2o_config.c
@@ -554,8 +554,6 @@ static int i2o_cfg_passthru32(struct file *file, unsigned cmnd,
554 return -ENXIO; 554 return -ENXIO;
555 } 555 }
556 556
557 msg = i2o_msg_get_wait(c, I2O_TIMEOUT_MESSAGE_GET);
558
559 sb = c->status_block.virt; 557 sb = c->status_block.virt;
560 558
561 if (get_user(size, &user_msg[0])) { 559 if (get_user(size, &user_msg[0])) {
@@ -573,24 +571,30 @@ static int i2o_cfg_passthru32(struct file *file, unsigned cmnd,
573 571
574 size <<= 2; // Convert to bytes 572 size <<= 2; // Convert to bytes
575 573
574 msg = i2o_msg_get_wait(c, I2O_TIMEOUT_MESSAGE_GET);
575 if (IS_ERR(msg))
576 return PTR_ERR(msg);
577
578 rcode = -EFAULT;
576 /* Copy in the user's I2O command */ 579 /* Copy in the user's I2O command */
577 if (copy_from_user(msg, user_msg, size)) { 580 if (copy_from_user(msg, user_msg, size)) {
578 osm_warn("unable to copy user message\n"); 581 osm_warn("unable to copy user message\n");
579 return -EFAULT; 582 goto out;
580 } 583 }
581 i2o_dump_message(msg); 584 i2o_dump_message(msg);
582 585
583 if (get_user(reply_size, &user_reply[0]) < 0) 586 if (get_user(reply_size, &user_reply[0]) < 0)
584 return -EFAULT; 587 goto out;
585 588
586 reply_size >>= 16; 589 reply_size >>= 16;
587 reply_size <<= 2; 590 reply_size <<= 2;
588 591
592 rcode = -ENOMEM;
589 reply = kzalloc(reply_size, GFP_KERNEL); 593 reply = kzalloc(reply_size, GFP_KERNEL);
590 if (!reply) { 594 if (!reply) {
591 printk(KERN_WARNING "%s: Could not allocate reply buffer\n", 595 printk(KERN_WARNING "%s: Could not allocate reply buffer\n",
592 c->name); 596 c->name);
593 return -ENOMEM; 597 goto out;
594 } 598 }
595 599
596 sg_offset = (msg->u.head[0] >> 4) & 0x0f; 600 sg_offset = (msg->u.head[0] >> 4) & 0x0f;
@@ -661,13 +665,14 @@ static int i2o_cfg_passthru32(struct file *file, unsigned cmnd,
661 } 665 }
662 666
663 rcode = i2o_msg_post_wait(c, msg, 60); 667 rcode = i2o_msg_post_wait(c, msg, 60);
668 msg = NULL;
664 if (rcode) { 669 if (rcode) {
665 reply[4] = ((u32) rcode) << 24; 670 reply[4] = ((u32) rcode) << 24;
666 goto sg_list_cleanup; 671 goto sg_list_cleanup;
667 } 672 }
668 673
669 if (sg_offset) { 674 if (sg_offset) {
670 u32 msg[I2O_OUTBOUND_MSG_FRAME_SIZE]; 675 u32 rmsg[I2O_OUTBOUND_MSG_FRAME_SIZE];
671 /* Copy back the Scatter Gather buffers back to user space */ 676 /* Copy back the Scatter Gather buffers back to user space */
672 u32 j; 677 u32 j;
673 // TODO 64bit fix 678 // TODO 64bit fix
@@ -675,7 +680,7 @@ static int i2o_cfg_passthru32(struct file *file, unsigned cmnd,
675 int sg_size; 680 int sg_size;
676 681
677 // re-acquire the original message to handle correctly the sg copy operation 682 // re-acquire the original message to handle correctly the sg copy operation
678 memset(&msg, 0, I2O_OUTBOUND_MSG_FRAME_SIZE * 4); 683 memset(&rmsg, 0, I2O_OUTBOUND_MSG_FRAME_SIZE * 4);
679 // get user msg size in u32s 684 // get user msg size in u32s
680 if (get_user(size, &user_msg[0])) { 685 if (get_user(size, &user_msg[0])) {
681 rcode = -EFAULT; 686 rcode = -EFAULT;
@@ -684,7 +689,7 @@ static int i2o_cfg_passthru32(struct file *file, unsigned cmnd,
684 size = size >> 16; 689 size = size >> 16;
685 size *= 4; 690 size *= 4;
686 /* Copy in the user's I2O command */ 691 /* Copy in the user's I2O command */
687 if (copy_from_user(msg, user_msg, size)) { 692 if (copy_from_user(rmsg, user_msg, size)) {
688 rcode = -EFAULT; 693 rcode = -EFAULT;
689 goto sg_list_cleanup; 694 goto sg_list_cleanup;
690 } 695 }
@@ -692,7 +697,7 @@ static int i2o_cfg_passthru32(struct file *file, unsigned cmnd,
692 (size - sg_offset * 4) / sizeof(struct sg_simple_element); 697 (size - sg_offset * 4) / sizeof(struct sg_simple_element);
693 698
694 // TODO 64bit fix 699 // TODO 64bit fix
695 sg = (struct sg_simple_element *)(msg + sg_offset); 700 sg = (struct sg_simple_element *)(rmsg + sg_offset);
696 for (j = 0; j < sg_count; j++) { 701 for (j = 0; j < sg_count; j++) {
697 /* Copy out the SG list to user's buffer if necessary */ 702 /* Copy out the SG list to user's buffer if necessary */
698 if (! 703 if (!
@@ -714,7 +719,7 @@ static int i2o_cfg_passthru32(struct file *file, unsigned cmnd,
714 } 719 }
715 } 720 }
716 721
717 sg_list_cleanup: 722sg_list_cleanup:
718 /* Copy back the reply to user space */ 723 /* Copy back the reply to user space */
719 if (reply_size) { 724 if (reply_size) {
720 // we wrote our own values for context - now restore the user supplied ones 725 // we wrote our own values for context - now restore the user supplied ones
@@ -723,7 +728,6 @@ static int i2o_cfg_passthru32(struct file *file, unsigned cmnd,
723 "%s: Could not copy message context FROM user\n", 728 "%s: Could not copy message context FROM user\n",
724 c->name); 729 c->name);
725 rcode = -EFAULT; 730 rcode = -EFAULT;
726 goto sg_list_cleanup;
727 } 731 }
728 if (copy_to_user(user_reply, reply, reply_size)) { 732 if (copy_to_user(user_reply, reply, reply_size)) {
729 printk(KERN_WARNING 733 printk(KERN_WARNING
@@ -731,12 +735,14 @@ static int i2o_cfg_passthru32(struct file *file, unsigned cmnd,
731 rcode = -EFAULT; 735 rcode = -EFAULT;
732 } 736 }
733 } 737 }
734
735 for (i = 0; i < sg_index; i++) 738 for (i = 0; i < sg_index; i++)
736 i2o_dma_free(&c->pdev->dev, &sg_list[i]); 739 i2o_dma_free(&c->pdev->dev, &sg_list[i]);
737 740
738 cleanup: 741cleanup:
739 kfree(reply); 742 kfree(reply);
743out:
744 if (msg)
745 i2o_msg_nop(c, msg);
740 return rcode; 746 return rcode;
741} 747}
742 748
@@ -793,8 +799,6 @@ static int i2o_cfg_passthru(unsigned long arg)
793 return -ENXIO; 799 return -ENXIO;
794 } 800 }
795 801
796 msg = i2o_msg_get_wait(c, I2O_TIMEOUT_MESSAGE_GET);
797
798 sb = c->status_block.virt; 802 sb = c->status_block.virt;
799 803
800 if (get_user(size, &user_msg[0])) 804 if (get_user(size, &user_msg[0]))
@@ -810,12 +814,17 @@ static int i2o_cfg_passthru(unsigned long arg)
810 814
811 size <<= 2; // Convert to bytes 815 size <<= 2; // Convert to bytes
812 816
817 msg = i2o_msg_get_wait(c, I2O_TIMEOUT_MESSAGE_GET);
818 if (IS_ERR(msg))
819 return PTR_ERR(msg);
820
821 rcode = -EFAULT;
813 /* Copy in the user's I2O command */ 822 /* Copy in the user's I2O command */
814 if (copy_from_user(msg, user_msg, size)) 823 if (copy_from_user(msg, user_msg, size))
815 return -EFAULT; 824 goto out;
816 825
817 if (get_user(reply_size, &user_reply[0]) < 0) 826 if (get_user(reply_size, &user_reply[0]) < 0)
818 return -EFAULT; 827 goto out;
819 828
820 reply_size >>= 16; 829 reply_size >>= 16;
821 reply_size <<= 2; 830 reply_size <<= 2;
@@ -824,7 +833,8 @@ static int i2o_cfg_passthru(unsigned long arg)
824 if (!reply) { 833 if (!reply) {
825 printk(KERN_WARNING "%s: Could not allocate reply buffer\n", 834 printk(KERN_WARNING "%s: Could not allocate reply buffer\n",
826 c->name); 835 c->name);
827 return -ENOMEM; 836 rcode = -ENOMEM;
837 goto out;
828 } 838 }
829 839
830 sg_offset = (msg->u.head[0] >> 4) & 0x0f; 840 sg_offset = (msg->u.head[0] >> 4) & 0x0f;
@@ -891,13 +901,14 @@ static int i2o_cfg_passthru(unsigned long arg)
891 } 901 }
892 902
893 rcode = i2o_msg_post_wait(c, msg, 60); 903 rcode = i2o_msg_post_wait(c, msg, 60);
904 msg = NULL;
894 if (rcode) { 905 if (rcode) {
895 reply[4] = ((u32) rcode) << 24; 906 reply[4] = ((u32) rcode) << 24;
896 goto sg_list_cleanup; 907 goto sg_list_cleanup;
897 } 908 }
898 909
899 if (sg_offset) { 910 if (sg_offset) {
900 u32 msg[128]; 911 u32 rmsg[128];
901 /* Copy back the Scatter Gather buffers back to user space */ 912 /* Copy back the Scatter Gather buffers back to user space */
902 u32 j; 913 u32 j;
903 // TODO 64bit fix 914 // TODO 64bit fix
@@ -905,7 +916,7 @@ static int i2o_cfg_passthru(unsigned long arg)
905 int sg_size; 916 int sg_size;
906 917
907 // re-acquire the original message to handle correctly the sg copy operation 918 // re-acquire the original message to handle correctly the sg copy operation
908 memset(&msg, 0, I2O_OUTBOUND_MSG_FRAME_SIZE * 4); 919 memset(&rmsg, 0, I2O_OUTBOUND_MSG_FRAME_SIZE * 4);
909 // get user msg size in u32s 920 // get user msg size in u32s
910 if (get_user(size, &user_msg[0])) { 921 if (get_user(size, &user_msg[0])) {
911 rcode = -EFAULT; 922 rcode = -EFAULT;
@@ -914,7 +925,7 @@ static int i2o_cfg_passthru(unsigned long arg)
914 size = size >> 16; 925 size = size >> 16;
915 size *= 4; 926 size *= 4;
916 /* Copy in the user's I2O command */ 927 /* Copy in the user's I2O command */
917 if (copy_from_user(msg, user_msg, size)) { 928 if (copy_from_user(rmsg, user_msg, size)) {
918 rcode = -EFAULT; 929 rcode = -EFAULT;
919 goto sg_list_cleanup; 930 goto sg_list_cleanup;
920 } 931 }
@@ -922,7 +933,7 @@ static int i2o_cfg_passthru(unsigned long arg)
922 (size - sg_offset * 4) / sizeof(struct sg_simple_element); 933 (size - sg_offset * 4) / sizeof(struct sg_simple_element);
923 934
924 // TODO 64bit fix 935 // TODO 64bit fix
925 sg = (struct sg_simple_element *)(msg + sg_offset); 936 sg = (struct sg_simple_element *)(rmsg + sg_offset);
926 for (j = 0; j < sg_count; j++) { 937 for (j = 0; j < sg_count; j++) {
927 /* Copy out the SG list to user's buffer if necessary */ 938 /* Copy out the SG list to user's buffer if necessary */
928 if (! 939 if (!
@@ -944,7 +955,7 @@ static int i2o_cfg_passthru(unsigned long arg)
944 } 955 }
945 } 956 }
946 957
947 sg_list_cleanup: 958sg_list_cleanup:
948 /* Copy back the reply to user space */ 959 /* Copy back the reply to user space */
949 if (reply_size) { 960 if (reply_size) {
950 // we wrote our own values for context - now restore the user supplied ones 961 // we wrote our own values for context - now restore the user supplied ones
@@ -964,8 +975,11 @@ static int i2o_cfg_passthru(unsigned long arg)
964 for (i = 0; i < sg_index; i++) 975 for (i = 0; i < sg_index; i++)
965 kfree(sg_list[i]); 976 kfree(sg_list[i]);
966 977
967 cleanup: 978cleanup:
968 kfree(reply); 979 kfree(reply);
980out:
981 if (msg)
982 i2o_msg_nop(c, msg);
969 return rcode; 983 return rcode;
970} 984}
971#endif 985#endif
diff --git a/drivers/mfd/ucb1x00-ts.c b/drivers/mfd/ucb1x00-ts.c
index 38e815a2e871..fdbaa776f249 100644
--- a/drivers/mfd/ucb1x00-ts.c
+++ b/drivers/mfd/ucb1x00-ts.c
@@ -209,6 +209,7 @@ static int ucb1x00_thread(void *_ts)
209 DECLARE_WAITQUEUE(wait, tsk); 209 DECLARE_WAITQUEUE(wait, tsk);
210 int valid = 0; 210 int valid = 0;
211 211
212 set_freezable();
212 add_wait_queue(&ts->irq_wait, &wait); 213 add_wait_queue(&ts->irq_wait, &wait);
213 while (!kthread_should_stop()) { 214 while (!kthread_should_stop()) {
214 unsigned int x, y, p; 215 unsigned int x, y, p;
diff --git a/drivers/misc/Kconfig b/drivers/misc/Kconfig
index a92b8728b90c..1d516f24ba53 100644
--- a/drivers/misc/Kconfig
+++ b/drivers/misc/Kconfig
@@ -10,7 +10,7 @@ if MISC_DEVICES
10 10
11config IBM_ASM 11config IBM_ASM
12 tristate "Device driver for IBM RSA service processor" 12 tristate "Device driver for IBM RSA service processor"
13 depends on X86 && PCI && EXPERIMENTAL 13 depends on X86 && PCI && INPUT && EXPERIMENTAL
14 ---help--- 14 ---help---
15 This option enables device driver support for in-band access to the 15 This option enables device driver support for in-band access to the
16 IBM RSA (Condor) service processor in eServer xSeries systems. 16 IBM RSA (Condor) service processor in eServer xSeries systems.
diff --git a/drivers/misc/ibmasm/command.c b/drivers/misc/ibmasm/command.c
index 07a085ccbd5b..b5df347c81b9 100644
--- a/drivers/misc/ibmasm/command.c
+++ b/drivers/misc/ibmasm/command.c
@@ -18,7 +18,7 @@
18 * 18 *
19 * Copyright (C) IBM Corporation, 2004 19 * Copyright (C) IBM Corporation, 2004
20 * 20 *
21 * Author: Max Asböck <amax@us.ibm.com> 21 * Author: Max Asböck <amax@us.ibm.com>
22 * 22 *
23 */ 23 */
24 24
@@ -72,7 +72,7 @@ struct command *ibmasm_new_command(struct service_processor *sp, size_t buffer_s
72static void free_command(struct kobject *kobj) 72static void free_command(struct kobject *kobj)
73{ 73{
74 struct command *cmd = to_command(kobj); 74 struct command *cmd = to_command(kobj);
75 75
76 list_del(&cmd->queue_node); 76 list_del(&cmd->queue_node);
77 atomic_dec(&command_count); 77 atomic_dec(&command_count);
78 dbg("command count: %d\n", atomic_read(&command_count)); 78 dbg("command count: %d\n", atomic_read(&command_count));
@@ -113,14 +113,14 @@ static inline void do_exec_command(struct service_processor *sp)
113 exec_next_command(sp); 113 exec_next_command(sp);
114 } 114 }
115} 115}
116 116
117/** 117/**
118 * exec_command 118 * exec_command
119 * send a command to a service processor 119 * send a command to a service processor
120 * Commands are executed sequentially. One command (sp->current_command) 120 * Commands are executed sequentially. One command (sp->current_command)
121 * is sent to the service processor. Once the interrupt handler gets a 121 * is sent to the service processor. Once the interrupt handler gets a
122 * message of type command_response, the message is copied into 122 * message of type command_response, the message is copied into
123 * the current commands buffer, 123 * the current commands buffer,
124 */ 124 */
125void ibmasm_exec_command(struct service_processor *sp, struct command *cmd) 125void ibmasm_exec_command(struct service_processor *sp, struct command *cmd)
126{ 126{
@@ -160,7 +160,7 @@ static void exec_next_command(struct service_processor *sp)
160 } 160 }
161} 161}
162 162
163/** 163/**
164 * Sleep until a command has failed or a response has been received 164 * Sleep until a command has failed or a response has been received
165 * and the command status been updated by the interrupt handler. 165 * and the command status been updated by the interrupt handler.
166 * (see receive_response). 166 * (see receive_response).
@@ -182,8 +182,8 @@ void ibmasm_receive_command_response(struct service_processor *sp, void *respons
182{ 182{
183 struct command *cmd = sp->current_command; 183 struct command *cmd = sp->current_command;
184 184
185 if (!sp->current_command) 185 if (!sp->current_command)
186 return; 186 return;
187 187
188 memcpy_fromio(cmd->buffer, response, min(size, cmd->buffer_size)); 188 memcpy_fromio(cmd->buffer, response, min(size, cmd->buffer_size));
189 cmd->status = IBMASM_CMD_COMPLETE; 189 cmd->status = IBMASM_CMD_COMPLETE;
diff --git a/drivers/misc/ibmasm/dot_command.c b/drivers/misc/ibmasm/dot_command.c
index 13c52f866e2e..3dd2dfb8da17 100644
--- a/drivers/misc/ibmasm/dot_command.c
+++ b/drivers/misc/ibmasm/dot_command.c
@@ -17,7 +17,7 @@
17 * 17 *
18 * Copyright (C) IBM Corporation, 2004 18 * Copyright (C) IBM Corporation, 2004
19 * 19 *
20 * Author: Max Asböck <amax@us.ibm.com> 20 * Author: Max Asböck <amax@us.ibm.com>
21 * 21 *
22 */ 22 */
23 23
@@ -44,11 +44,11 @@ void ibmasm_receive_message(struct service_processor *sp, void *message, int mes
44 size = message_size; 44 size = message_size;
45 45
46 switch (header->type) { 46 switch (header->type) {
47 case sp_event: 47 case sp_event:
48 ibmasm_receive_event(sp, message, size); 48 ibmasm_receive_event(sp, message, size);
49 break; 49 break;
50 case sp_command_response: 50 case sp_command_response:
51 ibmasm_receive_command_response(sp, message, size); 51 ibmasm_receive_command_response(sp, message, size);
52 break; 52 break;
53 case sp_heartbeat: 53 case sp_heartbeat:
54 ibmasm_receive_heartbeat(sp, message, size); 54 ibmasm_receive_heartbeat(sp, message, size);
@@ -95,7 +95,7 @@ int ibmasm_send_driver_vpd(struct service_processor *sp)
95 strcat(vpd_data, IBMASM_DRIVER_VPD); 95 strcat(vpd_data, IBMASM_DRIVER_VPD);
96 vpd_data[10] = 0; 96 vpd_data[10] = 0;
97 vpd_data[15] = 0; 97 vpd_data[15] = 0;
98 98
99 ibmasm_exec_command(sp, command); 99 ibmasm_exec_command(sp, command);
100 ibmasm_wait_for_response(command, IBMASM_CMD_TIMEOUT_NORMAL); 100 ibmasm_wait_for_response(command, IBMASM_CMD_TIMEOUT_NORMAL);
101 101
@@ -118,7 +118,7 @@ struct os_state_command {
118 * During driver init this function is called with os state "up". 118 * During driver init this function is called with os state "up".
119 * This causes the service processor to start sending heartbeats the 119 * This causes the service processor to start sending heartbeats the
120 * driver. 120 * driver.
121 * During driver exit the function is called with os state "down", 121 * During driver exit the function is called with os state "down",
122 * causing the service processor to stop the heartbeats. 122 * causing the service processor to stop the heartbeats.
123 */ 123 */
124int ibmasm_send_os_state(struct service_processor *sp, int os_state) 124int ibmasm_send_os_state(struct service_processor *sp, int os_state)
diff --git a/drivers/misc/ibmasm/dot_command.h b/drivers/misc/ibmasm/dot_command.h
index 2d21c2741b6a..6cbba1afef35 100644
--- a/drivers/misc/ibmasm/dot_command.h
+++ b/drivers/misc/ibmasm/dot_command.h
@@ -17,7 +17,7 @@
17 * 17 *
18 * Copyright (C) IBM Corporation, 2004 18 * Copyright (C) IBM Corporation, 2004
19 * 19 *
20 * Author: Max Asböck <amax@us.ibm.com> 20 * Author: Max Asböck <amax@us.ibm.com>
21 * 21 *
22 */ 22 */
23 23
diff --git a/drivers/misc/ibmasm/event.c b/drivers/misc/ibmasm/event.c
index fe1e819235a4..fda6a4d3bf23 100644
--- a/drivers/misc/ibmasm/event.c
+++ b/drivers/misc/ibmasm/event.c
@@ -18,7 +18,7 @@
18 * 18 *
19 * Copyright (C) IBM Corporation, 2004 19 * Copyright (C) IBM Corporation, 2004
20 * 20 *
21 * Author: Max Asböck <amax@us.ibm.com> 21 * Author: Max Asböck <amax@us.ibm.com>
22 * 22 *
23 */ 23 */
24 24
@@ -51,7 +51,7 @@ static void wake_up_event_readers(struct service_processor *sp)
51 * event readers. 51 * event readers.
52 * There is no reader marker in the buffer, therefore readers are 52 * There is no reader marker in the buffer, therefore readers are
53 * responsible for keeping up with the writer, or they will loose events. 53 * responsible for keeping up with the writer, or they will loose events.
54 */ 54 */
55void ibmasm_receive_event(struct service_processor *sp, void *data, unsigned int data_size) 55void ibmasm_receive_event(struct service_processor *sp, void *data, unsigned int data_size)
56{ 56{
57 struct event_buffer *buffer = sp->event_buffer; 57 struct event_buffer *buffer = sp->event_buffer;
@@ -77,13 +77,13 @@ void ibmasm_receive_event(struct service_processor *sp, void *data, unsigned int
77 77
78static inline int event_available(struct event_buffer *b, struct event_reader *r) 78static inline int event_available(struct event_buffer *b, struct event_reader *r)
79{ 79{
80 return (r->next_serial_number < b->next_serial_number); 80 return (r->next_serial_number < b->next_serial_number);
81} 81}
82 82
83/** 83/**
84 * get_next_event 84 * get_next_event
85 * Called by event readers (initiated from user space through the file 85 * Called by event readers (initiated from user space through the file
86 * system). 86 * system).
87 * Sleeps until a new event is available. 87 * Sleeps until a new event is available.
88 */ 88 */
89int ibmasm_get_next_event(struct service_processor *sp, struct event_reader *reader) 89int ibmasm_get_next_event(struct service_processor *sp, struct event_reader *reader)
diff --git a/drivers/misc/ibmasm/heartbeat.c b/drivers/misc/ibmasm/heartbeat.c
index 7fd7a43e38de..3036e785b3e4 100644
--- a/drivers/misc/ibmasm/heartbeat.c
+++ b/drivers/misc/ibmasm/heartbeat.c
@@ -18,7 +18,7 @@
18 * 18 *
19 * Copyright (C) IBM Corporation, 2004 19 * Copyright (C) IBM Corporation, 2004
20 * 20 *
21 * Author: Max Asböck <amax@us.ibm.com> 21 * Author: Max Asböck <amax@us.ibm.com>
22 * 22 *
23 */ 23 */
24 24
diff --git a/drivers/misc/ibmasm/i2o.h b/drivers/misc/ibmasm/i2o.h
index 958c957a5e75..bf2c738d2b72 100644
--- a/drivers/misc/ibmasm/i2o.h
+++ b/drivers/misc/ibmasm/i2o.h
@@ -17,7 +17,7 @@
17 * 17 *
18 * Copyright (C) IBM Corporation, 2004 18 * Copyright (C) IBM Corporation, 2004
19 * 19 *
20 * Author: Max Asböck <amax@us.ibm.com> 20 * Author: Max Asböck <amax@us.ibm.com>
21 * 21 *
22 */ 22 */
23 23
@@ -26,9 +26,9 @@ struct i2o_header {
26 u8 version; 26 u8 version;
27 u8 message_flags; 27 u8 message_flags;
28 u16 message_size; 28 u16 message_size;
29 u8 target; 29 u8 target;
30 u8 initiator_and_target; 30 u8 initiator_and_target;
31 u8 initiator; 31 u8 initiator;
32 u8 function; 32 u8 function;
33 u32 initiator_context; 33 u32 initiator_context;
34}; 34};
@@ -64,12 +64,12 @@ static inline unsigned short outgoing_message_size(unsigned int data_size)
64 size = sizeof(struct i2o_header) + data_size; 64 size = sizeof(struct i2o_header) + data_size;
65 65
66 i2o_size = size / sizeof(u32); 66 i2o_size = size / sizeof(u32);
67 67
68 if (size % sizeof(u32)) 68 if (size % sizeof(u32))
69 i2o_size++; 69 i2o_size++;
70 70
71 return i2o_size; 71 return i2o_size;
72} 72}
73 73
74static inline u32 incoming_data_size(struct i2o_message *i2o_message) 74static inline u32 incoming_data_size(struct i2o_message *i2o_message)
75{ 75{
diff --git a/drivers/misc/ibmasm/ibmasm.h b/drivers/misc/ibmasm/ibmasm.h
index 48d5abebfc30..de860bc6d3f5 100644
--- a/drivers/misc/ibmasm/ibmasm.h
+++ b/drivers/misc/ibmasm/ibmasm.h
@@ -18,7 +18,7 @@
18 * 18 *
19 * Copyright (C) IBM Corporation, 2004 19 * Copyright (C) IBM Corporation, 2004
20 * 20 *
21 * Author: Max Asböck <amax@us.ibm.com> 21 * Author: Max Asböck <amax@us.ibm.com>
22 * 22 *
23 */ 23 */
24 24
@@ -58,8 +58,8 @@ static inline char *get_timestamp(char *buf)
58 return buf; 58 return buf;
59} 59}
60 60
61#define IBMASM_CMD_PENDING 0 61#define IBMASM_CMD_PENDING 0
62#define IBMASM_CMD_COMPLETE 1 62#define IBMASM_CMD_COMPLETE 1
63#define IBMASM_CMD_FAILED 2 63#define IBMASM_CMD_FAILED 2
64 64
65#define IBMASM_CMD_TIMEOUT_NORMAL 45 65#define IBMASM_CMD_TIMEOUT_NORMAL 45
@@ -163,55 +163,55 @@ struct service_processor {
163}; 163};
164 164
165/* command processing */ 165/* command processing */
166extern struct command *ibmasm_new_command(struct service_processor *sp, size_t buffer_size); 166struct command *ibmasm_new_command(struct service_processor *sp, size_t buffer_size);
167extern void ibmasm_exec_command(struct service_processor *sp, struct command *cmd); 167void ibmasm_exec_command(struct service_processor *sp, struct command *cmd);
168extern void ibmasm_wait_for_response(struct command *cmd, int timeout); 168void ibmasm_wait_for_response(struct command *cmd, int timeout);
169extern void ibmasm_receive_command_response(struct service_processor *sp, void *response, size_t size); 169void ibmasm_receive_command_response(struct service_processor *sp, void *response, size_t size);
170 170
171/* event processing */ 171/* event processing */
172extern int ibmasm_event_buffer_init(struct service_processor *sp); 172int ibmasm_event_buffer_init(struct service_processor *sp);
173extern void ibmasm_event_buffer_exit(struct service_processor *sp); 173void ibmasm_event_buffer_exit(struct service_processor *sp);
174extern void ibmasm_receive_event(struct service_processor *sp, void *data, unsigned int data_size); 174void ibmasm_receive_event(struct service_processor *sp, void *data, unsigned int data_size);
175extern void ibmasm_event_reader_register(struct service_processor *sp, struct event_reader *reader); 175void ibmasm_event_reader_register(struct service_processor *sp, struct event_reader *reader);
176extern void ibmasm_event_reader_unregister(struct service_processor *sp, struct event_reader *reader); 176void ibmasm_event_reader_unregister(struct service_processor *sp, struct event_reader *reader);
177extern int ibmasm_get_next_event(struct service_processor *sp, struct event_reader *reader); 177int ibmasm_get_next_event(struct service_processor *sp, struct event_reader *reader);
178extern void ibmasm_cancel_next_event(struct event_reader *reader); 178void ibmasm_cancel_next_event(struct event_reader *reader);
179 179
180/* heartbeat - from SP to OS */ 180/* heartbeat - from SP to OS */
181extern void ibmasm_register_panic_notifier(void); 181void ibmasm_register_panic_notifier(void);
182extern void ibmasm_unregister_panic_notifier(void); 182void ibmasm_unregister_panic_notifier(void);
183extern int ibmasm_heartbeat_init(struct service_processor *sp); 183int ibmasm_heartbeat_init(struct service_processor *sp);
184extern void ibmasm_heartbeat_exit(struct service_processor *sp); 184void ibmasm_heartbeat_exit(struct service_processor *sp);
185extern void ibmasm_receive_heartbeat(struct service_processor *sp, void *message, size_t size); 185void ibmasm_receive_heartbeat(struct service_processor *sp, void *message, size_t size);
186 186
187/* reverse heartbeat - from OS to SP */ 187/* reverse heartbeat - from OS to SP */
188extern void ibmasm_init_reverse_heartbeat(struct service_processor *sp, struct reverse_heartbeat *rhb); 188void ibmasm_init_reverse_heartbeat(struct service_processor *sp, struct reverse_heartbeat *rhb);
189extern int ibmasm_start_reverse_heartbeat(struct service_processor *sp, struct reverse_heartbeat *rhb); 189int ibmasm_start_reverse_heartbeat(struct service_processor *sp, struct reverse_heartbeat *rhb);
190extern void ibmasm_stop_reverse_heartbeat(struct reverse_heartbeat *rhb); 190void ibmasm_stop_reverse_heartbeat(struct reverse_heartbeat *rhb);
191 191
192/* dot commands */ 192/* dot commands */
193extern void ibmasm_receive_message(struct service_processor *sp, void *data, int data_size); 193void ibmasm_receive_message(struct service_processor *sp, void *data, int data_size);
194extern int ibmasm_send_driver_vpd(struct service_processor *sp); 194int ibmasm_send_driver_vpd(struct service_processor *sp);
195extern int ibmasm_send_os_state(struct service_processor *sp, int os_state); 195int ibmasm_send_os_state(struct service_processor *sp, int os_state);
196 196
197/* low level message processing */ 197/* low level message processing */
198extern int ibmasm_send_i2o_message(struct service_processor *sp); 198int ibmasm_send_i2o_message(struct service_processor *sp);
199extern irqreturn_t ibmasm_interrupt_handler(int irq, void * dev_id); 199irqreturn_t ibmasm_interrupt_handler(int irq, void * dev_id);
200 200
201/* remote console */ 201/* remote console */
202extern void ibmasm_handle_mouse_interrupt(struct service_processor *sp); 202void ibmasm_handle_mouse_interrupt(struct service_processor *sp);
203extern int ibmasm_init_remote_input_dev(struct service_processor *sp); 203int ibmasm_init_remote_input_dev(struct service_processor *sp);
204extern void ibmasm_free_remote_input_dev(struct service_processor *sp); 204void ibmasm_free_remote_input_dev(struct service_processor *sp);
205 205
206/* file system */ 206/* file system */
207extern int ibmasmfs_register(void); 207int ibmasmfs_register(void);
208extern void ibmasmfs_unregister(void); 208void ibmasmfs_unregister(void);
209extern void ibmasmfs_add_sp(struct service_processor *sp); 209void ibmasmfs_add_sp(struct service_processor *sp);
210 210
211/* uart */ 211/* uart */
212#ifdef CONFIG_SERIAL_8250 212#ifdef CONFIG_SERIAL_8250
213extern void ibmasm_register_uart(struct service_processor *sp); 213void ibmasm_register_uart(struct service_processor *sp);
214extern void ibmasm_unregister_uart(struct service_processor *sp); 214void ibmasm_unregister_uart(struct service_processor *sp);
215#else 215#else
216#define ibmasm_register_uart(sp) do { } while(0) 216#define ibmasm_register_uart(sp) do { } while(0)
217#define ibmasm_unregister_uart(sp) do { } while(0) 217#define ibmasm_unregister_uart(sp) do { } while(0)
diff --git a/drivers/misc/ibmasm/ibmasmfs.c b/drivers/misc/ibmasm/ibmasmfs.c
index c436d3de8b8b..eb7b073734b8 100644
--- a/drivers/misc/ibmasm/ibmasmfs.c
+++ b/drivers/misc/ibmasm/ibmasmfs.c
@@ -17,12 +17,12 @@
17 * 17 *
18 * Copyright (C) IBM Corporation, 2004 18 * Copyright (C) IBM Corporation, 2004
19 * 19 *
20 * Author: Max Asböck <amax@us.ibm.com> 20 * Author: Max Asböck <amax@us.ibm.com>
21 * 21 *
22 */ 22 */
23 23
24/* 24/*
25 * Parts of this code are based on an article by Jonathan Corbet 25 * Parts of this code are based on an article by Jonathan Corbet
26 * that appeared in Linux Weekly News. 26 * that appeared in Linux Weekly News.
27 */ 27 */
28 28
@@ -55,22 +55,22 @@
55 * For each service processor the following files are created: 55 * For each service processor the following files are created:
56 * 56 *
57 * command: execute dot commands 57 * command: execute dot commands
58 * write: execute a dot command on the service processor 58 * write: execute a dot command on the service processor
59 * read: return the result of a previously executed dot command 59 * read: return the result of a previously executed dot command
60 * 60 *
61 * events: listen for service processor events 61 * events: listen for service processor events
62 * read: sleep (interruptible) until an event occurs 62 * read: sleep (interruptible) until an event occurs
63 * write: wakeup sleeping event listener 63 * write: wakeup sleeping event listener
64 * 64 *
65 * reverse_heartbeat: send a heartbeat to the service processor 65 * reverse_heartbeat: send a heartbeat to the service processor
66 * read: sleep (interruptible) until the reverse heartbeat fails 66 * read: sleep (interruptible) until the reverse heartbeat fails
67 * write: wakeup sleeping heartbeat listener 67 * write: wakeup sleeping heartbeat listener
68 * 68 *
69 * remote_video/width 69 * remote_video/width
70 * remote_video/height 70 * remote_video/height
71 * remote_video/width: control remote display settings 71 * remote_video/width: control remote display settings
72 * write: set value 72 * write: set value
73 * read: read value 73 * read: read value
74 */ 74 */
75 75
76#include <linux/fs.h> 76#include <linux/fs.h>
@@ -155,7 +155,7 @@ static struct inode *ibmasmfs_make_inode(struct super_block *sb, int mode)
155 155
156static struct dentry *ibmasmfs_create_file (struct super_block *sb, 156static struct dentry *ibmasmfs_create_file (struct super_block *sb,
157 struct dentry *parent, 157 struct dentry *parent,
158 const char *name, 158 const char *name,
159 const struct file_operations *fops, 159 const struct file_operations *fops,
160 void *data, 160 void *data,
161 int mode) 161 int mode)
@@ -261,7 +261,7 @@ static int command_file_close(struct inode *inode, struct file *file)
261 struct ibmasmfs_command_data *command_data = file->private_data; 261 struct ibmasmfs_command_data *command_data = file->private_data;
262 262
263 if (command_data->command) 263 if (command_data->command)
264 command_put(command_data->command); 264 command_put(command_data->command);
265 265
266 kfree(command_data); 266 kfree(command_data);
267 return 0; 267 return 0;
@@ -348,7 +348,7 @@ static ssize_t command_file_write(struct file *file, const char __user *ubuff, s
348static int event_file_open(struct inode *inode, struct file *file) 348static int event_file_open(struct inode *inode, struct file *file)
349{ 349{
350 struct ibmasmfs_event_data *event_data; 350 struct ibmasmfs_event_data *event_data;
351 struct service_processor *sp; 351 struct service_processor *sp;
352 352
353 if (!inode->i_private) 353 if (!inode->i_private)
354 return -ENODEV; 354 return -ENODEV;
@@ -573,7 +573,7 @@ static ssize_t remote_settings_file_write(struct file *file, const char __user *
573 kfree(buff); 573 kfree(buff);
574 return -EFAULT; 574 return -EFAULT;
575 } 575 }
576 576
577 value = simple_strtoul(buff, NULL, 10); 577 value = simple_strtoul(buff, NULL, 10);
578 writel(value, address); 578 writel(value, address);
579 kfree(buff); 579 kfree(buff);
diff --git a/drivers/misc/ibmasm/lowlevel.c b/drivers/misc/ibmasm/lowlevel.c
index a3c589b7cbfa..4b2398e27fd5 100644
--- a/drivers/misc/ibmasm/lowlevel.c
+++ b/drivers/misc/ibmasm/lowlevel.c
@@ -17,7 +17,7 @@
17 * 17 *
18 * Copyright (C) IBM Corporation, 2004 18 * Copyright (C) IBM Corporation, 2004
19 * 19 *
20 * Author: Max Asböck <amax@us.ibm.com> 20 * Author: Max Asböck <amax@us.ibm.com>
21 * 21 *
22 */ 22 */
23 23
diff --git a/drivers/misc/ibmasm/lowlevel.h b/drivers/misc/ibmasm/lowlevel.h
index e5ed59c589aa..766766523a60 100644
--- a/drivers/misc/ibmasm/lowlevel.h
+++ b/drivers/misc/ibmasm/lowlevel.h
@@ -17,7 +17,7 @@
17 * 17 *
18 * Copyright (C) IBM Corporation, 2004 18 * Copyright (C) IBM Corporation, 2004
19 * 19 *
20 * Author: Max Asböck <amax@us.ibm.com> 20 * Author: Max Asböck <amax@us.ibm.com>
21 * 21 *
22 */ 22 */
23 23
@@ -48,9 +48,9 @@
48#define INTR_CONTROL_REGISTER 0x13A4 48#define INTR_CONTROL_REGISTER 0x13A4
49 49
50#define SCOUT_COM_A_BASE 0x0000 50#define SCOUT_COM_A_BASE 0x0000
51#define SCOUT_COM_B_BASE 0x0100 51#define SCOUT_COM_B_BASE 0x0100
52#define SCOUT_COM_C_BASE 0x0200 52#define SCOUT_COM_C_BASE 0x0200
53#define SCOUT_COM_D_BASE 0x0300 53#define SCOUT_COM_D_BASE 0x0300
54 54
55static inline int sp_interrupt_pending(void __iomem *base_address) 55static inline int sp_interrupt_pending(void __iomem *base_address)
56{ 56{
@@ -86,12 +86,12 @@ static inline void disable_sp_interrupts(void __iomem *base_address)
86 86
87static inline void enable_uart_interrupts(void __iomem *base_address) 87static inline void enable_uart_interrupts(void __iomem *base_address)
88{ 88{
89 ibmasm_enable_interrupts(base_address, UART_INTR_MASK); 89 ibmasm_enable_interrupts(base_address, UART_INTR_MASK);
90} 90}
91 91
92static inline void disable_uart_interrupts(void __iomem *base_address) 92static inline void disable_uart_interrupts(void __iomem *base_address)
93{ 93{
94 ibmasm_disable_interrupts(base_address, UART_INTR_MASK); 94 ibmasm_disable_interrupts(base_address, UART_INTR_MASK);
95} 95}
96 96
97#define valid_mfa(mfa) ( (mfa) != NO_MFAS_AVAILABLE ) 97#define valid_mfa(mfa) ( (mfa) != NO_MFAS_AVAILABLE )
@@ -111,7 +111,7 @@ static inline u32 get_mfa_outbound(void __iomem *base_address)
111 111
112static inline void set_mfa_outbound(void __iomem *base_address, u32 mfa) 112static inline void set_mfa_outbound(void __iomem *base_address, u32 mfa)
113{ 113{
114 writel(mfa, base_address + OUTBOUND_QUEUE_PORT); 114 writel(mfa, base_address + OUTBOUND_QUEUE_PORT);
115} 115}
116 116
117static inline u32 get_mfa_inbound(void __iomem *base_address) 117static inline u32 get_mfa_inbound(void __iomem *base_address)
@@ -126,7 +126,7 @@ static inline u32 get_mfa_inbound(void __iomem *base_address)
126 126
127static inline void set_mfa_inbound(void __iomem *base_address, u32 mfa) 127static inline void set_mfa_inbound(void __iomem *base_address, u32 mfa)
128{ 128{
129 writel(mfa, base_address + INBOUND_QUEUE_PORT); 129 writel(mfa, base_address + INBOUND_QUEUE_PORT);
130} 130}
131 131
132static inline struct i2o_message *get_i2o_message(void __iomem *base_address, u32 mfa) 132static inline struct i2o_message *get_i2o_message(void __iomem *base_address, u32 mfa)
diff --git a/drivers/misc/ibmasm/module.c b/drivers/misc/ibmasm/module.c
index 2f3bddfab937..fb03a853fac4 100644
--- a/drivers/misc/ibmasm/module.c
+++ b/drivers/misc/ibmasm/module.c
@@ -18,9 +18,9 @@
18 * 18 *
19 * Copyright (C) IBM Corporation, 2004 19 * Copyright (C) IBM Corporation, 2004
20 * 20 *
21 * Author: Max Asböck <amax@us.ibm.com> 21 * Author: Max Asböck <amax@us.ibm.com>
22 * 22 *
23 * This driver is based on code originally written by Pete Reynolds 23 * This driver is based on code originally written by Pete Reynolds
24 * and others. 24 * and others.
25 * 25 *
26 */ 26 */
@@ -30,13 +30,13 @@
30 * 30 *
31 * 1) When loaded it sends a message to the service processor, 31 * 1) When loaded it sends a message to the service processor,
32 * indicating that an OS is * running. This causes the service processor 32 * indicating that an OS is * running. This causes the service processor
33 * to send periodic heartbeats to the OS. 33 * to send periodic heartbeats to the OS.
34 * 34 *
35 * 2) Answers the periodic heartbeats sent by the service processor. 35 * 2) Answers the periodic heartbeats sent by the service processor.
36 * Failure to do so would result in system reboot. 36 * Failure to do so would result in system reboot.
37 * 37 *
38 * 3) Acts as a pass through for dot commands sent from user applications. 38 * 3) Acts as a pass through for dot commands sent from user applications.
39 * The interface for this is the ibmasmfs file system. 39 * The interface for this is the ibmasmfs file system.
40 * 40 *
41 * 4) Allows user applications to register for event notification. Events 41 * 4) Allows user applications to register for event notification. Events
42 * are sent to the driver through interrupts. They can be read from user 42 * are sent to the driver through interrupts. They can be read from user
@@ -105,7 +105,7 @@ static int __devinit ibmasm_init_one(struct pci_dev *pdev, const struct pci_devi
105 } 105 }
106 106
107 sp->irq = pdev->irq; 107 sp->irq = pdev->irq;
108 sp->base_address = ioremap(pci_resource_start(pdev, 0), 108 sp->base_address = ioremap(pci_resource_start(pdev, 0),
109 pci_resource_len(pdev, 0)); 109 pci_resource_len(pdev, 0));
110 if (sp->base_address == 0) { 110 if (sp->base_address == 0) {
111 dev_err(sp->dev, "Failed to ioremap pci memory\n"); 111 dev_err(sp->dev, "Failed to ioremap pci memory\n");
diff --git a/drivers/misc/ibmasm/r_heartbeat.c b/drivers/misc/ibmasm/r_heartbeat.c
index f8fdb2d5417e..bec9e2c44bef 100644
--- a/drivers/misc/ibmasm/r_heartbeat.c
+++ b/drivers/misc/ibmasm/r_heartbeat.c
@@ -16,7 +16,7 @@
16 * 16 *
17 * Copyright (C) IBM Corporation, 2004 17 * Copyright (C) IBM Corporation, 2004
18 * 18 *
19 * Author: Max Asböck <amax@us.ibm.com> 19 * Author: Max Asböck <amax@us.ibm.com>
20 * 20 *
21 */ 21 */
22 22
@@ -36,10 +36,10 @@ static struct {
36 unsigned char command[3]; 36 unsigned char command[3];
37} rhb_dot_cmd = { 37} rhb_dot_cmd = {
38 .header = { 38 .header = {
39 .type = sp_read, 39 .type = sp_read,
40 .command_size = 3, 40 .command_size = 3,
41 .data_size = 0, 41 .data_size = 0,
42 .status = 0 42 .status = 0
43 }, 43 },
44 .command = { 4, 3, 6 } 44 .command = { 4, 3, 6 }
45}; 45};
@@ -76,9 +76,9 @@ int ibmasm_start_reverse_heartbeat(struct service_processor *sp, struct reverse_
76 if (cmd->status != IBMASM_CMD_COMPLETE) 76 if (cmd->status != IBMASM_CMD_COMPLETE)
77 times_failed++; 77 times_failed++;
78 78
79 wait_event_interruptible_timeout(rhb->wait, 79 wait_event_interruptible_timeout(rhb->wait,
80 rhb->stopped, 80 rhb->stopped,
81 REVERSE_HEARTBEAT_TIMEOUT * HZ); 81 REVERSE_HEARTBEAT_TIMEOUT * HZ);
82 82
83 if (signal_pending(current) || rhb->stopped) { 83 if (signal_pending(current) || rhb->stopped) {
84 result = -EINTR; 84 result = -EINTR;
diff --git a/drivers/misc/ibmasm/remote.c b/drivers/misc/ibmasm/remote.c
index a40fda6c402c..0550ce075fc4 100644
--- a/drivers/misc/ibmasm/remote.c
+++ b/drivers/misc/ibmasm/remote.c
@@ -28,11 +28,10 @@
28#include "ibmasm.h" 28#include "ibmasm.h"
29#include "remote.h" 29#include "remote.h"
30 30
31static int xmax = 1600; 31#define MOUSE_X_MAX 1600
32static int ymax = 1200; 32#define MOUSE_Y_MAX 1200
33 33
34 34static const unsigned short xlate_high[XLATE_SIZE] = {
35static unsigned short xlate_high[XLATE_SIZE] = {
36 [KEY_SYM_ENTER & 0xff] = KEY_ENTER, 35 [KEY_SYM_ENTER & 0xff] = KEY_ENTER,
37 [KEY_SYM_KPSLASH & 0xff] = KEY_KPSLASH, 36 [KEY_SYM_KPSLASH & 0xff] = KEY_KPSLASH,
38 [KEY_SYM_KPSTAR & 0xff] = KEY_KPASTERISK, 37 [KEY_SYM_KPSTAR & 0xff] = KEY_KPASTERISK,
@@ -81,7 +80,8 @@ static unsigned short xlate_high[XLATE_SIZE] = {
81 [KEY_SYM_NUM_LOCK & 0xff] = KEY_NUMLOCK, 80 [KEY_SYM_NUM_LOCK & 0xff] = KEY_NUMLOCK,
82 [KEY_SYM_SCR_LOCK & 0xff] = KEY_SCROLLLOCK, 81 [KEY_SYM_SCR_LOCK & 0xff] = KEY_SCROLLLOCK,
83}; 82};
84static unsigned short xlate[XLATE_SIZE] = { 83
84static const unsigned short xlate[XLATE_SIZE] = {
85 [NO_KEYCODE] = KEY_RESERVED, 85 [NO_KEYCODE] = KEY_RESERVED,
86 [KEY_SYM_SPACE] = KEY_SPACE, 86 [KEY_SYM_SPACE] = KEY_SPACE,
87 [KEY_SYM_TILDE] = KEY_GRAVE, [KEY_SYM_BKTIC] = KEY_GRAVE, 87 [KEY_SYM_TILDE] = KEY_GRAVE, [KEY_SYM_BKTIC] = KEY_GRAVE,
@@ -133,19 +133,16 @@ static unsigned short xlate[XLATE_SIZE] = {
133 [KEY_SYM_Z] = KEY_Z, [KEY_SYM_z] = KEY_Z, 133 [KEY_SYM_Z] = KEY_Z, [KEY_SYM_z] = KEY_Z,
134}; 134};
135 135
136static char remote_mouse_name[] = "ibmasm RSA I remote mouse";
137static char remote_keybd_name[] = "ibmasm RSA I remote keyboard";
138
139static void print_input(struct remote_input *input) 136static void print_input(struct remote_input *input)
140{ 137{
141 if (input->type == INPUT_TYPE_MOUSE) { 138 if (input->type == INPUT_TYPE_MOUSE) {
142 unsigned char buttons = input->mouse_buttons; 139 unsigned char buttons = input->mouse_buttons;
143 dbg("remote mouse movement: (x,y)=(%d,%d)%s%s%s%s\n", 140 dbg("remote mouse movement: (x,y)=(%d,%d)%s%s%s%s\n",
144 input->data.mouse.x, input->data.mouse.y, 141 input->data.mouse.x, input->data.mouse.y,
145 (buttons)?" -- buttons:":"", 142 (buttons) ? " -- buttons:" : "",
146 (buttons & REMOTE_BUTTON_LEFT)?"left ":"", 143 (buttons & REMOTE_BUTTON_LEFT) ? "left " : "",
147 (buttons & REMOTE_BUTTON_MIDDLE)?"middle ":"", 144 (buttons & REMOTE_BUTTON_MIDDLE) ? "middle " : "",
148 (buttons & REMOTE_BUTTON_RIGHT)?"right":"" 145 (buttons & REMOTE_BUTTON_RIGHT) ? "right" : ""
149 ); 146 );
150 } else { 147 } else {
151 dbg("remote keypress (code, flag, down):" 148 dbg("remote keypress (code, flag, down):"
@@ -180,7 +177,7 @@ static void send_keyboard_event(struct input_dev *dev,
180 key = xlate_high[code & 0xff]; 177 key = xlate_high[code & 0xff];
181 else 178 else
182 key = xlate[code]; 179 key = xlate[code];
183 input_report_key(dev, key, (input->data.keyboard.key_down) ? 1 : 0); 180 input_report_key(dev, key, input->data.keyboard.key_down);
184 input_sync(dev); 181 input_sync(dev);
185} 182}
186 183
@@ -228,20 +225,22 @@ int ibmasm_init_remote_input_dev(struct service_processor *sp)
228 mouse_dev->id.vendor = pdev->vendor; 225 mouse_dev->id.vendor = pdev->vendor;
229 mouse_dev->id.product = pdev->device; 226 mouse_dev->id.product = pdev->device;
230 mouse_dev->id.version = 1; 227 mouse_dev->id.version = 1;
228 mouse_dev->dev.parent = sp->dev;
231 mouse_dev->evbit[0] = BIT(EV_KEY) | BIT(EV_ABS); 229 mouse_dev->evbit[0] = BIT(EV_KEY) | BIT(EV_ABS);
232 mouse_dev->keybit[LONG(BTN_MOUSE)] = BIT(BTN_LEFT) | 230 mouse_dev->keybit[LONG(BTN_MOUSE)] = BIT(BTN_LEFT) |
233 BIT(BTN_RIGHT) | BIT(BTN_MIDDLE); 231 BIT(BTN_RIGHT) | BIT(BTN_MIDDLE);
234 set_bit(BTN_TOUCH, mouse_dev->keybit); 232 set_bit(BTN_TOUCH, mouse_dev->keybit);
235 mouse_dev->name = remote_mouse_name; 233 mouse_dev->name = "ibmasm RSA I remote mouse";
236 input_set_abs_params(mouse_dev, ABS_X, 0, xmax, 0, 0); 234 input_set_abs_params(mouse_dev, ABS_X, 0, MOUSE_X_MAX, 0, 0);
237 input_set_abs_params(mouse_dev, ABS_Y, 0, ymax, 0, 0); 235 input_set_abs_params(mouse_dev, ABS_Y, 0, MOUSE_Y_MAX, 0, 0);
238 236
239 mouse_dev->id.bustype = BUS_PCI; 237 keybd_dev->id.bustype = BUS_PCI;
240 keybd_dev->id.vendor = pdev->vendor; 238 keybd_dev->id.vendor = pdev->vendor;
241 keybd_dev->id.product = pdev->device; 239 keybd_dev->id.product = pdev->device;
242 mouse_dev->id.version = 2; 240 keybd_dev->id.version = 2;
241 keybd_dev->dev.parent = sp->dev;
243 keybd_dev->evbit[0] = BIT(EV_KEY); 242 keybd_dev->evbit[0] = BIT(EV_KEY);
244 keybd_dev->name = remote_keybd_name; 243 keybd_dev->name = "ibmasm RSA I remote keyboard";
245 244
246 for (i = 0; i < XLATE_SIZE; i++) { 245 for (i = 0; i < XLATE_SIZE; i++) {
247 if (xlate_high[i]) 246 if (xlate_high[i])
diff --git a/drivers/misc/ibmasm/remote.h b/drivers/misc/ibmasm/remote.h
index b7076a8442d2..72acf5af7a2a 100644
--- a/drivers/misc/ibmasm/remote.h
+++ b/drivers/misc/ibmasm/remote.h
@@ -18,7 +18,7 @@
18 * 18 *
19 * Copyright (C) IBM Corporation, 2004 19 * Copyright (C) IBM Corporation, 2004
20 * 20 *
21 * Author: Max Asböck <amax@us.ibm.com> 21 * Author: Max Asböck <amax@us.ibm.com>
22 * 22 *
23 * Orignally written by Pete Reynolds 23 * Orignally written by Pete Reynolds
24 */ 24 */
@@ -73,7 +73,7 @@ struct keyboard_input {
73 73
74 74
75 75
76struct remote_input { 76struct remote_input {
77 union { 77 union {
78 struct mouse_input mouse; 78 struct mouse_input mouse;
79 struct keyboard_input keyboard; 79 struct keyboard_input keyboard;
@@ -85,7 +85,7 @@ struct remote_input {
85 unsigned char pad3; 85 unsigned char pad3;
86}; 86};
87 87
88#define mouse_addr(sp) (sp->base_address + CONDOR_MOUSE_DATA) 88#define mouse_addr(sp) (sp->base_address + CONDOR_MOUSE_DATA)
89#define display_width(sp) (mouse_addr(sp) + CONDOR_INPUT_DISPLAY_RESX) 89#define display_width(sp) (mouse_addr(sp) + CONDOR_INPUT_DISPLAY_RESX)
90#define display_height(sp) (mouse_addr(sp) + CONDOR_INPUT_DISPLAY_RESY) 90#define display_height(sp) (mouse_addr(sp) + CONDOR_INPUT_DISPLAY_RESY)
91#define display_depth(sp) (mouse_addr(sp) + CONDOR_INPUT_DISPLAY_BITS) 91#define display_depth(sp) (mouse_addr(sp) + CONDOR_INPUT_DISPLAY_BITS)
@@ -93,7 +93,7 @@ struct remote_input {
93#define vnc_status(sp) (mouse_addr(sp) + CONDOR_OUTPUT_VNC_STATUS) 93#define vnc_status(sp) (mouse_addr(sp) + CONDOR_OUTPUT_VNC_STATUS)
94#define isr_control(sp) (mouse_addr(sp) + CONDOR_MOUSE_ISR_CONTROL) 94#define isr_control(sp) (mouse_addr(sp) + CONDOR_MOUSE_ISR_CONTROL)
95 95
96#define mouse_interrupt_pending(sp) readl(mouse_addr(sp) + CONDOR_MOUSE_ISR_STATUS) 96#define mouse_interrupt_pending(sp) readl(mouse_addr(sp) + CONDOR_MOUSE_ISR_STATUS)
97#define clear_mouse_interrupt(sp) writel(0, mouse_addr(sp) + CONDOR_MOUSE_ISR_STATUS) 97#define clear_mouse_interrupt(sp) writel(0, mouse_addr(sp) + CONDOR_MOUSE_ISR_STATUS)
98#define enable_mouse_interrupts(sp) writel(1, mouse_addr(sp) + CONDOR_MOUSE_ISR_CONTROL) 98#define enable_mouse_interrupts(sp) writel(1, mouse_addr(sp) + CONDOR_MOUSE_ISR_CONTROL)
99#define disable_mouse_interrupts(sp) writel(0, mouse_addr(sp) + CONDOR_MOUSE_ISR_CONTROL) 99#define disable_mouse_interrupts(sp) writel(0, mouse_addr(sp) + CONDOR_MOUSE_ISR_CONTROL)
diff --git a/drivers/misc/ibmasm/uart.c b/drivers/misc/ibmasm/uart.c
index 9783caf49696..93baa350d698 100644
--- a/drivers/misc/ibmasm/uart.c
+++ b/drivers/misc/ibmasm/uart.c
@@ -18,7 +18,7 @@
18 * 18 *
19 * Copyright (C) IBM Corporation, 2004 19 * Copyright (C) IBM Corporation, 2004
20 * 20 *
21 * Author: Max Asböck <amax@us.ibm.com> 21 * Author: Max Asböck <amax@us.ibm.com>
22 * 22 *
23 */ 23 */
24 24
diff --git a/drivers/mmc/card/queue.c b/drivers/mmc/card/queue.c
index 4fb2089dc690..b53dac8d1b69 100644
--- a/drivers/mmc/card/queue.c
+++ b/drivers/mmc/card/queue.c
@@ -11,6 +11,7 @@
11 */ 11 */
12#include <linux/module.h> 12#include <linux/module.h>
13#include <linux/blkdev.h> 13#include <linux/blkdev.h>
14#include <linux/freezer.h>
14#include <linux/kthread.h> 15#include <linux/kthread.h>
15 16
16#include <linux/mmc/card.h> 17#include <linux/mmc/card.h>
@@ -44,11 +45,7 @@ static int mmc_queue_thread(void *d)
44 struct mmc_queue *mq = d; 45 struct mmc_queue *mq = d;
45 struct request_queue *q = mq->queue; 46 struct request_queue *q = mq->queue;
46 47
47 /* 48 current->flags |= PF_MEMALLOC;
48 * Set iothread to ensure that we aren't put to sleep by
49 * the process freezing. We handle suspension ourselves.
50 */
51 current->flags |= PF_MEMALLOC|PF_NOFREEZE;
52 49
53 down(&mq->thread_sem); 50 down(&mq->thread_sem);
54 do { 51 do {
diff --git a/drivers/mtd/mtd_blkdevs.c b/drivers/mtd/mtd_blkdevs.c
index 51bc7e2f1f22..ef89780eb9d6 100644
--- a/drivers/mtd/mtd_blkdevs.c
+++ b/drivers/mtd/mtd_blkdevs.c
@@ -16,6 +16,7 @@
16#include <linux/mtd/mtd.h> 16#include <linux/mtd/mtd.h>
17#include <linux/blkdev.h> 17#include <linux/blkdev.h>
18#include <linux/blkpg.h> 18#include <linux/blkpg.h>
19#include <linux/freezer.h>
19#include <linux/spinlock.h> 20#include <linux/spinlock.h>
20#include <linux/hdreg.h> 21#include <linux/hdreg.h>
21#include <linux/init.h> 22#include <linux/init.h>
@@ -80,7 +81,7 @@ static int mtd_blktrans_thread(void *arg)
80 struct request_queue *rq = tr->blkcore_priv->rq; 81 struct request_queue *rq = tr->blkcore_priv->rq;
81 82
82 /* we might get involved when memory gets low, so use PF_MEMALLOC */ 83 /* we might get involved when memory gets low, so use PF_MEMALLOC */
83 current->flags |= PF_MEMALLOC | PF_NOFREEZE; 84 current->flags |= PF_MEMALLOC;
84 85
85 spin_lock_irq(rq->queue_lock); 86 spin_lock_irq(rq->queue_lock);
86 while (!kthread_should_stop()) { 87 while (!kthread_should_stop()) {
diff --git a/drivers/mtd/ubi/eba.c b/drivers/mtd/ubi/eba.c
index 74002945b71b..7c6b223b3f8a 100644
--- a/drivers/mtd/ubi/eba.c
+++ b/drivers/mtd/ubi/eba.c
@@ -368,7 +368,7 @@ int ubi_eba_read_leb(struct ubi_device *ubi, int vol_id, int lnum, void *buf,
368 int err, pnum, scrub = 0, idx = vol_id2idx(ubi, vol_id); 368 int err, pnum, scrub = 0, idx = vol_id2idx(ubi, vol_id);
369 struct ubi_vid_hdr *vid_hdr; 369 struct ubi_vid_hdr *vid_hdr;
370 struct ubi_volume *vol = ubi->volumes[idx]; 370 struct ubi_volume *vol = ubi->volumes[idx];
371 uint32_t crc, crc1; 371 uint32_t uninitialized_var(crc);
372 372
373 err = leb_read_lock(ubi, vol_id, lnum); 373 err = leb_read_lock(ubi, vol_id, lnum);
374 if (err) 374 if (err)
@@ -451,7 +451,7 @@ retry:
451 } 451 }
452 452
453 if (check) { 453 if (check) {
454 crc1 = crc32(UBI_CRC32_INIT, buf, len); 454 uint32_t crc1 = crc32(UBI_CRC32_INIT, buf, len);
455 if (crc1 != crc) { 455 if (crc1 != crc) {
456 ubi_warn("CRC error: calculated %#08x, must be %#08x", 456 ubi_warn("CRC error: calculated %#08x, must be %#08x",
457 crc1, crc); 457 crc1, crc);
diff --git a/drivers/mtd/ubi/wl.c b/drivers/mtd/ubi/wl.c
index 9ecaf77eca9e..ab2174a56bc2 100644
--- a/drivers/mtd/ubi/wl.c
+++ b/drivers/mtd/ubi/wl.c
@@ -1346,6 +1346,7 @@ static int ubi_thread(void *u)
1346 ubi_msg("background thread \"%s\" started, PID %d", 1346 ubi_msg("background thread \"%s\" started, PID %d",
1347 ubi->bgt_name, current->pid); 1347 ubi->bgt_name, current->pid);
1348 1348
1349 set_freezable();
1349 for (;;) { 1350 for (;;) {
1350 int err; 1351 int err;
1351 1352
diff --git a/drivers/net/atl1/atl1_main.c b/drivers/net/atl1/atl1_main.c
index 4a18b881ae9a..fd1e156f1747 100644
--- a/drivers/net/atl1/atl1_main.c
+++ b/drivers/net/atl1/atl1_main.c
@@ -75,6 +75,7 @@
75#include <linux/compiler.h> 75#include <linux/compiler.h>
76#include <linux/delay.h> 76#include <linux/delay.h>
77#include <linux/mii.h> 77#include <linux/mii.h>
78#include <linux/interrupt.h>
78#include <net/checksum.h> 79#include <net/checksum.h>
79 80
80#include <asm/atomic.h> 81#include <asm/atomic.h>
diff --git a/drivers/net/eepro100.c b/drivers/net/eepro100.c
index 9afa47edfc58..3c54014acece 100644
--- a/drivers/net/eepro100.c
+++ b/drivers/net/eepro100.c
@@ -2292,10 +2292,15 @@ static int eepro100_resume(struct pci_dev *pdev)
2292 struct net_device *dev = pci_get_drvdata (pdev); 2292 struct net_device *dev = pci_get_drvdata (pdev);
2293 struct speedo_private *sp = netdev_priv(dev); 2293 struct speedo_private *sp = netdev_priv(dev);
2294 void __iomem *ioaddr = sp->regs; 2294 void __iomem *ioaddr = sp->regs;
2295 int rc;
2295 2296
2296 pci_set_power_state(pdev, PCI_D0); 2297 pci_set_power_state(pdev, PCI_D0);
2297 pci_restore_state(pdev); 2298 pci_restore_state(pdev);
2298 pci_enable_device(pdev); 2299
2300 rc = pci_enable_device(pdev);
2301 if (rc)
2302 return rc;
2303
2299 pci_set_master(pdev); 2304 pci_set_master(pdev);
2300 2305
2301 if (!netif_running(dev)) 2306 if (!netif_running(dev))
diff --git a/drivers/net/natsemi.c b/drivers/net/natsemi.c
index 3450051ae56b..6bb48ba80964 100644
--- a/drivers/net/natsemi.c
+++ b/drivers/net/natsemi.c
@@ -671,7 +671,7 @@ static ssize_t natsemi_show_##_name(struct device *dev, \
671#define NATSEMI_CREATE_FILE(_dev, _name) \ 671#define NATSEMI_CREATE_FILE(_dev, _name) \
672 device_create_file(&_dev->dev, &dev_attr_##_name) 672 device_create_file(&_dev->dev, &dev_attr_##_name)
673#define NATSEMI_REMOVE_FILE(_dev, _name) \ 673#define NATSEMI_REMOVE_FILE(_dev, _name) \
674 device_create_file(&_dev->dev, &dev_attr_##_name) 674 device_remove_file(&_dev->dev, &dev_attr_##_name)
675 675
676NATSEMI_ATTR(dspcfg_workaround); 676NATSEMI_ATTR(dspcfg_workaround);
677 677
diff --git a/drivers/net/ne2k-pci.c b/drivers/net/ne2k-pci.c
index 995c0a5d4066..cfdeaf7aa163 100644
--- a/drivers/net/ne2k-pci.c
+++ b/drivers/net/ne2k-pci.c
@@ -669,10 +669,15 @@ static int ne2k_pci_suspend (struct pci_dev *pdev, pm_message_t state)
669static int ne2k_pci_resume (struct pci_dev *pdev) 669static int ne2k_pci_resume (struct pci_dev *pdev)
670{ 670{
671 struct net_device *dev = pci_get_drvdata (pdev); 671 struct net_device *dev = pci_get_drvdata (pdev);
672 int rc;
672 673
673 pci_set_power_state(pdev, 0); 674 pci_set_power_state(pdev, 0);
674 pci_restore_state(pdev); 675 pci_restore_state(pdev);
675 pci_enable_device(pdev); 676
677 rc = pci_enable_device(pdev);
678 if (rc)
679 return rc;
680
676 NS8390_init(dev, 1); 681 NS8390_init(dev, 1);
677 netif_device_attach(dev); 682 netif_device_attach(dev);
678 683
diff --git a/drivers/net/r8169.c b/drivers/net/r8169.c
index 982a9010c7a9..bb6896ae3151 100644
--- a/drivers/net/r8169.c
+++ b/drivers/net/r8169.c
@@ -2338,7 +2338,7 @@ static int rtl8169_xmit_frags(struct rtl8169_private *tp, struct sk_buff *skb,
2338{ 2338{
2339 struct skb_shared_info *info = skb_shinfo(skb); 2339 struct skb_shared_info *info = skb_shinfo(skb);
2340 unsigned int cur_frag, entry; 2340 unsigned int cur_frag, entry;
2341 struct TxDesc *txd; 2341 struct TxDesc * uninitialized_var(txd);
2342 2342
2343 entry = tp->cur_tx; 2343 entry = tp->cur_tx;
2344 for (cur_frag = 0; cur_frag < info->nr_frags; cur_frag++) { 2344 for (cur_frag = 0; cur_frag < info->nr_frags; cur_frag++) {
diff --git a/drivers/net/tokenring/smctr.c b/drivers/net/tokenring/smctr.c
index 58d7e5d452fa..f83bb5cb0d3d 100644
--- a/drivers/net/tokenring/smctr.c
+++ b/drivers/net/tokenring/smctr.c
@@ -3692,7 +3692,6 @@ static int smctr_process_rx_packet(MAC_HEADER *rmf, __u16 size,
3692 __u16 rcode, correlator; 3692 __u16 rcode, correlator;
3693 int err = 0; 3693 int err = 0;
3694 __u8 xframe = 1; 3694 __u8 xframe = 1;
3695 __u16 tx_fstatus;
3696 3695
3697 rmf->vl = SWAP_BYTES(rmf->vl); 3696 rmf->vl = SWAP_BYTES(rmf->vl);
3698 if(rx_status & FCB_RX_STATUS_DA_MATCHED) 3697 if(rx_status & FCB_RX_STATUS_DA_MATCHED)
@@ -3783,7 +3782,9 @@ static int smctr_process_rx_packet(MAC_HEADER *rmf, __u16 size,
3783 } 3782 }
3784 break; 3783 break;
3785 3784
3786 case TX_FORWARD: 3785 case TX_FORWARD: {
3786 __u16 uninitialized_var(tx_fstatus);
3787
3787 if((rcode = smctr_rcv_tx_forward(dev, rmf)) 3788 if((rcode = smctr_rcv_tx_forward(dev, rmf))
3788 != POSITIVE_ACK) 3789 != POSITIVE_ACK)
3789 { 3790 {
@@ -3811,6 +3812,7 @@ static int smctr_process_rx_packet(MAC_HEADER *rmf, __u16 size,
3811 } 3812 }
3812 } 3813 }
3813 break; 3814 break;
3815 }
3814 3816
3815 /* Received MAC Frames Processed by CRS/REM/RPS. */ 3817 /* Received MAC Frames Processed by CRS/REM/RPS. */
3816 case RSP: 3818 case RSP:
diff --git a/drivers/net/wan/pc300_drv.c b/drivers/net/wan/pc300_drv.c
index ec1c556a47ca..5d8c78ee2cd9 100644
--- a/drivers/net/wan/pc300_drv.c
+++ b/drivers/net/wan/pc300_drv.c
@@ -2833,6 +2833,8 @@ static int clock_rate_calc(uclong rate, uclong clock, int *br_io)
2833 int br, tc; 2833 int br, tc;
2834 int br_pwr, error; 2834 int br_pwr, error;
2835 2835
2836 *br_io = 0;
2837
2836 if (rate == 0) 2838 if (rate == 0)
2837 return (0); 2839 return (0);
2838 2840
diff --git a/drivers/net/wan/sbni.c b/drivers/net/wan/sbni.c
index 35eded7ffb2d..1cc18e787a65 100644
--- a/drivers/net/wan/sbni.c
+++ b/drivers/net/wan/sbni.c
@@ -595,8 +595,8 @@ recv_frame( struct net_device *dev )
595 595
596 u32 crc = CRC32_INITIAL; 596 u32 crc = CRC32_INITIAL;
597 597
598 unsigned framelen, frameno, ack; 598 unsigned framelen = 0, frameno, ack;
599 unsigned is_first, frame_ok; 599 unsigned is_first, frame_ok = 0;
600 600
601 if( check_fhdr( ioaddr, &framelen, &frameno, &ack, &is_first, &crc ) ) { 601 if( check_fhdr( ioaddr, &framelen, &frameno, &ack, &is_first, &crc ) ) {
602 frame_ok = framelen > 4 602 frame_ok = framelen > 4
@@ -604,8 +604,7 @@ recv_frame( struct net_device *dev )
604 : skip_tail( ioaddr, framelen, crc ); 604 : skip_tail( ioaddr, framelen, crc );
605 if( frame_ok ) 605 if( frame_ok )
606 interpret_ack( dev, ack ); 606 interpret_ack( dev, ack );
607 } else 607 }
608 frame_ok = 0;
609 608
610 outb( inb( ioaddr + CSR0 ) ^ CT_ZER, ioaddr + CSR0 ); 609 outb( inb( ioaddr + CSR0 ) ^ CT_ZER, ioaddr + CSR0 );
611 if( frame_ok ) { 610 if( frame_ok ) {
diff --git a/drivers/net/wireless/airo.c b/drivers/net/wireless/airo.c
index 1c54908fdc4c..ee1cc14db389 100644
--- a/drivers/net/wireless/airo.c
+++ b/drivers/net/wireless/airo.c
@@ -3086,7 +3086,8 @@ static int airo_thread(void *data) {
3086 struct net_device *dev = data; 3086 struct net_device *dev = data;
3087 struct airo_info *ai = dev->priv; 3087 struct airo_info *ai = dev->priv;
3088 int locked; 3088 int locked;
3089 3089
3090 set_freezable();
3090 while(1) { 3091 while(1) {
3091 /* make swsusp happy with our thread */ 3092 /* make swsusp happy with our thread */
3092 try_to_freeze(); 3093 try_to_freeze();
diff --git a/drivers/net/wireless/libertas/main.c b/drivers/net/wireless/libertas/main.c
index 4a59306a3f05..9f366242c392 100644
--- a/drivers/net/wireless/libertas/main.c
+++ b/drivers/net/wireless/libertas/main.c
@@ -613,6 +613,7 @@ static int wlan_service_main_thread(void *data)
613 613
614 init_waitqueue_entry(&wait, current); 614 init_waitqueue_entry(&wait, current);
615 615
616 set_freezable();
616 for (;;) { 617 for (;;) {
617 lbs_deb_thread( "main-thread 111: intcounter=%d " 618 lbs_deb_thread( "main-thread 111: intcounter=%d "
618 "currenttxskb=%p dnld_sent=%d\n", 619 "currenttxskb=%p dnld_sent=%d\n",
diff --git a/drivers/parisc/hppb.c b/drivers/parisc/hppb.c
index a68b3b3761a2..a728a7cd2fc8 100644
--- a/drivers/parisc/hppb.c
+++ b/drivers/parisc/hppb.c
@@ -16,6 +16,7 @@
16#include <linux/init.h> 16#include <linux/init.h>
17#include <linux/mm.h> 17#include <linux/mm.h>
18#include <linux/slab.h> 18#include <linux/slab.h>
19#include <linux/dma-mapping.h>
19#include <linux/ioport.h> 20#include <linux/ioport.h>
20 21
21#include <asm/io.h> 22#include <asm/io.h>
diff --git a/drivers/pcmcia/cs.c b/drivers/pcmcia/cs.c
index 50cad3a59a6c..7c93a108f9b8 100644
--- a/drivers/pcmcia/cs.c
+++ b/drivers/pcmcia/cs.c
@@ -651,6 +651,7 @@ static int pccardd(void *__skt)
651 add_wait_queue(&skt->thread_wait, &wait); 651 add_wait_queue(&skt->thread_wait, &wait);
652 complete(&skt->thread_done); 652 complete(&skt->thread_done);
653 653
654 set_freezable();
654 for (;;) { 655 for (;;) {
655 unsigned long flags; 656 unsigned long flags;
656 unsigned int events; 657 unsigned int events;
diff --git a/drivers/pcmcia/m8xx_pcmcia.c b/drivers/pcmcia/m8xx_pcmcia.c
index 3b40f9623cc9..3c45142c40b2 100644
--- a/drivers/pcmcia/m8xx_pcmcia.c
+++ b/drivers/pcmcia/m8xx_pcmcia.c
@@ -113,7 +113,7 @@ MODULE_LICENSE("Dual MPL/GPL");
113#define CONFIG_PCMCIA_SLOT_B 113#define CONFIG_PCMCIA_SLOT_B
114#endif 114#endif
115 115
116#endif /* !defined(CONFIG_PCMCIA_SLOT_A) && !defined(CONFIG_PCMCIA_SLOT_B) */ 116#endif /* !defined(CONFIG_PCMCIA_SLOT_A) && !defined(CONFIG_PCMCIA_SLOT_B) */
117 117
118#if defined(CONFIG_PCMCIA_SLOT_A) && defined(CONFIG_PCMCIA_SLOT_B) 118#if defined(CONFIG_PCMCIA_SLOT_A) && defined(CONFIG_PCMCIA_SLOT_B)
119 119
@@ -146,9 +146,9 @@ MODULE_LICENSE("Dual MPL/GPL");
146 146
147/* ------------------------------------------------------------------------- */ 147/* ------------------------------------------------------------------------- */
148 148
149#define PCMCIA_MEM_WIN_BASE 0xe0000000 /* base address for memory window 0 */ 149#define PCMCIA_MEM_WIN_BASE 0xe0000000 /* base address for memory window 0 */
150#define PCMCIA_MEM_WIN_SIZE 0x04000000 /* each memory window is 64 MByte */ 150#define PCMCIA_MEM_WIN_SIZE 0x04000000 /* each memory window is 64 MByte */
151#define PCMCIA_IO_WIN_BASE _IO_BASE /* base address for io window 0 */ 151#define PCMCIA_IO_WIN_BASE _IO_BASE /* base address for io window 0 */
152/* ------------------------------------------------------------------------- */ 152/* ------------------------------------------------------------------------- */
153 153
154static int pcmcia_schlvl; 154static int pcmcia_schlvl;
@@ -169,8 +169,8 @@ static u32 *m8xx_pgcrx[2];
169 */ 169 */
170 170
171struct pcmcia_win { 171struct pcmcia_win {
172 u32 br; 172 u32 br;
173 u32 or; 173 u32 or;
174}; 174};
175 175
176/* 176/*
@@ -214,7 +214,7 @@ struct pcmcia_win {
214 214
215/* we keep one lookup table per socket to check flags */ 215/* we keep one lookup table per socket to check flags */
216 216
217#define PCMCIA_EVENTS_MAX 5 /* 4 max at a time + termination */ 217#define PCMCIA_EVENTS_MAX 5 /* 4 max at a time + termination */
218 218
219struct event_table { 219struct event_table {
220 u32 regbit; 220 u32 regbit;
@@ -224,8 +224,8 @@ struct event_table {
224static const char driver_name[] = "m8xx-pcmcia"; 224static const char driver_name[] = "m8xx-pcmcia";
225 225
226struct socket_info { 226struct socket_info {
227 void (*handler)(void *info, u32 events); 227 void (*handler) (void *info, u32 events);
228 void *info; 228 void *info;
229 229
230 u32 slot; 230 u32 slot;
231 pcmconf8xx_t *pcmcia; 231 pcmconf8xx_t *pcmcia;
@@ -234,7 +234,7 @@ struct socket_info {
234 234
235 socket_state_t state; 235 socket_state_t state;
236 struct pccard_mem_map mem_win[PCMCIA_MEM_WIN_NO]; 236 struct pccard_mem_map mem_win[PCMCIA_MEM_WIN_NO];
237 struct pccard_io_map io_win[PCMCIA_IO_WIN_NO]; 237 struct pccard_io_map io_win[PCMCIA_IO_WIN_NO];
238 struct event_table events[PCMCIA_EVENTS_MAX]; 238 struct event_table events[PCMCIA_EVENTS_MAX];
239 struct pcmcia_socket socket; 239 struct pcmcia_socket socket;
240}; 240};
@@ -248,8 +248,7 @@ static struct socket_info socket[PCMCIA_SOCKETS_NO];
248 248
249#define M8XX_SIZES_NO 32 249#define M8XX_SIZES_NO 32
250 250
251static const u32 m8xx_size_to_gray[M8XX_SIZES_NO] = 251static const u32 m8xx_size_to_gray[M8XX_SIZES_NO] = {
252{
253 0x00000001, 0x00000002, 0x00000008, 0x00000004, 252 0x00000001, 0x00000002, 0x00000008, 0x00000004,
254 0x00000080, 0x00000040, 0x00000010, 0x00000020, 253 0x00000080, 0x00000040, 0x00000010, 0x00000020,
255 0x00008000, 0x00004000, 0x00001000, 0x00002000, 254 0x00008000, 0x00004000, 0x00001000, 0x00002000,
@@ -265,7 +264,7 @@ static const u32 m8xx_size_to_gray[M8XX_SIZES_NO] =
265 264
266static irqreturn_t m8xx_interrupt(int irq, void *dev); 265static irqreturn_t m8xx_interrupt(int irq, void *dev);
267 266
268#define PCMCIA_BMT_LIMIT (15*4) /* Bus Monitor Timeout value */ 267#define PCMCIA_BMT_LIMIT (15*4) /* Bus Monitor Timeout value */
269 268
270/* ------------------------------------------------------------------------- */ 269/* ------------------------------------------------------------------------- */
271/* board specific stuff: */ 270/* board specific stuff: */
@@ -289,8 +288,9 @@ static int voltage_set(int slot, int vcc, int vpp)
289{ 288{
290 u32 reg = 0; 289 u32 reg = 0;
291 290
292 switch(vcc) { 291 switch (vcc) {
293 case 0: break; 292 case 0:
293 break;
294 case 33: 294 case 33:
295 reg |= BCSR1_PCVCTL4; 295 reg |= BCSR1_PCVCTL4;
296 break; 296 break;
@@ -301,11 +301,12 @@ static int voltage_set(int slot, int vcc, int vpp)
301 return 1; 301 return 1;
302 } 302 }
303 303
304 switch(vpp) { 304 switch (vpp) {
305 case 0: break; 305 case 0:
306 break;
306 case 33: 307 case 33:
307 case 50: 308 case 50:
308 if(vcc == vpp) 309 if (vcc == vpp)
309 reg |= BCSR1_PCVCTL6; 310 reg |= BCSR1_PCVCTL6;
310 else 311 else
311 return 1; 312 return 1;
@@ -316,25 +317,29 @@ static int voltage_set(int slot, int vcc, int vpp)
316 return 1; 317 return 1;
317 } 318 }
318 319
319 if(!((vcc == 50) || (vcc == 0))) 320 if (!((vcc == 50) || (vcc == 0)))
320 return 1; 321 return 1;
321 322
322 /* first, turn off all power */ 323 /* first, turn off all power */
323 324
324 out_be32(((u32 *)RPX_CSR_ADDR), in_be32(((u32 *)RPX_CSR_ADDR)) & ~(BCSR1_PCVCTL4 | BCSR1_PCVCTL5 | BCSR1_PCVCTL6 | BCSR1_PCVCTL7)); 325 out_be32(((u32 *) RPX_CSR_ADDR),
326 in_be32(((u32 *) RPX_CSR_ADDR)) & ~(BCSR1_PCVCTL4 |
327 BCSR1_PCVCTL5 |
328 BCSR1_PCVCTL6 |
329 BCSR1_PCVCTL7));
325 330
326 /* enable new powersettings */ 331 /* enable new powersettings */
327 332
328 out_be32(((u32 *)RPX_CSR_ADDR), in_be32(((u32 *)RPX_CSR_ADDR)) | reg); 333 out_be32(((u32 *) RPX_CSR_ADDR), in_be32(((u32 *) RPX_CSR_ADDR)) | reg);
329 334
330 return 0; 335 return 0;
331} 336}
332 337
333#define socket_get(_slot_) PCMCIA_SOCKET_KEY_5V 338#define socket_get(_slot_) PCMCIA_SOCKET_KEY_5V
334#define hardware_enable(_slot_) /* No hardware to enable */ 339#define hardware_enable(_slot_) /* No hardware to enable */
335#define hardware_disable(_slot_) /* No hardware to disable */ 340#define hardware_disable(_slot_) /* No hardware to disable */
336 341
337#endif /* CONFIG_RPXCLASSIC */ 342#endif /* CONFIG_RPXCLASSIC */
338 343
339/* FADS Boards from Motorola */ 344/* FADS Boards from Motorola */
340 345
@@ -346,43 +351,45 @@ static int voltage_set(int slot, int vcc, int vpp)
346{ 351{
347 u32 reg = 0; 352 u32 reg = 0;
348 353
349 switch(vcc) { 354 switch (vcc) {
350 case 0: 355 case 0:
351 break; 356 break;
352 case 33: 357 case 33:
353 reg |= BCSR1_PCCVCC0; 358 reg |= BCSR1_PCCVCC0;
354 break; 359 break;
355 case 50: 360 case 50:
356 reg |= BCSR1_PCCVCC1; 361 reg |= BCSR1_PCCVCC1;
357 break; 362 break;
358 default: 363 default:
359 return 1; 364 return 1;
360 } 365 }
361 366
362 switch(vpp) { 367 switch (vpp) {
363 case 0: 368 case 0:
364 break; 369 break;
365 case 33: 370 case 33:
366 case 50: 371 case 50:
367 if(vcc == vpp) 372 if (vcc == vpp)
368 reg |= BCSR1_PCCVPP1; 373 reg |= BCSR1_PCCVPP1;
369 else 374 else
370 return 1;
371 break;
372 case 120:
373 if ((vcc == 33) || (vcc == 50))
374 reg |= BCSR1_PCCVPP0;
375 else
376 return 1;
377 default:
378 return 1; 375 return 1;
376 break;
377 case 120:
378 if ((vcc == 33) || (vcc == 50))
379 reg |= BCSR1_PCCVPP0;
380 else
381 return 1;
382 default:
383 return 1;
379 } 384 }
380 385
381 /* first, turn off all power */ 386 /* first, turn off all power */
382 out_be32((u32 *)BCSR1, in_be32((u32 *)BCSR1) & ~(BCSR1_PCCVCC_MASK | BCSR1_PCCVPP_MASK)); 387 out_be32((u32 *) BCSR1,
388 in_be32((u32 *) BCSR1) & ~(BCSR1_PCCVCC_MASK |
389 BCSR1_PCCVPP_MASK));
383 390
384 /* enable new powersettings */ 391 /* enable new powersettings */
385 out_be32((u32 *)BCSR1, in_be32((u32 *)BCSR1) | reg); 392 out_be32((u32 *) BCSR1, in_be32((u32 *) BCSR1) | reg);
386 393
387 return 0; 394 return 0;
388} 395}
@@ -391,12 +398,12 @@ static int voltage_set(int slot, int vcc, int vpp)
391 398
392static void hardware_enable(int slot) 399static void hardware_enable(int slot)
393{ 400{
394 out_be32((u32 *)BCSR1, in_be32((u32 *)BCSR1) & ~BCSR1_PCCEN); 401 out_be32((u32 *) BCSR1, in_be32((u32 *) BCSR1) & ~BCSR1_PCCEN);
395} 402}
396 403
397static void hardware_disable(int slot) 404static void hardware_disable(int slot)
398{ 405{
399 out_be32((u32 *)BCSR1, in_be32((u32 *)BCSR1) | BCSR1_PCCEN); 406 out_be32((u32 *) BCSR1, in_be32((u32 *) BCSR1) | BCSR1_PCCEN);
400} 407}
401 408
402#endif 409#endif
@@ -410,7 +417,7 @@ static void hardware_disable(int slot)
410 417
411static inline void hardware_enable(int slot) 418static inline void hardware_enable(int slot)
412{ 419{
413 m8xx_pcmcia_ops.hw_ctrl(slot, 1); 420 m8xx_pcmcia_ops.hw_ctrl(slot, 1);
414} 421}
415 422
416static inline void hardware_disable(int slot) 423static inline void hardware_disable(int slot)
@@ -436,52 +443,53 @@ static int voltage_set(int slot, int vcc, int vpp)
436{ 443{
437 u8 reg = 0; 444 u8 reg = 0;
438 445
439 switch(vcc) { 446 switch (vcc) {
440 case 0: 447 case 0:
441 break; 448 break;
442 case 33: 449 case 33:
443 reg |= CSR2_VCC_33; 450 reg |= CSR2_VCC_33;
444 break; 451 break;
445 case 50: 452 case 50:
446 reg |= CSR2_VCC_50; 453 reg |= CSR2_VCC_50;
447 break; 454 break;
448 default: 455 default:
449 return 1; 456 return 1;
450 } 457 }
451 458
452 switch(vpp) { 459 switch (vpp) {
453 case 0: 460 case 0:
454 break; 461 break;
455 case 33: 462 case 33:
456 case 50: 463 case 50:
457 if(vcc == vpp) 464 if (vcc == vpp)
458 reg |= CSR2_VPP_VCC; 465 reg |= CSR2_VPP_VCC;
459 else 466 else
460 return 1; 467 return 1;
461 break; 468 break;
462 case 120: 469 case 120:
463 if ((vcc == 33) || (vcc == 50)) 470 if ((vcc == 33) || (vcc == 50))
464 reg |= CSR2_VPP_12; 471 reg |= CSR2_VPP_12;
465 else 472 else
466 return 1;
467 default:
468 return 1; 473 return 1;
474 default:
475 return 1;
469 } 476 }
470 477
471 /* first, turn off all power */ 478 /* first, turn off all power */
472 out_8((u8 *)MBX_CSR2_ADDR, in_8((u8 *)MBX_CSR2_ADDR) & ~(CSR2_VCC_MASK | CSR2_VPP_MASK)); 479 out_8((u8 *) MBX_CSR2_ADDR,
480 in_8((u8 *) MBX_CSR2_ADDR) & ~(CSR2_VCC_MASK | CSR2_VPP_MASK));
473 481
474 /* enable new powersettings */ 482 /* enable new powersettings */
475 out_8((u8 *)MBX_CSR2_ADDR, in_8((u8 *)MBX_CSR2_ADDR) | reg); 483 out_8((u8 *) MBX_CSR2_ADDR, in_8((u8 *) MBX_CSR2_ADDR) | reg);
476 484
477 return 0; 485 return 0;
478} 486}
479 487
480#define socket_get(_slot_) PCMCIA_SOCKET_KEY_5V 488#define socket_get(_slot_) PCMCIA_SOCKET_KEY_5V
481#define hardware_enable(_slot_) /* No hardware to enable */ 489#define hardware_enable(_slot_) /* No hardware to enable */
482#define hardware_disable(_slot_) /* No hardware to disable */ 490#define hardware_disable(_slot_) /* No hardware to disable */
483 491
484#endif /* CONFIG_MBX */ 492#endif /* CONFIG_MBX */
485 493
486#if defined(CONFIG_PRxK) 494#if defined(CONFIG_PRxK)
487#include <asm/cpld.h> 495#include <asm/cpld.h>
@@ -495,43 +503,46 @@ static int voltage_set(int slot, int vcc, int vpp)
495 u8 regread; 503 u8 regread;
496 cpld_regs *ccpld = get_cpld(); 504 cpld_regs *ccpld = get_cpld();
497 505
498 switch(vcc) { 506 switch (vcc) {
499 case 0: 507 case 0:
500 break; 508 break;
501 case 33: 509 case 33:
502 reg |= PCMCIA_VCC_33; 510 reg |= PCMCIA_VCC_33;
503 break; 511 break;
504 case 50: 512 case 50:
505 reg |= PCMCIA_VCC_50; 513 reg |= PCMCIA_VCC_50;
506 break; 514 break;
507 default: 515 default:
508 return 1; 516 return 1;
509 } 517 }
510 518
511 switch(vpp) { 519 switch (vpp) {
512 case 0: 520 case 0:
513 break; 521 break;
514 case 33: 522 case 33:
515 case 50: 523 case 50:
516 if(vcc == vpp) 524 if (vcc == vpp)
517 reg |= PCMCIA_VPP_VCC; 525 reg |= PCMCIA_VPP_VCC;
518 else 526 else
519 return 1;
520 break;
521 case 120:
522 if ((vcc == 33) || (vcc == 50))
523 reg |= PCMCIA_VPP_12;
524 else
525 return 1;
526 default:
527 return 1; 527 return 1;
528 break;
529 case 120:
530 if ((vcc == 33) || (vcc == 50))
531 reg |= PCMCIA_VPP_12;
532 else
533 return 1;
534 default:
535 return 1;
528 } 536 }
529 537
530 reg = reg >> (slot << 2); 538 reg = reg >> (slot << 2);
531 regread = in_8(&ccpld->fpga_pc_ctl); 539 regread = in_8(&ccpld->fpga_pc_ctl);
532 if (reg != (regread & ((PCMCIA_VCC_MASK | PCMCIA_VPP_MASK) >> (slot << 2)))) { 540 if (reg !=
541 (regread & ((PCMCIA_VCC_MASK | PCMCIA_VPP_MASK) >> (slot << 2)))) {
533 /* enable new powersettings */ 542 /* enable new powersettings */
534 regread = regread & ~((PCMCIA_VCC_MASK | PCMCIA_VPP_MASK) >> (slot << 2)); 543 regread =
544 regread & ~((PCMCIA_VCC_MASK | PCMCIA_VPP_MASK) >>
545 (slot << 2));
535 out_8(&ccpld->fpga_pc_ctl, reg | regread); 546 out_8(&ccpld->fpga_pc_ctl, reg | regread);
536 msleep(100); 547 msleep(100);
537 } 548 }
@@ -540,10 +551,10 @@ static int voltage_set(int slot, int vcc, int vpp)
540} 551}
541 552
542#define socket_get(_slot_) PCMCIA_SOCKET_KEY_LV 553#define socket_get(_slot_) PCMCIA_SOCKET_KEY_LV
543#define hardware_enable(_slot_) /* No hardware to enable */ 554#define hardware_enable(_slot_) /* No hardware to enable */
544#define hardware_disable(_slot_) /* No hardware to disable */ 555#define hardware_disable(_slot_) /* No hardware to disable */
545 556
546#endif /* CONFIG_PRxK */ 557#endif /* CONFIG_PRxK */
547 558
548static u32 pending_events[PCMCIA_SOCKETS_NO]; 559static u32 pending_events[PCMCIA_SOCKETS_NO];
549static DEFINE_SPINLOCK(pending_event_lock); 560static DEFINE_SPINLOCK(pending_event_lock);
@@ -553,7 +564,7 @@ static irqreturn_t m8xx_interrupt(int irq, void *dev)
553 struct socket_info *s; 564 struct socket_info *s;
554 struct event_table *e; 565 struct event_table *e;
555 unsigned int i, events, pscr, pipr, per; 566 unsigned int i, events, pscr, pipr, per;
556 pcmconf8xx_t *pcmcia = socket[0].pcmcia; 567 pcmconf8xx_t *pcmcia = socket[0].pcmcia;
557 568
558 dprintk("Interrupt!\n"); 569 dprintk("Interrupt!\n");
559 /* get interrupt sources */ 570 /* get interrupt sources */
@@ -562,16 +573,16 @@ static irqreturn_t m8xx_interrupt(int irq, void *dev)
562 pipr = in_be32(&pcmcia->pcmc_pipr); 573 pipr = in_be32(&pcmcia->pcmc_pipr);
563 per = in_be32(&pcmcia->pcmc_per); 574 per = in_be32(&pcmcia->pcmc_per);
564 575
565 for(i = 0; i < PCMCIA_SOCKETS_NO; i++) { 576 for (i = 0; i < PCMCIA_SOCKETS_NO; i++) {
566 s = &socket[i]; 577 s = &socket[i];
567 e = &s->events[0]; 578 e = &s->events[0];
568 events = 0; 579 events = 0;
569 580
570 while(e->regbit) { 581 while (e->regbit) {
571 if(pscr & e->regbit) 582 if (pscr & e->regbit)
572 events |= e->eventbit; 583 events |= e->eventbit;
573 584
574 e++; 585 e++;
575 } 586 }
576 587
577 /* 588 /*
@@ -579,13 +590,11 @@ static irqreturn_t m8xx_interrupt(int irq, void *dev)
579 * not too nice done, 590 * not too nice done,
580 * we depend on that CD2 is the bit to the left of CD1... 591 * we depend on that CD2 is the bit to the left of CD1...
581 */ 592 */
582 if(events & SS_DETECT) 593 if (events & SS_DETECT)
583 if(((pipr & M8XX_PCMCIA_CD2(i)) >> 1) ^ 594 if (((pipr & M8XX_PCMCIA_CD2(i)) >> 1) ^
584 (pipr & M8XX_PCMCIA_CD1(i))) 595 (pipr & M8XX_PCMCIA_CD1(i))) {
585 {
586 events &= ~SS_DETECT; 596 events &= ~SS_DETECT;
587 } 597 }
588
589#ifdef PCMCIA_GLITCHY_CD 598#ifdef PCMCIA_GLITCHY_CD
590 /* 599 /*
591 * I've experienced CD problems with my ADS board. 600 * I've experienced CD problems with my ADS board.
@@ -593,24 +602,23 @@ static irqreturn_t m8xx_interrupt(int irq, void *dev)
593 * real change of Card detection. 602 * real change of Card detection.
594 */ 603 */
595 604
596 if((events & SS_DETECT) && 605 if ((events & SS_DETECT) &&
597 ((pipr & 606 ((pipr &
598 (M8XX_PCMCIA_CD2(i) | M8XX_PCMCIA_CD1(i))) == 0) && 607 (M8XX_PCMCIA_CD2(i) | M8XX_PCMCIA_CD1(i))) == 0) &&
599 (s->state.Vcc | s->state.Vpp)) { 608 (s->state.Vcc | s->state.Vpp)) {
600 events &= ~SS_DETECT; 609 events &= ~SS_DETECT;
601 /*printk( "CD glitch workaround - CD = 0x%08x!\n", 610 /*printk( "CD glitch workaround - CD = 0x%08x!\n",
602 (pipr & (M8XX_PCMCIA_CD2(i) 611 (pipr & (M8XX_PCMCIA_CD2(i)
603 | M8XX_PCMCIA_CD1(i))));*/ 612 | M8XX_PCMCIA_CD1(i)))); */
604 } 613 }
605#endif 614#endif
606 615
607 /* call the handler */ 616 /* call the handler */
608 617
609 dprintk("slot %u: events = 0x%02x, pscr = 0x%08x, " 618 dprintk("slot %u: events = 0x%02x, pscr = 0x%08x, "
610 "pipr = 0x%08x\n", 619 "pipr = 0x%08x\n", i, events, pscr, pipr);
611 i, events, pscr, pipr);
612 620
613 if(events) { 621 if (events) {
614 spin_lock(&pending_event_lock); 622 spin_lock(&pending_event_lock);
615 pending_events[i] |= events; 623 pending_events[i] |= events;
616 spin_unlock(&pending_event_lock); 624 spin_unlock(&pending_event_lock);
@@ -643,11 +651,11 @@ static u32 m8xx_get_graycode(u32 size)
643{ 651{
644 u32 k; 652 u32 k;
645 653
646 for(k = 0; k < M8XX_SIZES_NO; k++) 654 for (k = 0; k < M8XX_SIZES_NO; k++)
647 if(m8xx_size_to_gray[k] == size) 655 if (m8xx_size_to_gray[k] == size)
648 break; 656 break;
649 657
650 if((k == M8XX_SIZES_NO) || (m8xx_size_to_gray[k] == -1)) 658 if ((k == M8XX_SIZES_NO) || (m8xx_size_to_gray[k] == -1))
651 k = -1; 659 k = -1;
652 660
653 return k; 661 return k;
@@ -657,7 +665,7 @@ static u32 m8xx_get_speed(u32 ns, u32 is_io, u32 bus_freq)
657{ 665{
658 u32 reg, clocks, psst, psl, psht; 666 u32 reg, clocks, psst, psl, psht;
659 667
660 if(!ns) { 668 if (!ns) {
661 669
662 /* 670 /*
663 * We get called with IO maps setup to 0ns 671 * We get called with IO maps setup to 0ns
@@ -665,10 +673,10 @@ static u32 m8xx_get_speed(u32 ns, u32 is_io, u32 bus_freq)
665 * They should be 255ns. 673 * They should be 255ns.
666 */ 674 */
667 675
668 if(is_io) 676 if (is_io)
669 ns = 255; 677 ns = 255;
670 else 678 else
671 ns = 100; /* fast memory if 0 */ 679 ns = 100; /* fast memory if 0 */
672 } 680 }
673 681
674 /* 682 /*
@@ -679,23 +687,23 @@ static u32 m8xx_get_speed(u32 ns, u32 is_io, u32 bus_freq)
679 687
680/* how we want to adjust the timing - in percent */ 688/* how we want to adjust the timing - in percent */
681 689
682#define ADJ 180 /* 80 % longer accesstime - to be sure */ 690#define ADJ 180 /* 80 % longer accesstime - to be sure */
683 691
684 clocks = ((bus_freq / 1000) * ns) / 1000; 692 clocks = ((bus_freq / 1000) * ns) / 1000;
685 clocks = (clocks * ADJ) / (100*1000); 693 clocks = (clocks * ADJ) / (100 * 1000);
686 if(clocks >= PCMCIA_BMT_LIMIT) { 694 if (clocks >= PCMCIA_BMT_LIMIT) {
687 printk( "Max access time limit reached\n"); 695 printk("Max access time limit reached\n");
688 clocks = PCMCIA_BMT_LIMIT-1; 696 clocks = PCMCIA_BMT_LIMIT - 1;
689 } 697 }
690 698
691 psst = clocks / 7; /* setup time */ 699 psst = clocks / 7; /* setup time */
692 psht = clocks / 7; /* hold time */ 700 psht = clocks / 7; /* hold time */
693 psl = (clocks * 5) / 7; /* strobe length */ 701 psl = (clocks * 5) / 7; /* strobe length */
694 702
695 psst += clocks - (psst + psht + psl); 703 psst += clocks - (psst + psht + psl);
696 704
697 reg = psst << 12; 705 reg = psst << 12;
698 reg |= psl << 7; 706 reg |= psl << 7;
699 reg |= psht << 16; 707 reg |= psht << 16;
700 708
701 return reg; 709 return reg;
@@ -710,8 +718,8 @@ static int m8xx_get_status(struct pcmcia_socket *sock, unsigned int *value)
710 718
711 pipr = in_be32(&pcmcia->pcmc_pipr); 719 pipr = in_be32(&pcmcia->pcmc_pipr);
712 720
713 *value = ((pipr & (M8XX_PCMCIA_CD1(lsock) 721 *value = ((pipr & (M8XX_PCMCIA_CD1(lsock)
714 | M8XX_PCMCIA_CD2(lsock))) == 0) ? SS_DETECT : 0; 722 | M8XX_PCMCIA_CD2(lsock))) == 0) ? SS_DETECT : 0;
715 *value |= (pipr & M8XX_PCMCIA_WP(lsock)) ? SS_WRPROT : 0; 723 *value |= (pipr & M8XX_PCMCIA_WP(lsock)) ? SS_WRPROT : 0;
716 724
717 if (s->state.flags & SS_IOCARD) 725 if (s->state.flags & SS_IOCARD)
@@ -795,16 +803,16 @@ static int m8xx_get_status(struct pcmcia_socket *sock, unsigned int *value)
795 /* read out VS1 and VS2 */ 803 /* read out VS1 and VS2 */
796 804
797 reg = (pipr & M8XX_PCMCIA_VS_MASK(lsock)) 805 reg = (pipr & M8XX_PCMCIA_VS_MASK(lsock))
798 >> M8XX_PCMCIA_VS_SHIFT(lsock); 806 >> M8XX_PCMCIA_VS_SHIFT(lsock);
799 807
800 if(socket_get(lsock) == PCMCIA_SOCKET_KEY_LV) { 808 if (socket_get(lsock) == PCMCIA_SOCKET_KEY_LV) {
801 switch(reg) { 809 switch (reg) {
802 case 1: 810 case 1:
803 *value |= SS_3VCARD; 811 *value |= SS_3VCARD;
804 break; /* GND, NC - 3.3V only */ 812 break; /* GND, NC - 3.3V only */
805 case 2: 813 case 2:
806 *value |= SS_XVCARD; 814 *value |= SS_XVCARD;
807 break; /* NC. GND - x.xV only */ 815 break; /* NC. GND - x.xV only */
808 }; 816 };
809 } 817 }
810 818
@@ -812,7 +820,7 @@ static int m8xx_get_status(struct pcmcia_socket *sock, unsigned int *value)
812 return 0; 820 return 0;
813} 821}
814 822
815static int m8xx_set_socket(struct pcmcia_socket *sock, socket_state_t *state) 823static int m8xx_set_socket(struct pcmcia_socket *sock, socket_state_t * state)
816{ 824{
817 int lsock = container_of(sock, struct socket_info, socket)->slot; 825 int lsock = container_of(sock, struct socket_info, socket)->slot;
818 struct socket_info *s = &socket[lsock]; 826 struct socket_info *s = &socket[lsock];
@@ -821,20 +829,20 @@ static int m8xx_set_socket(struct pcmcia_socket *sock, socket_state_t *state)
821 unsigned long flags; 829 unsigned long flags;
822 pcmconf8xx_t *pcmcia = socket[0].pcmcia; 830 pcmconf8xx_t *pcmcia = socket[0].pcmcia;
823 831
824 dprintk( "SetSocket(%d, flags %#3.3x, Vcc %d, Vpp %d, " 832 dprintk("SetSocket(%d, flags %#3.3x, Vcc %d, Vpp %d, "
825 "io_irq %d, csc_mask %#2.2x)\n", lsock, state->flags, 833 "io_irq %d, csc_mask %#2.2x)\n", lsock, state->flags,
826 state->Vcc, state->Vpp, state->io_irq, state->csc_mask); 834 state->Vcc, state->Vpp, state->io_irq, state->csc_mask);
827 835
828 /* First, set voltage - bail out if invalid */ 836 /* First, set voltage - bail out if invalid */
829 if(voltage_set(lsock, state->Vcc, state->Vpp)) 837 if (voltage_set(lsock, state->Vcc, state->Vpp))
830 return -EINVAL; 838 return -EINVAL;
831 839
832
833 /* Take care of reset... */ 840 /* Take care of reset... */
834 if(state->flags & SS_RESET) 841 if (state->flags & SS_RESET)
835 out_be32(M8XX_PGCRX(lsock), in_be32(M8XX_PGCRX(lsock)) | M8XX_PGCRX_CXRESET); /* active high */ 842 out_be32(M8XX_PGCRX(lsock), in_be32(M8XX_PGCRX(lsock)) | M8XX_PGCRX_CXRESET); /* active high */
836 else 843 else
837 out_be32(M8XX_PGCRX(lsock), in_be32(M8XX_PGCRX(lsock)) & ~M8XX_PGCRX_CXRESET); 844 out_be32(M8XX_PGCRX(lsock),
845 in_be32(M8XX_PGCRX(lsock)) & ~M8XX_PGCRX_CXRESET);
838 846
839 /* ... and output enable. */ 847 /* ... and output enable. */
840 848
@@ -846,10 +854,11 @@ static int m8xx_set_socket(struct pcmcia_socket *sock, socket_state_t *state)
846 no pullups are present -> the cards act wierd. 854 no pullups are present -> the cards act wierd.
847 So right now the buffers are enabled if the power is on. */ 855 So right now the buffers are enabled if the power is on. */
848 856
849 if(state->Vcc || state->Vpp) 857 if (state->Vcc || state->Vpp)
850 out_be32(M8XX_PGCRX(lsock), in_be32(M8XX_PGCRX(lsock)) & ~M8XX_PGCRX_CXOE); /* active low */ 858 out_be32(M8XX_PGCRX(lsock), in_be32(M8XX_PGCRX(lsock)) & ~M8XX_PGCRX_CXOE); /* active low */
851 else 859 else
852 out_be32(M8XX_PGCRX(lsock), in_be32(M8XX_PGCRX(lsock)) | M8XX_PGCRX_CXOE); 860 out_be32(M8XX_PGCRX(lsock),
861 in_be32(M8XX_PGCRX(lsock)) | M8XX_PGCRX_CXOE);
853 862
854 /* 863 /*
855 * We'd better turn off interrupts before 864 * We'd better turn off interrupts before
@@ -866,17 +875,17 @@ static int m8xx_set_socket(struct pcmcia_socket *sock, socket_state_t *state)
866 e = &s->events[0]; 875 e = &s->events[0];
867 reg = 0; 876 reg = 0;
868 877
869 if(state->csc_mask & SS_DETECT) { 878 if (state->csc_mask & SS_DETECT) {
870 e->eventbit = SS_DETECT; 879 e->eventbit = SS_DETECT;
871 reg |= e->regbit = (M8XX_PCMCIA_CD2(lsock) 880 reg |= e->regbit = (M8XX_PCMCIA_CD2(lsock)
872 | M8XX_PCMCIA_CD1(lsock)); 881 | M8XX_PCMCIA_CD1(lsock));
873 e++; 882 e++;
874 } 883 }
875 if(state->flags & SS_IOCARD) { 884 if (state->flags & SS_IOCARD) {
876 /* 885 /*
877 * I/O card 886 * I/O card
878 */ 887 */
879 if(state->csc_mask & SS_STSCHG) { 888 if (state->csc_mask & SS_STSCHG) {
880 e->eventbit = SS_STSCHG; 889 e->eventbit = SS_STSCHG;
881 reg |= e->regbit = M8XX_PCMCIA_BVD1(lsock); 890 reg |= e->regbit = M8XX_PCMCIA_BVD1(lsock);
882 e++; 891 e++;
@@ -884,9 +893,10 @@ static int m8xx_set_socket(struct pcmcia_socket *sock, socket_state_t *state)
884 /* 893 /*
885 * If io_irq is non-zero we should enable irq. 894 * If io_irq is non-zero we should enable irq.
886 */ 895 */
887 if(state->io_irq) { 896 if (state->io_irq) {
888 out_be32(M8XX_PGCRX(lsock), 897 out_be32(M8XX_PGCRX(lsock),
889 in_be32(M8XX_PGCRX(lsock)) | mk_int_int_mask(s->hwirq) << 24); 898 in_be32(M8XX_PGCRX(lsock)) |
899 mk_int_int_mask(s->hwirq) << 24);
890 /* 900 /*
891 * Strange thing here: 901 * Strange thing here:
892 * The manual does not tell us which interrupt 902 * The manual does not tell us which interrupt
@@ -897,33 +907,32 @@ static int m8xx_set_socket(struct pcmcia_socket *sock, socket_state_t *state)
897 * have to be cleared in PSCR in the interrupt handler. 907 * have to be cleared in PSCR in the interrupt handler.
898 */ 908 */
899 reg |= M8XX_PCMCIA_RDY_L(lsock); 909 reg |= M8XX_PCMCIA_RDY_L(lsock);
900 } 910 } else
901 else 911 out_be32(M8XX_PGCRX(lsock),
902 out_be32(M8XX_PGCRX(lsock), in_be32(M8XX_PGCRX(lsock)) & 0x00ffffff); 912 in_be32(M8XX_PGCRX(lsock)) & 0x00ffffff);
903 } 913 } else {
904 else {
905 /* 914 /*
906 * Memory card 915 * Memory card
907 */ 916 */
908 if(state->csc_mask & SS_BATDEAD) { 917 if (state->csc_mask & SS_BATDEAD) {
909 e->eventbit = SS_BATDEAD; 918 e->eventbit = SS_BATDEAD;
910 reg |= e->regbit = M8XX_PCMCIA_BVD1(lsock); 919 reg |= e->regbit = M8XX_PCMCIA_BVD1(lsock);
911 e++; 920 e++;
912 } 921 }
913 if(state->csc_mask & SS_BATWARN) { 922 if (state->csc_mask & SS_BATWARN) {
914 e->eventbit = SS_BATWARN; 923 e->eventbit = SS_BATWARN;
915 reg |= e->regbit = M8XX_PCMCIA_BVD2(lsock); 924 reg |= e->regbit = M8XX_PCMCIA_BVD2(lsock);
916 e++; 925 e++;
917 } 926 }
918 /* What should I trigger on - low/high,raise,fall? */ 927 /* What should I trigger on - low/high,raise,fall? */
919 if(state->csc_mask & SS_READY) { 928 if (state->csc_mask & SS_READY) {
920 e->eventbit = SS_READY; 929 e->eventbit = SS_READY;
921 reg |= e->regbit = 0; //?? 930 reg |= e->regbit = 0; //??
922 e++; 931 e++;
923 } 932 }
924 } 933 }
925 934
926 e->regbit = 0; /* terminate list */ 935 e->regbit = 0; /* terminate list */
927 936
928 /* 937 /*
929 * Clear the status changed . 938 * Clear the status changed .
@@ -940,7 +949,9 @@ static int m8xx_set_socket(struct pcmcia_socket *sock, socket_state_t *state)
940 * Ones will enable the interrupt. 949 * Ones will enable the interrupt.
941 */ 950 */
942 951
943 reg |= in_be32(&pcmcia->pcmc_per) & (M8XX_PCMCIA_MASK(0) | M8XX_PCMCIA_MASK(1)); 952 reg |=
953 in_be32(&pcmcia->
954 pcmc_per) & (M8XX_PCMCIA_MASK(0) | M8XX_PCMCIA_MASK(1));
944 out_be32(&pcmcia->pcmc_per, reg); 955 out_be32(&pcmcia->pcmc_per, reg);
945 956
946 spin_unlock_irqrestore(&events_lock, flags); 957 spin_unlock_irqrestore(&events_lock, flags);
@@ -961,67 +972,66 @@ static int m8xx_set_io_map(struct pcmcia_socket *sock, struct pccard_io_map *io)
961 unsigned int reg, winnr; 972 unsigned int reg, winnr;
962 pcmconf8xx_t *pcmcia = s->pcmcia; 973 pcmconf8xx_t *pcmcia = s->pcmcia;
963 974
964
965#define M8XX_SIZE (io->stop - io->start + 1) 975#define M8XX_SIZE (io->stop - io->start + 1)
966#define M8XX_BASE (PCMCIA_IO_WIN_BASE + io->start) 976#define M8XX_BASE (PCMCIA_IO_WIN_BASE + io->start)
967 977
968 dprintk( "SetIOMap(%d, %d, %#2.2x, %d ns, " 978 dprintk("SetIOMap(%d, %d, %#2.2x, %d ns, "
969 "%#4.4x-%#4.4x)\n", lsock, io->map, io->flags, 979 "%#4.4x-%#4.4x)\n", lsock, io->map, io->flags,
970 io->speed, io->start, io->stop); 980 io->speed, io->start, io->stop);
971 981
972 if ((io->map >= PCMCIA_IO_WIN_NO) || (io->start > 0xffff) 982 if ((io->map >= PCMCIA_IO_WIN_NO) || (io->start > 0xffff)
973 || (io->stop > 0xffff) || (io->stop < io->start)) 983 || (io->stop > 0xffff) || (io->stop < io->start))
974 return -EINVAL; 984 return -EINVAL;
975 985
976 if((reg = m8xx_get_graycode(M8XX_SIZE)) == -1) 986 if ((reg = m8xx_get_graycode(M8XX_SIZE)) == -1)
977 return -EINVAL; 987 return -EINVAL;
978 988
979 if(io->flags & MAP_ACTIVE) { 989 if (io->flags & MAP_ACTIVE) {
980 990
981 dprintk( "io->flags & MAP_ACTIVE\n"); 991 dprintk("io->flags & MAP_ACTIVE\n");
982 992
983 winnr = (PCMCIA_MEM_WIN_NO * PCMCIA_SOCKETS_NO) 993 winnr = (PCMCIA_MEM_WIN_NO * PCMCIA_SOCKETS_NO)
984 + (lsock * PCMCIA_IO_WIN_NO) + io->map; 994 + (lsock * PCMCIA_IO_WIN_NO) + io->map;
985 995
986 /* setup registers */ 996 /* setup registers */
987 997
988 w = (void *) &pcmcia->pcmc_pbr0; 998 w = (void *)&pcmcia->pcmc_pbr0;
989 w += winnr; 999 w += winnr;
990 1000
991 out_be32(&w->or, 0); /* turn off window first */ 1001 out_be32(&w->or, 0); /* turn off window first */
992 out_be32(&w->br, M8XX_BASE); 1002 out_be32(&w->br, M8XX_BASE);
993 1003
994 reg <<= 27; 1004 reg <<= 27;
995 reg |= M8XX_PCMCIA_POR_IO |(lsock << 2); 1005 reg |= M8XX_PCMCIA_POR_IO | (lsock << 2);
996 1006
997 reg |= m8xx_get_speed(io->speed, 1, s->bus_freq); 1007 reg |= m8xx_get_speed(io->speed, 1, s->bus_freq);
998 1008
999 if(io->flags & MAP_WRPROT) 1009 if (io->flags & MAP_WRPROT)
1000 reg |= M8XX_PCMCIA_POR_WRPROT; 1010 reg |= M8XX_PCMCIA_POR_WRPROT;
1001 1011
1002 /*if(io->flags & (MAP_16BIT | MAP_AUTOSZ))*/ 1012 /*if(io->flags & (MAP_16BIT | MAP_AUTOSZ)) */
1003 if(io->flags & MAP_16BIT) 1013 if (io->flags & MAP_16BIT)
1004 reg |= M8XX_PCMCIA_POR_16BIT; 1014 reg |= M8XX_PCMCIA_POR_16BIT;
1005 1015
1006 if(io->flags & MAP_ACTIVE) 1016 if (io->flags & MAP_ACTIVE)
1007 reg |= M8XX_PCMCIA_POR_VALID; 1017 reg |= M8XX_PCMCIA_POR_VALID;
1008 1018
1009 out_be32(&w->or, reg); 1019 out_be32(&w->or, reg);
1010 1020
1011 dprintk("Socket %u: Mapped io window %u at %#8.8x, " 1021 dprintk("Socket %u: Mapped io window %u at %#8.8x, "
1012 "OR = %#8.8x.\n", lsock, io->map, w->br, w->or); 1022 "OR = %#8.8x.\n", lsock, io->map, w->br, w->or);
1013 } else { 1023 } else {
1014 /* shutdown IO window */ 1024 /* shutdown IO window */
1015 winnr = (PCMCIA_MEM_WIN_NO * PCMCIA_SOCKETS_NO) 1025 winnr = (PCMCIA_MEM_WIN_NO * PCMCIA_SOCKETS_NO)
1016 + (lsock * PCMCIA_IO_WIN_NO) + io->map; 1026 + (lsock * PCMCIA_IO_WIN_NO) + io->map;
1017 1027
1018 /* setup registers */ 1028 /* setup registers */
1019 1029
1020 w = (void *) &pcmcia->pcmc_pbr0; 1030 w = (void *)&pcmcia->pcmc_pbr0;
1021 w += winnr; 1031 w += winnr;
1022 1032
1023 out_be32(&w->or, 0); /* turn off window */ 1033 out_be32(&w->or, 0); /* turn off window */
1024 out_be32(&w->br, 0); /* turn off base address */ 1034 out_be32(&w->br, 0); /* turn off base address */
1025 1035
1026 dprintk("Socket %u: Unmapped io window %u at %#8.8x, " 1036 dprintk("Socket %u: Unmapped io window %u at %#8.8x, "
1027 "OR = %#8.8x.\n", lsock, io->map, w->br, w->or); 1037 "OR = %#8.8x.\n", lsock, io->map, w->br, w->or);
@@ -1029,15 +1039,14 @@ static int m8xx_set_io_map(struct pcmcia_socket *sock, struct pccard_io_map *io)
1029 1039
1030 /* copy the struct and modify the copy */ 1040 /* copy the struct and modify the copy */
1031 s->io_win[io->map] = *io; 1041 s->io_win[io->map] = *io;
1032 s->io_win[io->map].flags &= (MAP_WRPROT 1042 s->io_win[io->map].flags &= (MAP_WRPROT | MAP_16BIT | MAP_ACTIVE);
1033 | MAP_16BIT
1034 | MAP_ACTIVE);
1035 dprintk("SetIOMap exit\n"); 1043 dprintk("SetIOMap exit\n");
1036 1044
1037 return 0; 1045 return 0;
1038} 1046}
1039 1047
1040static int m8xx_set_mem_map(struct pcmcia_socket *sock, struct pccard_mem_map *mem) 1048static int m8xx_set_mem_map(struct pcmcia_socket *sock,
1049 struct pccard_mem_map *mem)
1041{ 1050{
1042 int lsock = container_of(sock, struct socket_info, socket)->slot; 1051 int lsock = container_of(sock, struct socket_info, socket)->slot;
1043 struct socket_info *s = &socket[lsock]; 1052 struct socket_info *s = &socket[lsock];
@@ -1046,19 +1055,19 @@ static int m8xx_set_mem_map(struct pcmcia_socket *sock, struct pccard_mem_map *m
1046 unsigned int reg, winnr; 1055 unsigned int reg, winnr;
1047 pcmconf8xx_t *pcmcia = s->pcmcia; 1056 pcmconf8xx_t *pcmcia = s->pcmcia;
1048 1057
1049 dprintk( "SetMemMap(%d, %d, %#2.2x, %d ns, " 1058 dprintk("SetMemMap(%d, %d, %#2.2x, %d ns, "
1050 "%#5.5lx, %#5.5x)\n", lsock, mem->map, mem->flags, 1059 "%#5.5lx, %#5.5x)\n", lsock, mem->map, mem->flags,
1051 mem->speed, mem->static_start, mem->card_start); 1060 mem->speed, mem->static_start, mem->card_start);
1052 1061
1053 if ((mem->map >= PCMCIA_MEM_WIN_NO) 1062 if ((mem->map >= PCMCIA_MEM_WIN_NO)
1054// || ((mem->s) >= PCMCIA_MEM_WIN_SIZE) 1063// || ((mem->s) >= PCMCIA_MEM_WIN_SIZE)
1055 || (mem->card_start >= 0x04000000) 1064 || (mem->card_start >= 0x04000000)
1056 || (mem->static_start & 0xfff) /* 4KByte resolution */ 1065 || (mem->static_start & 0xfff) /* 4KByte resolution */
1057 || (mem->card_start & 0xfff)) 1066 ||(mem->card_start & 0xfff))
1058 return -EINVAL; 1067 return -EINVAL;
1059 1068
1060 if((reg = m8xx_get_graycode(PCMCIA_MEM_WIN_SIZE)) == -1) { 1069 if ((reg = m8xx_get_graycode(PCMCIA_MEM_WIN_SIZE)) == -1) {
1061 printk( "Cannot set size to 0x%08x.\n", PCMCIA_MEM_WIN_SIZE); 1070 printk("Cannot set size to 0x%08x.\n", PCMCIA_MEM_WIN_SIZE);
1062 return -EINVAL; 1071 return -EINVAL;
1063 } 1072 }
1064 reg <<= 27; 1073 reg <<= 27;
@@ -1067,50 +1076,47 @@ static int m8xx_set_mem_map(struct pcmcia_socket *sock, struct pccard_mem_map *m
1067 1076
1068 /* Setup the window in the pcmcia controller */ 1077 /* Setup the window in the pcmcia controller */
1069 1078
1070 w = (void *) &pcmcia->pcmc_pbr0; 1079 w = (void *)&pcmcia->pcmc_pbr0;
1071 w += winnr; 1080 w += winnr;
1072 1081
1073 reg |= lsock << 2; 1082 reg |= lsock << 2;
1074 1083
1075 reg |= m8xx_get_speed(mem->speed, 0, s->bus_freq); 1084 reg |= m8xx_get_speed(mem->speed, 0, s->bus_freq);
1076 1085
1077 if(mem->flags & MAP_ATTRIB) 1086 if (mem->flags & MAP_ATTRIB)
1078 reg |= M8XX_PCMCIA_POR_ATTRMEM; 1087 reg |= M8XX_PCMCIA_POR_ATTRMEM;
1079 1088
1080 if(mem->flags & MAP_WRPROT) 1089 if (mem->flags & MAP_WRPROT)
1081 reg |= M8XX_PCMCIA_POR_WRPROT; 1090 reg |= M8XX_PCMCIA_POR_WRPROT;
1082 1091
1083 if(mem->flags & MAP_16BIT) 1092 if (mem->flags & MAP_16BIT)
1084 reg |= M8XX_PCMCIA_POR_16BIT; 1093 reg |= M8XX_PCMCIA_POR_16BIT;
1085 1094
1086 if(mem->flags & MAP_ACTIVE) 1095 if (mem->flags & MAP_ACTIVE)
1087 reg |= M8XX_PCMCIA_POR_VALID; 1096 reg |= M8XX_PCMCIA_POR_VALID;
1088 1097
1089 out_be32(&w->or, reg); 1098 out_be32(&w->or, reg);
1090 1099
1091 dprintk("Socket %u: Mapped memory window %u at %#8.8x, " 1100 dprintk("Socket %u: Mapped memory window %u at %#8.8x, "
1092 "OR = %#8.8x.\n", lsock, mem->map, w->br, w->or); 1101 "OR = %#8.8x.\n", lsock, mem->map, w->br, w->or);
1093 1102
1094 if(mem->flags & MAP_ACTIVE) { 1103 if (mem->flags & MAP_ACTIVE) {
1095 /* get the new base address */ 1104 /* get the new base address */
1096 mem->static_start = PCMCIA_MEM_WIN_BASE + 1105 mem->static_start = PCMCIA_MEM_WIN_BASE +
1097 (PCMCIA_MEM_WIN_SIZE * winnr) 1106 (PCMCIA_MEM_WIN_SIZE * winnr)
1098 + mem->card_start; 1107 + mem->card_start;
1099 } 1108 }
1100 1109
1101 dprintk("SetMemMap(%d, %d, %#2.2x, %d ns, " 1110 dprintk("SetMemMap(%d, %d, %#2.2x, %d ns, "
1102 "%#5.5lx, %#5.5x)\n", lsock, mem->map, mem->flags, 1111 "%#5.5lx, %#5.5x)\n", lsock, mem->map, mem->flags,
1103 mem->speed, mem->static_start, mem->card_start); 1112 mem->speed, mem->static_start, mem->card_start);
1104 1113
1105 /* copy the struct and modify the copy */ 1114 /* copy the struct and modify the copy */
1106 1115
1107 old = &s->mem_win[mem->map]; 1116 old = &s->mem_win[mem->map];
1108 1117
1109 *old = *mem; 1118 *old = *mem;
1110 old->flags &= (MAP_ATTRIB 1119 old->flags &= (MAP_ATTRIB | MAP_WRPROT | MAP_16BIT | MAP_ACTIVE);
1111 | MAP_WRPROT
1112 | MAP_16BIT
1113 | MAP_ACTIVE);
1114 1120
1115 return 0; 1121 return 0;
1116} 1122}
@@ -1121,7 +1127,7 @@ static int m8xx_sock_init(struct pcmcia_socket *sock)
1121 pccard_io_map io = { 0, 0, 0, 0, 1 }; 1127 pccard_io_map io = { 0, 0, 0, 0, 1 };
1122 pccard_mem_map mem = { 0, 0, 0, 0, 0, 0 }; 1128 pccard_mem_map mem = { 0, 0, 0, 0, 0, 0 };
1123 1129
1124 dprintk( "sock_init(%d)\n", s); 1130 dprintk("sock_init(%d)\n", s);
1125 1131
1126 m8xx_set_socket(sock, &dead_socket); 1132 m8xx_set_socket(sock, &dead_socket);
1127 for (i = 0; i < PCMCIA_IO_WIN_NO; i++) { 1133 for (i = 0; i < PCMCIA_IO_WIN_NO; i++) {
@@ -1143,7 +1149,7 @@ static int m8xx_sock_suspend(struct pcmcia_socket *sock)
1143} 1149}
1144 1150
1145static struct pccard_operations m8xx_services = { 1151static struct pccard_operations m8xx_services = {
1146 .init = m8xx_sock_init, 1152 .init = m8xx_sock_init,
1147 .suspend = m8xx_sock_suspend, 1153 .suspend = m8xx_sock_suspend,
1148 .get_status = m8xx_get_status, 1154 .get_status = m8xx_get_status,
1149 .set_socket = m8xx_set_socket, 1155 .set_socket = m8xx_set_socket,
@@ -1151,7 +1157,8 @@ static struct pccard_operations m8xx_services = {
1151 .set_mem_map = m8xx_set_mem_map, 1157 .set_mem_map = m8xx_set_mem_map,
1152}; 1158};
1153 1159
1154static int __init m8xx_probe(struct of_device *ofdev, const struct of_device_id *match) 1160static int __init m8xx_probe(struct of_device *ofdev,
1161 const struct of_device_id *match)
1155{ 1162{
1156 struct pcmcia_win *w; 1163 struct pcmcia_win *w;
1157 unsigned int i, m, hwirq; 1164 unsigned int i, m, hwirq;
@@ -1162,49 +1169,50 @@ static int __init m8xx_probe(struct of_device *ofdev, const struct of_device_id
1162 pcmcia_info("%s\n", version); 1169 pcmcia_info("%s\n", version);
1163 1170
1164 pcmcia = of_iomap(np, 0); 1171 pcmcia = of_iomap(np, 0);
1165 if(pcmcia == NULL) 1172 if (pcmcia == NULL)
1166 return -EINVAL; 1173 return -EINVAL;
1167 1174
1168 pcmcia_schlvl = irq_of_parse_and_map(np, 0); 1175 pcmcia_schlvl = irq_of_parse_and_map(np, 0);
1169 hwirq = irq_map[pcmcia_schlvl].hwirq; 1176 hwirq = irq_map[pcmcia_schlvl].hwirq;
1170 if (pcmcia_schlvl < 0) 1177 if (pcmcia_schlvl < 0)
1171 return -EINVAL; 1178 return -EINVAL;
1172 1179
1173 m8xx_pgcrx[0] = &pcmcia->pcmc_pgcra; 1180 m8xx_pgcrx[0] = &pcmcia->pcmc_pgcra;
1174 m8xx_pgcrx[1] = &pcmcia->pcmc_pgcrb; 1181 m8xx_pgcrx[1] = &pcmcia->pcmc_pgcrb;
1175 1182
1176
1177 pcmcia_info(PCMCIA_BOARD_MSG " using " PCMCIA_SLOT_MSG 1183 pcmcia_info(PCMCIA_BOARD_MSG " using " PCMCIA_SLOT_MSG
1178 " with IRQ %u (%d). \n", pcmcia_schlvl, hwirq); 1184 " with IRQ %u (%d). \n", pcmcia_schlvl, hwirq);
1179 1185
1180 /* Configure Status change interrupt */ 1186 /* Configure Status change interrupt */
1181 1187
1182 if(request_irq(pcmcia_schlvl, m8xx_interrupt, IRQF_SHARED, 1188 if (request_irq(pcmcia_schlvl, m8xx_interrupt, IRQF_SHARED,
1183 driver_name, socket)) { 1189 driver_name, socket)) {
1184 pcmcia_error("Cannot allocate IRQ %u for SCHLVL!\n", 1190 pcmcia_error("Cannot allocate IRQ %u for SCHLVL!\n",
1185 pcmcia_schlvl); 1191 pcmcia_schlvl);
1186 return -1; 1192 return -1;
1187 } 1193 }
1188 1194
1189 w = (void *) &pcmcia->pcmc_pbr0; 1195 w = (void *)&pcmcia->pcmc_pbr0;
1190 1196
1191 out_be32(&pcmcia->pcmc_pscr, M8XX_PCMCIA_MASK(0)| M8XX_PCMCIA_MASK(1)); 1197 out_be32(&pcmcia->pcmc_pscr, M8XX_PCMCIA_MASK(0) | M8XX_PCMCIA_MASK(1));
1192 clrbits32(&pcmcia->pcmc_per, M8XX_PCMCIA_MASK(0) | M8XX_PCMCIA_MASK(1)); 1198 clrbits32(&pcmcia->pcmc_per, M8XX_PCMCIA_MASK(0) | M8XX_PCMCIA_MASK(1));
1193 1199
1194 /* connect interrupt and disable CxOE */ 1200 /* connect interrupt and disable CxOE */
1195 1201
1196 out_be32(M8XX_PGCRX(0), M8XX_PGCRX_CXOE | (mk_int_int_mask(hwirq) << 16)); 1202 out_be32(M8XX_PGCRX(0),
1197 out_be32(M8XX_PGCRX(1), M8XX_PGCRX_CXOE | (mk_int_int_mask(hwirq) << 16)); 1203 M8XX_PGCRX_CXOE | (mk_int_int_mask(hwirq) << 16));
1204 out_be32(M8XX_PGCRX(1),
1205 M8XX_PGCRX_CXOE | (mk_int_int_mask(hwirq) << 16));
1198 1206
1199 /* intialize the fixed memory windows */ 1207 /* intialize the fixed memory windows */
1200 1208
1201 for(i = 0; i < PCMCIA_SOCKETS_NO; i++){ 1209 for (i = 0; i < PCMCIA_SOCKETS_NO; i++) {
1202 for (m = 0; m < PCMCIA_MEM_WIN_NO; m++) { 1210 for (m = 0; m < PCMCIA_MEM_WIN_NO; m++) {
1203 out_be32(&w->br, PCMCIA_MEM_WIN_BASE + 1211 out_be32(&w->br, PCMCIA_MEM_WIN_BASE +
1204 (PCMCIA_MEM_WIN_SIZE 1212 (PCMCIA_MEM_WIN_SIZE
1205 * (m + i * PCMCIA_MEM_WIN_NO))); 1213 * (m + i * PCMCIA_MEM_WIN_NO)));
1206 1214
1207 out_be32(&w->or, 0); /* set to not valid */ 1215 out_be32(&w->or, 0); /* set to not valid */
1208 1216
1209 w++; 1217 w++;
1210 } 1218 }
@@ -1218,10 +1226,11 @@ static int __init m8xx_probe(struct of_device *ofdev, const struct of_device_id
1218 hardware_enable(0); 1226 hardware_enable(0);
1219 hardware_enable(1); 1227 hardware_enable(1);
1220 1228
1221 for (i = 0 ; i < PCMCIA_SOCKETS_NO; i++) { 1229 for (i = 0; i < PCMCIA_SOCKETS_NO; i++) {
1222 socket[i].slot = i; 1230 socket[i].slot = i;
1223 socket[i].socket.owner = THIS_MODULE; 1231 socket[i].socket.owner = THIS_MODULE;
1224 socket[i].socket.features = SS_CAP_PCCARD | SS_CAP_MEM_ALIGN | SS_CAP_STATIC_MAP; 1232 socket[i].socket.features =
1233 SS_CAP_PCCARD | SS_CAP_MEM_ALIGN | SS_CAP_STATIC_MAP;
1225 socket[i].socket.irq_mask = 0x000; 1234 socket[i].socket.irq_mask = 0x000;
1226 socket[i].socket.map_size = 0x1000; 1235 socket[i].socket.map_size = 0x1000;
1227 socket[i].socket.io_offset = 0; 1236 socket[i].socket.io_offset = 0;
@@ -1234,7 +1243,6 @@ static int __init m8xx_probe(struct of_device *ofdev, const struct of_device_id
1234 socket[i].bus_freq = ppc_proc_freq; 1243 socket[i].bus_freq = ppc_proc_freq;
1235 socket[i].hwirq = hwirq; 1244 socket[i].hwirq = hwirq;
1236 1245
1237
1238 } 1246 }
1239 1247
1240 for (i = 0; i < PCMCIA_SOCKETS_NO; i++) { 1248 for (i = 0; i < PCMCIA_SOCKETS_NO; i++) {
@@ -1246,25 +1254,25 @@ static int __init m8xx_probe(struct of_device *ofdev, const struct of_device_id
1246 return 0; 1254 return 0;
1247} 1255}
1248 1256
1249static int m8xx_remove(struct of_device* ofdev) 1257static int m8xx_remove(struct of_device *ofdev)
1250{ 1258{
1251 u32 m, i; 1259 u32 m, i;
1252 struct pcmcia_win *w; 1260 struct pcmcia_win *w;
1253 pcmconf8xx_t *pcmcia = socket[0].pcmcia; 1261 pcmconf8xx_t *pcmcia = socket[0].pcmcia;
1254 1262
1255 for (i = 0; i < PCMCIA_SOCKETS_NO; i++) { 1263 for (i = 0; i < PCMCIA_SOCKETS_NO; i++) {
1256 w = (void *) &pcmcia->pcmc_pbr0; 1264 w = (void *)&pcmcia->pcmc_pbr0;
1257 1265
1258 out_be32(&pcmcia->pcmc_pscr, M8XX_PCMCIA_MASK(i)); 1266 out_be32(&pcmcia->pcmc_pscr, M8XX_PCMCIA_MASK(i));
1259 out_be32(&pcmcia->pcmc_per, 1267 out_be32(&pcmcia->pcmc_per,
1260 in_be32(&pcmcia->pcmc_per) & ~M8XX_PCMCIA_MASK(i)); 1268 in_be32(&pcmcia->pcmc_per) & ~M8XX_PCMCIA_MASK(i));
1261 1269
1262 /* turn off interrupt and disable CxOE */ 1270 /* turn off interrupt and disable CxOE */
1263 out_be32(M8XX_PGCRX(i), M8XX_PGCRX_CXOE); 1271 out_be32(M8XX_PGCRX(i), M8XX_PGCRX_CXOE);
1264 1272
1265 /* turn off memory windows */ 1273 /* turn off memory windows */
1266 for (m = 0; m < PCMCIA_MEM_WIN_NO; m++) { 1274 for (m = 0; m < PCMCIA_MEM_WIN_NO; m++) {
1267 out_be32(&w->or, 0); /* set to not valid */ 1275 out_be32(&w->or, 0); /* set to not valid */
1268 w++; 1276 w++;
1269 } 1277 }
1270 1278
@@ -1299,21 +1307,21 @@ static int m8xx_resume(struct platform_device *pdev)
1299 1307
1300static struct of_device_id m8xx_pcmcia_match[] = { 1308static struct of_device_id m8xx_pcmcia_match[] = {
1301 { 1309 {
1302 .type = "pcmcia", 1310 .type = "pcmcia",
1303 .compatible = "fsl,pq-pcmcia", 1311 .compatible = "fsl,pq-pcmcia",
1304 }, 1312 },
1305 {}, 1313 {},
1306}; 1314};
1307 1315
1308MODULE_DEVICE_TABLE(of, m8xx_pcmcia_match); 1316MODULE_DEVICE_TABLE(of, m8xx_pcmcia_match);
1309 1317
1310static struct of_platform_driver m8xx_pcmcia_driver = { 1318static struct of_platform_driver m8xx_pcmcia_driver = {
1311 .name = (char *) driver_name, 1319 .name = (char *)driver_name,
1312 .match_table = m8xx_pcmcia_match, 1320 .match_table = m8xx_pcmcia_match,
1313 .probe = m8xx_probe, 1321 .probe = m8xx_probe,
1314 .remove = m8xx_remove, 1322 .remove = m8xx_remove,
1315 .suspend = m8xx_suspend, 1323 .suspend = m8xx_suspend,
1316 .resume = m8xx_resume, 1324 .resume = m8xx_resume,
1317}; 1325};
1318 1326
1319static int __init m8xx_init(void) 1327static int __init m8xx_init(void)
diff --git a/drivers/pnp/pnpbios/core.c b/drivers/pnp/pnpbios/core.c
index 3a201b77b963..03baf1c64a2e 100644
--- a/drivers/pnp/pnpbios/core.c
+++ b/drivers/pnp/pnpbios/core.c
@@ -160,6 +160,7 @@ static int pnp_dock_thread(void * unused)
160{ 160{
161 static struct pnp_docking_station_info now; 161 static struct pnp_docking_station_info now;
162 int docked = -1, d = 0; 162 int docked = -1, d = 0;
163 set_freezable();
163 while (!unloading) 164 while (!unloading)
164 { 165 {
165 int status; 166 int status;
diff --git a/drivers/rtc/Kconfig b/drivers/rtc/Kconfig
index 83b071b6ece4..cea401feb0f3 100644
--- a/drivers/rtc/Kconfig
+++ b/drivers/rtc/Kconfig
@@ -10,7 +10,6 @@ config RTC_LIB
10 10
11config RTC_CLASS 11config RTC_CLASS
12 tristate "RTC class" 12 tristate "RTC class"
13 depends on EXPERIMENTAL
14 default n 13 default n
15 select RTC_LIB 14 select RTC_LIB
16 help 15 help
@@ -119,7 +118,7 @@ config RTC_DRV_TEST
119 will be called rtc-test. 118 will be called rtc-test.
120 119
121comment "I2C RTC drivers" 120comment "I2C RTC drivers"
122 depends on RTC_CLASS 121 depends on RTC_CLASS && I2C
123 122
124config RTC_DRV_DS1307 123config RTC_DRV_DS1307
125 tristate "Dallas/Maxim DS1307/37/38/39/40, ST M41T00" 124 tristate "Dallas/Maxim DS1307/37/38/39/40, ST M41T00"
@@ -160,11 +159,11 @@ config RTC_DRV_MAX6900
160 will be called rtc-max6900. 159 will be called rtc-max6900.
161 160
162config RTC_DRV_RS5C372 161config RTC_DRV_RS5C372
163 tristate "Ricoh RS5C372A/B" 162 tristate "Ricoh RS5C372A/B, RV5C386, RV5C387A"
164 depends on RTC_CLASS && I2C 163 depends on RTC_CLASS && I2C
165 help 164 help
166 If you say yes here you get support for the 165 If you say yes here you get support for the
167 Ricoh RS5C372A and RS5C372B RTC chips. 166 Ricoh RS5C372A, RS5C372B, RV5C386, and RV5C387A RTC chips.
168 167
169 This driver can also be built as a module. If so, the module 168 This driver can also be built as a module. If so, the module
170 will be called rtc-rs5c372. 169 will be called rtc-rs5c372.
@@ -213,12 +212,40 @@ config RTC_DRV_PCF8583
213 This driver can also be built as a module. If so, the module 212 This driver can also be built as a module. If so, the module
214 will be called rtc-pcf8583. 213 will be called rtc-pcf8583.
215 214
215config RTC_DRV_M41T80
216 tristate "ST M41T80 series RTC"
217 depends on RTC_CLASS && I2C
218 help
219 If you say Y here you will get support for the
220 ST M41T80 RTC chips series. Currently following chips are
221 supported: M41T80, M41T81, M41T82, M41T83, M41ST84, M41ST85
222 and M41ST87.
223
224 This driver can also be built as a module. If so, the module
225 will be called rtc-m41t80.
226
227config RTC_DRV_M41T80_WDT
228 bool "ST M41T80 series RTC watchdog timer"
229 depends on RTC_DRV_M41T80
230 help
231 If you say Y here you will get support for the
232 watchdog timer in ST M41T80 RTC chips series.
233
234config RTC_DRV_TWL92330
235 boolean "TI TWL92330/Menelaus"
236 depends on RTC_CLASS && I2C && MENELAUS
237 help
238 If you say yes here you get support for the RTC on the
239 TWL92330 "Menelaus" power mangement chip, used with OMAP2
240 platforms. The support is integrated with the rest of
241 the Menelaus driver; it's not separate module.
242
216comment "SPI RTC drivers" 243comment "SPI RTC drivers"
217 depends on RTC_CLASS 244 depends on RTC_CLASS && SPI_MASTER
218 245
219config RTC_DRV_RS5C348 246config RTC_DRV_RS5C348
220 tristate "Ricoh RS5C348A/B" 247 tristate "Ricoh RS5C348A/B"
221 depends on RTC_CLASS && SPI 248 depends on RTC_CLASS && SPI_MASTER
222 help 249 help
223 If you say yes here you get support for the 250 If you say yes here you get support for the
224 Ricoh RS5C348A and RS5C348B RTC chips. 251 Ricoh RS5C348A and RS5C348B RTC chips.
@@ -228,7 +255,7 @@ config RTC_DRV_RS5C348
228 255
229config RTC_DRV_MAX6902 256config RTC_DRV_MAX6902
230 tristate "Maxim 6902" 257 tristate "Maxim 6902"
231 depends on RTC_CLASS && SPI 258 depends on RTC_CLASS && SPI_MASTER
232 help 259 help
233 If you say yes here you will get support for the 260 If you say yes here you will get support for the
234 Maxim MAX6902 SPI RTC chip. 261 Maxim MAX6902 SPI RTC chip.
@@ -262,6 +289,12 @@ config RTC_DRV_CMOS
262 This driver can also be built as a module. If so, the module 289 This driver can also be built as a module. If so, the module
263 will be called rtc-cmos. 290 will be called rtc-cmos.
264 291
292config RTC_DRV_DS1216
293 tristate "Dallas DS1216"
294 depends on RTC_CLASS && SNI_RM
295 help
296 If you say yes here you get support for the Dallas DS1216 RTC chips.
297
265config RTC_DRV_DS1553 298config RTC_DRV_DS1553
266 tristate "Dallas DS1553" 299 tristate "Dallas DS1553"
267 depends on RTC_CLASS 300 depends on RTC_CLASS
@@ -292,6 +325,16 @@ config RTC_DRV_M48T86
292 This driver can also be built as a module. If so, the module 325 This driver can also be built as a module. If so, the module
293 will be called rtc-m48t86. 326 will be called rtc-m48t86.
294 327
328config RTC_DRV_M48T59
329 tristate "ST M48T59"
330 depends on RTC_CLASS
331 help
332 If you say Y here you will get support for the
333 ST M48T59 RTC chip.
334
335 This driver can also be built as a module, if so, the module
336 will be called "rtc-m48t59".
337
295config RTC_DRV_V3020 338config RTC_DRV_V3020
296 tristate "EM Microelectronic V3020" 339 tristate "EM Microelectronic V3020"
297 depends on RTC_CLASS 340 depends on RTC_CLASS
@@ -379,6 +422,13 @@ config RTC_DRV_PL031
379 To compile this driver as a module, choose M here: the 422 To compile this driver as a module, choose M here: the
380 module will be called rtc-pl031. 423 module will be called rtc-pl031.
381 424
425config RTC_DRV_AT32AP700X
426 tristate "AT32AP700X series RTC"
427 depends on RTC_CLASS && PLATFORM_AT32AP
428 help
429 Driver for the internal RTC (Realtime Clock) on Atmel AVR32
430 AT32AP700x family processors.
431
382config RTC_DRV_AT91RM9200 432config RTC_DRV_AT91RM9200
383 tristate "AT91RM9200" 433 tristate "AT91RM9200"
384 depends on RTC_CLASS && ARCH_AT91RM9200 434 depends on RTC_CLASS && ARCH_AT91RM9200
diff --git a/drivers/rtc/Makefile b/drivers/rtc/Makefile
index a1afbc236073..3109af9a1651 100644
--- a/drivers/rtc/Makefile
+++ b/drivers/rtc/Makefile
@@ -19,6 +19,7 @@ obj-$(CONFIG_RTC_DRV_CMOS) += rtc-cmos.o
19obj-$(CONFIG_RTC_DRV_X1205) += rtc-x1205.o 19obj-$(CONFIG_RTC_DRV_X1205) += rtc-x1205.o
20obj-$(CONFIG_RTC_DRV_ISL1208) += rtc-isl1208.o 20obj-$(CONFIG_RTC_DRV_ISL1208) += rtc-isl1208.o
21obj-$(CONFIG_RTC_DRV_TEST) += rtc-test.o 21obj-$(CONFIG_RTC_DRV_TEST) += rtc-test.o
22obj-$(CONFIG_RTC_DRV_AT32AP700X) += rtc-at32ap700x.o
22obj-$(CONFIG_RTC_DRV_DS1307) += rtc-ds1307.o 23obj-$(CONFIG_RTC_DRV_DS1307) += rtc-ds1307.o
23obj-$(CONFIG_RTC_DRV_DS1672) += rtc-ds1672.o 24obj-$(CONFIG_RTC_DRV_DS1672) += rtc-ds1672.o
24obj-$(CONFIG_RTC_DRV_DS1742) += rtc-ds1742.o 25obj-$(CONFIG_RTC_DRV_DS1742) += rtc-ds1742.o
@@ -28,6 +29,7 @@ obj-$(CONFIG_RTC_DRV_PCF8583) += rtc-pcf8583.o
28obj-$(CONFIG_RTC_DRV_RS5C372) += rtc-rs5c372.o 29obj-$(CONFIG_RTC_DRV_RS5C372) += rtc-rs5c372.o
29obj-$(CONFIG_RTC_DRV_S3C) += rtc-s3c.o 30obj-$(CONFIG_RTC_DRV_S3C) += rtc-s3c.o
30obj-$(CONFIG_RTC_DRV_RS5C348) += rtc-rs5c348.o 31obj-$(CONFIG_RTC_DRV_RS5C348) += rtc-rs5c348.o
32obj-$(CONFIG_RTC_DRV_M41T80) += rtc-m41t80.o
31obj-$(CONFIG_RTC_DRV_M48T86) += rtc-m48t86.o 33obj-$(CONFIG_RTC_DRV_M48T86) += rtc-m48t86.o
32obj-$(CONFIG_RTC_DRV_DS1553) += rtc-ds1553.o 34obj-$(CONFIG_RTC_DRV_DS1553) += rtc-ds1553.o
33obj-$(CONFIG_RTC_DRV_RS5C313) += rtc-rs5c313.o 35obj-$(CONFIG_RTC_DRV_RS5C313) += rtc-rs5c313.o
@@ -41,3 +43,5 @@ obj-$(CONFIG_RTC_DRV_V3020) += rtc-v3020.o
41obj-$(CONFIG_RTC_DRV_AT91RM9200)+= rtc-at91rm9200.o 43obj-$(CONFIG_RTC_DRV_AT91RM9200)+= rtc-at91rm9200.o
42obj-$(CONFIG_RTC_DRV_SH) += rtc-sh.o 44obj-$(CONFIG_RTC_DRV_SH) += rtc-sh.o
43obj-$(CONFIG_RTC_DRV_BFIN) += rtc-bfin.o 45obj-$(CONFIG_RTC_DRV_BFIN) += rtc-bfin.o
46obj-$(CONFIG_RTC_DRV_M48T59) += rtc-m48t59.o
47obj-$(CONFIG_RTC_DRV_DS1216) += rtc-ds1216.o
diff --git a/drivers/rtc/rtc-at32ap700x.c b/drivers/rtc/rtc-at32ap700x.c
new file mode 100644
index 000000000000..2999214ca534
--- /dev/null
+++ b/drivers/rtc/rtc-at32ap700x.c
@@ -0,0 +1,317 @@
1/*
2 * An RTC driver for the AVR32 AT32AP700x processor series.
3 *
4 * Copyright (C) 2007 Atmel Corporation
5 *
6 * This program is free software; you can redistribute it and/or modify it
7 * under the terms of the GNU General Public License version 2 as published
8 * by the Free Software Foundation.
9 */
10
11#include <linux/module.h>
12#include <linux/kernel.h>
13#include <linux/platform_device.h>
14#include <linux/rtc.h>
15#include <linux/io.h>
16
17/*
18 * This is a bare-bones RTC. It runs during most system sleep states, but has
19 * no battery backup and gets reset during system restart. It must be
20 * initialized from an external clock (network, I2C, etc) before it can be of
21 * much use.
22 *
23 * The alarm functionality is limited by the hardware, not supporting
24 * periodic interrupts.
25 */
26
27#define RTC_CTRL 0x00
28#define RTC_CTRL_EN 0
29#define RTC_CTRL_PCLR 1
30#define RTC_CTRL_TOPEN 2
31#define RTC_CTRL_PSEL 8
32
33#define RTC_VAL 0x04
34
35#define RTC_TOP 0x08
36
37#define RTC_IER 0x10
38#define RTC_IER_TOPI 0
39
40#define RTC_IDR 0x14
41#define RTC_IDR_TOPI 0
42
43#define RTC_IMR 0x18
44#define RTC_IMR_TOPI 0
45
46#define RTC_ISR 0x1c
47#define RTC_ISR_TOPI 0
48
49#define RTC_ICR 0x20
50#define RTC_ICR_TOPI 0
51
52#define RTC_BIT(name) (1 << RTC_##name)
53#define RTC_BF(name, value) ((value) << RTC_##name)
54
55#define rtc_readl(dev, reg) \
56 __raw_readl((dev)->regs + RTC_##reg)
57#define rtc_writel(dev, reg, value) \
58 __raw_writel((value), (dev)->regs + RTC_##reg)
59
60struct rtc_at32ap700x {
61 struct rtc_device *rtc;
62 void __iomem *regs;
63 unsigned long alarm_time;
64 unsigned long irq;
65 /* Protect against concurrent register access. */
66 spinlock_t lock;
67};
68
69static int at32_rtc_readtime(struct device *dev, struct rtc_time *tm)
70{
71 struct rtc_at32ap700x *rtc = dev_get_drvdata(dev);
72 unsigned long now;
73
74 now = rtc_readl(rtc, VAL);
75 rtc_time_to_tm(now, tm);
76
77 return 0;
78}
79
80static int at32_rtc_settime(struct device *dev, struct rtc_time *tm)
81{
82 struct rtc_at32ap700x *rtc = dev_get_drvdata(dev);
83 unsigned long now;
84 int ret;
85
86 ret = rtc_tm_to_time(tm, &now);
87 if (ret == 0)
88 rtc_writel(rtc, VAL, now);
89
90 return ret;
91}
92
93static int at32_rtc_readalarm(struct device *dev, struct rtc_wkalrm *alrm)
94{
95 struct rtc_at32ap700x *rtc = dev_get_drvdata(dev);
96
97 rtc_time_to_tm(rtc->alarm_time, &alrm->time);
98 alrm->pending = rtc_readl(rtc, IMR) & RTC_BIT(IMR_TOPI) ? 1 : 0;
99
100 return 0;
101}
102
103static int at32_rtc_setalarm(struct device *dev, struct rtc_wkalrm *alrm)
104{
105 struct rtc_at32ap700x *rtc = dev_get_drvdata(dev);
106 unsigned long rtc_unix_time;
107 unsigned long alarm_unix_time;
108 int ret;
109
110 rtc_unix_time = rtc_readl(rtc, VAL);
111
112 ret = rtc_tm_to_time(&alrm->time, &alarm_unix_time);
113 if (ret)
114 return ret;
115
116 if (alarm_unix_time < rtc_unix_time)
117 return -EINVAL;
118
119 spin_lock_irq(&rtc->lock);
120 rtc->alarm_time = alarm_unix_time;
121 rtc_writel(rtc, TOP, rtc->alarm_time);
122 if (alrm->pending)
123 rtc_writel(rtc, CTRL, rtc_readl(rtc, CTRL)
124 | RTC_BIT(CTRL_TOPEN));
125 else
126 rtc_writel(rtc, CTRL, rtc_readl(rtc, CTRL)
127 & ~RTC_BIT(CTRL_TOPEN));
128 spin_unlock_irq(&rtc->lock);
129
130 return ret;
131}
132
133static int at32_rtc_ioctl(struct device *dev, unsigned int cmd,
134 unsigned long arg)
135{
136 struct rtc_at32ap700x *rtc = dev_get_drvdata(dev);
137 int ret = 0;
138
139 spin_lock_irq(&rtc->lock);
140
141 switch (cmd) {
142 case RTC_AIE_ON:
143 if (rtc_readl(rtc, VAL) > rtc->alarm_time) {
144 ret = -EINVAL;
145 break;
146 }
147 rtc_writel(rtc, CTRL, rtc_readl(rtc, CTRL)
148 | RTC_BIT(CTRL_TOPEN));
149 rtc_writel(rtc, ICR, RTC_BIT(ICR_TOPI));
150 rtc_writel(rtc, IER, RTC_BIT(IER_TOPI));
151 break;
152 case RTC_AIE_OFF:
153 rtc_writel(rtc, CTRL, rtc_readl(rtc, CTRL)
154 & ~RTC_BIT(CTRL_TOPEN));
155 rtc_writel(rtc, IDR, RTC_BIT(IDR_TOPI));
156 rtc_writel(rtc, ICR, RTC_BIT(ICR_TOPI));
157 break;
158 default:
159 ret = -ENOIOCTLCMD;
160 break;
161 }
162
163 spin_unlock_irq(&rtc->lock);
164
165 return ret;
166}
167
168static irqreturn_t at32_rtc_interrupt(int irq, void *dev_id)
169{
170 struct rtc_at32ap700x *rtc = (struct rtc_at32ap700x *)dev_id;
171 unsigned long isr = rtc_readl(rtc, ISR);
172 unsigned long events = 0;
173 int ret = IRQ_NONE;
174
175 spin_lock(&rtc->lock);
176
177 if (isr & RTC_BIT(ISR_TOPI)) {
178 rtc_writel(rtc, ICR, RTC_BIT(ICR_TOPI));
179 rtc_writel(rtc, IDR, RTC_BIT(IDR_TOPI));
180 rtc_writel(rtc, CTRL, rtc_readl(rtc, CTRL)
181 & ~RTC_BIT(CTRL_TOPEN));
182 rtc_writel(rtc, VAL, rtc->alarm_time);
183 events = RTC_AF | RTC_IRQF;
184 rtc_update_irq(rtc->rtc, 1, events);
185 ret = IRQ_HANDLED;
186 }
187
188 spin_unlock(&rtc->lock);
189
190 return ret;
191}
192
193static struct rtc_class_ops at32_rtc_ops = {
194 .ioctl = at32_rtc_ioctl,
195 .read_time = at32_rtc_readtime,
196 .set_time = at32_rtc_settime,
197 .read_alarm = at32_rtc_readalarm,
198 .set_alarm = at32_rtc_setalarm,
199};
200
201static int __init at32_rtc_probe(struct platform_device *pdev)
202{
203 struct resource *regs;
204 struct rtc_at32ap700x *rtc;
205 int irq = -1;
206 int ret;
207
208 rtc = kzalloc(sizeof(struct rtc_at32ap700x), GFP_KERNEL);
209 if (!rtc) {
210 dev_dbg(&pdev->dev, "out of memory\n");
211 return -ENOMEM;
212 }
213
214 regs = platform_get_resource(pdev, IORESOURCE_MEM, 0);
215 if (!regs) {
216 dev_dbg(&pdev->dev, "no mmio resource defined\n");
217 ret = -ENXIO;
218 goto out;
219 }
220
221 irq = platform_get_irq(pdev, 0);
222 if (irq < 0) {
223 dev_dbg(&pdev->dev, "could not get irq\n");
224 ret = -ENXIO;
225 goto out;
226 }
227
228 ret = request_irq(irq, at32_rtc_interrupt, IRQF_SHARED, "rtc", rtc);
229 if (ret) {
230 dev_dbg(&pdev->dev, "could not request irq %d\n", irq);
231 goto out;
232 }
233
234 rtc->irq = irq;
235 rtc->regs = ioremap(regs->start, regs->end - regs->start + 1);
236 if (!rtc->regs) {
237 ret = -ENOMEM;
238 dev_dbg(&pdev->dev, "could not map I/O memory\n");
239 goto out_free_irq;
240 }
241 spin_lock_init(&rtc->lock);
242
243 /*
244 * Maybe init RTC: count from zero at 1 Hz, disable wrap irq.
245 *
246 * Do not reset VAL register, as it can hold an old time
247 * from last JTAG reset.
248 */
249 if (!(rtc_readl(rtc, CTRL) & RTC_BIT(CTRL_EN))) {
250 rtc_writel(rtc, CTRL, RTC_BIT(CTRL_PCLR));
251 rtc_writel(rtc, IDR, RTC_BIT(IDR_TOPI));
252 rtc_writel(rtc, CTRL, RTC_BF(CTRL_PSEL, 0xe)
253 | RTC_BIT(CTRL_EN));
254 }
255
256 rtc->rtc = rtc_device_register(pdev->name, &pdev->dev,
257 &at32_rtc_ops, THIS_MODULE);
258 if (IS_ERR(rtc->rtc)) {
259 dev_dbg(&pdev->dev, "could not register rtc device\n");
260 ret = PTR_ERR(rtc->rtc);
261 goto out_iounmap;
262 }
263
264 platform_set_drvdata(pdev, rtc);
265
266 dev_info(&pdev->dev, "Atmel RTC for AT32AP700x at %08lx irq %ld\n",
267 (unsigned long)rtc->regs, rtc->irq);
268
269 return 0;
270
271out_iounmap:
272 iounmap(rtc->regs);
273out_free_irq:
274 free_irq(irq, rtc);
275out:
276 kfree(rtc);
277 return ret;
278}
279
280static int __exit at32_rtc_remove(struct platform_device *pdev)
281{
282 struct rtc_at32ap700x *rtc = platform_get_drvdata(pdev);
283
284 free_irq(rtc->irq, rtc);
285 iounmap(rtc->regs);
286 rtc_device_unregister(rtc->rtc);
287 kfree(rtc);
288 platform_set_drvdata(pdev, NULL);
289
290 return 0;
291}
292
293MODULE_ALIAS("at32ap700x_rtc");
294
295static struct platform_driver at32_rtc_driver = {
296 .remove = __exit_p(at32_rtc_remove),
297 .driver = {
298 .name = "at32ap700x_rtc",
299 .owner = THIS_MODULE,
300 },
301};
302
303static int __init at32_rtc_init(void)
304{
305 return platform_driver_probe(&at32_rtc_driver, at32_rtc_probe);
306}
307module_init(at32_rtc_init);
308
309static void __exit at32_rtc_exit(void)
310{
311 platform_driver_unregister(&at32_rtc_driver);
312}
313module_exit(at32_rtc_exit);
314
315MODULE_AUTHOR("Hans-Christian Egtvedt <hcegtvedt@atmel.com>");
316MODULE_DESCRIPTION("Real time clock for AVR32 AT32AP700x");
317MODULE_LICENSE("GPL");
diff --git a/drivers/rtc/rtc-dev.c b/drivers/rtc/rtc-dev.c
index f4e5f0040ff7..304535942de2 100644
--- a/drivers/rtc/rtc-dev.c
+++ b/drivers/rtc/rtc-dev.c
@@ -341,6 +341,8 @@ static int rtc_dev_ioctl(struct inode *inode, struct file *file,
341 case RTC_IRQP_READ: 341 case RTC_IRQP_READ:
342 if (ops->irq_set_freq) 342 if (ops->irq_set_freq)
343 err = put_user(rtc->irq_freq, (unsigned long __user *)uarg); 343 err = put_user(rtc->irq_freq, (unsigned long __user *)uarg);
344 else
345 err = -ENOTTY;
344 break; 346 break;
345 347
346 case RTC_IRQP_SET: 348 case RTC_IRQP_SET:
diff --git a/drivers/rtc/rtc-ds1216.c b/drivers/rtc/rtc-ds1216.c
new file mode 100644
index 000000000000..83efb88f8f23
--- /dev/null
+++ b/drivers/rtc/rtc-ds1216.c
@@ -0,0 +1,226 @@
1/*
2 * Dallas DS1216 RTC driver
3 *
4 * Copyright (c) 2007 Thomas Bogendoerfer
5 *
6 */
7
8#include <linux/module.h>
9#include <linux/rtc.h>
10#include <linux/platform_device.h>
11#include <linux/bcd.h>
12
13#define DRV_VERSION "0.1"
14
15struct ds1216_regs {
16 u8 tsec;
17 u8 sec;
18 u8 min;
19 u8 hour;
20 u8 wday;
21 u8 mday;
22 u8 month;
23 u8 year;
24};
25
26#define DS1216_HOUR_1224 (1 << 7)
27#define DS1216_HOUR_AMPM (1 << 5)
28
29struct ds1216_priv {
30 struct rtc_device *rtc;
31 void __iomem *ioaddr;
32 size_t size;
33 unsigned long baseaddr;
34};
35
36static const u8 magic[] = {
37 0xc5, 0x3a, 0xa3, 0x5c, 0xc5, 0x3a, 0xa3, 0x5c
38};
39
40/*
41 * Read the 64 bit we'd like to have - It a series
42 * of 64 bits showing up in the LSB of the base register.
43 *
44 */
45static void ds1216_read(u8 __iomem *ioaddr, u8 *buf)
46{
47 unsigned char c;
48 int i, j;
49
50 for (i = 0; i < 8; i++) {
51 c = 0;
52 for (j = 0; j < 8; j++)
53 c |= (readb(ioaddr) & 0x1) << j;
54 buf[i] = c;
55 }
56}
57
58static void ds1216_write(u8 __iomem *ioaddr, const u8 *buf)
59{
60 unsigned char c;
61 int i, j;
62
63 for (i = 0; i < 8; i++) {
64 c = buf[i];
65 for (j = 0; j < 8; j++) {
66 writeb(c, ioaddr);
67 c = c >> 1;
68 }
69 }
70}
71
72static void ds1216_switch_ds_to_clock(u8 __iomem *ioaddr)
73{
74 /* Reset magic pointer */
75 readb(ioaddr);
76 /* Write 64 bit magic to DS1216 */
77 ds1216_write(ioaddr, magic);
78}
79
80static int ds1216_rtc_read_time(struct device *dev, struct rtc_time *tm)
81{
82 struct platform_device *pdev = to_platform_device(dev);
83 struct ds1216_priv *priv = platform_get_drvdata(pdev);
84 struct ds1216_regs regs;
85
86 ds1216_switch_ds_to_clock(priv->ioaddr);
87 ds1216_read(priv->ioaddr, (u8 *)&regs);
88
89 tm->tm_sec = BCD2BIN(regs.sec);
90 tm->tm_min = BCD2BIN(regs.min);
91 if (regs.hour & DS1216_HOUR_1224) {
92 /* AM/PM mode */
93 tm->tm_hour = BCD2BIN(regs.hour & 0x1f);
94 if (regs.hour & DS1216_HOUR_AMPM)
95 tm->tm_hour += 12;
96 } else
97 tm->tm_hour = BCD2BIN(regs.hour & 0x3f);
98 tm->tm_wday = (regs.wday & 7) - 1;
99 tm->tm_mday = BCD2BIN(regs.mday & 0x3f);
100 tm->tm_mon = BCD2BIN(regs.month & 0x1f);
101 tm->tm_year = BCD2BIN(regs.year);
102 if (tm->tm_year < 70)
103 tm->tm_year += 100;
104 return 0;
105}
106
107static int ds1216_rtc_set_time(struct device *dev, struct rtc_time *tm)
108{
109 struct platform_device *pdev = to_platform_device(dev);
110 struct ds1216_priv *priv = platform_get_drvdata(pdev);
111 struct ds1216_regs regs;
112
113 ds1216_switch_ds_to_clock(priv->ioaddr);
114 ds1216_read(priv->ioaddr, (u8 *)&regs);
115
116 regs.tsec = 0; /* clear 0.1 and 0.01 seconds */
117 regs.sec = BIN2BCD(tm->tm_sec);
118 regs.min = BIN2BCD(tm->tm_min);
119 regs.hour &= DS1216_HOUR_1224;
120 if (regs.hour && tm->tm_hour > 12) {
121 regs.hour |= DS1216_HOUR_AMPM;
122 tm->tm_hour -= 12;
123 }
124 regs.hour |= BIN2BCD(tm->tm_hour);
125 regs.wday &= ~7;
126 regs.wday |= tm->tm_wday;
127 regs.mday = BIN2BCD(tm->tm_mday);
128 regs.month = BIN2BCD(tm->tm_mon);
129 regs.year = BIN2BCD(tm->tm_year % 100);
130
131 ds1216_switch_ds_to_clock(priv->ioaddr);
132 ds1216_write(priv->ioaddr, (u8 *)&regs);
133 return 0;
134}
135
136static const struct rtc_class_ops ds1216_rtc_ops = {
137 .read_time = ds1216_rtc_read_time,
138 .set_time = ds1216_rtc_set_time,
139};
140
141static int __devinit ds1216_rtc_probe(struct platform_device *pdev)
142{
143 struct rtc_device *rtc;
144 struct resource *res;
145 struct ds1216_priv *priv;
146 int ret = 0;
147 u8 dummy[8];
148
149 res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
150 if (!res)
151 return -ENODEV;
152 priv = kzalloc(sizeof *priv, GFP_KERNEL);
153 if (!priv)
154 return -ENOMEM;
155 priv->size = res->end - res->start + 1;
156 if (!request_mem_region(res->start, priv->size, pdev->name)) {
157 ret = -EBUSY;
158 goto out;
159 }
160 priv->baseaddr = res->start;
161 priv->ioaddr = ioremap(priv->baseaddr, priv->size);
162 if (!priv->ioaddr) {
163 ret = -ENOMEM;
164 goto out;
165 }
166 rtc = rtc_device_register("ds1216", &pdev->dev,
167 &ds1216_rtc_ops, THIS_MODULE);
168 if (IS_ERR(rtc)) {
169 ret = PTR_ERR(rtc);
170 goto out;
171 }
172 priv->rtc = rtc;
173 platform_set_drvdata(pdev, priv);
174
175 /* dummy read to get clock into a known state */
176 ds1216_read(priv->ioaddr, dummy);
177 return 0;
178
179out:
180 if (priv->rtc)
181 rtc_device_unregister(priv->rtc);
182 if (priv->ioaddr)
183 iounmap(priv->ioaddr);
184 if (priv->baseaddr)
185 release_mem_region(priv->baseaddr, priv->size);
186 kfree(priv);
187 return ret;
188}
189
190static int __devexit ds1216_rtc_remove(struct platform_device *pdev)
191{
192 struct ds1216_priv *priv = platform_get_drvdata(pdev);
193
194 rtc_device_unregister(priv->rtc);
195 iounmap(priv->ioaddr);
196 release_mem_region(priv->baseaddr, priv->size);
197 kfree(priv);
198 return 0;
199}
200
201static struct platform_driver ds1216_rtc_platform_driver = {
202 .driver = {
203 .name = "rtc-ds1216",
204 .owner = THIS_MODULE,
205 },
206 .probe = ds1216_rtc_probe,
207 .remove = __devexit_p(ds1216_rtc_remove),
208};
209
210static int __init ds1216_rtc_init(void)
211{
212 return platform_driver_register(&ds1216_rtc_platform_driver);
213}
214
215static void __exit ds1216_rtc_exit(void)
216{
217 platform_driver_unregister(&ds1216_rtc_platform_driver);
218}
219
220MODULE_AUTHOR("Thomas Bogendoerfer <tsbogend@alpha.franken.de>");
221MODULE_DESCRIPTION("DS1216 RTC driver");
222MODULE_LICENSE("GPL");
223MODULE_VERSION(DRV_VERSION);
224
225module_init(ds1216_rtc_init);
226module_exit(ds1216_rtc_exit);
diff --git a/drivers/rtc/rtc-ds1307.c b/drivers/rtc/rtc-ds1307.c
index 3f0f7b8fa813..5158a625671f 100644
--- a/drivers/rtc/rtc-ds1307.c
+++ b/drivers/rtc/rtc-ds1307.c
@@ -24,29 +24,29 @@
24 * setting the date and time), Linux can ignore the non-clock features. 24 * setting the date and time), Linux can ignore the non-clock features.
25 * That's a natural job for a factory or repair bench. 25 * That's a natural job for a factory or repair bench.
26 * 26 *
27 * If the I2C "force" mechanism is used, we assume the chip is a ds1337. 27 * This is currently a simple no-alarms driver. If your board has the
28 * (Much better would be board-specific tables of I2C devices, along with 28 * alarm irq wired up on a ds1337 or ds1339, and you want to use that,
29 * the platform_data drivers would use to sort such issues out.) 29 * then look at the rtc-rs5c372 driver for code to steal...
30 */ 30 */
31enum ds_type { 31enum ds_type {
32 unknown = 0, 32 ds_1307,
33 ds_1307, /* or ds1338, ... */ 33 ds_1337,
34 ds_1337, /* or ds1339, ... */ 34 ds_1338,
35 ds_1340, /* or st m41t00, ... */ 35 ds_1339,
36 ds_1340,
37 m41t00,
36 // rs5c372 too? different address... 38 // rs5c372 too? different address...
37}; 39};
38 40
39static unsigned short normal_i2c[] = { 0x68, I2C_CLIENT_END };
40
41I2C_CLIENT_INSMOD;
42
43
44 41
45/* RTC registers don't differ much, except for the century flag */ 42/* RTC registers don't differ much, except for the century flag */
46#define DS1307_REG_SECS 0x00 /* 00-59 */ 43#define DS1307_REG_SECS 0x00 /* 00-59 */
47# define DS1307_BIT_CH 0x80 44# define DS1307_BIT_CH 0x80
45# define DS1340_BIT_nEOSC 0x80
48#define DS1307_REG_MIN 0x01 /* 00-59 */ 46#define DS1307_REG_MIN 0x01 /* 00-59 */
49#define DS1307_REG_HOUR 0x02 /* 00-23, or 1-12{am,pm} */ 47#define DS1307_REG_HOUR 0x02 /* 00-23, or 1-12{am,pm} */
48# define DS1307_BIT_12HR 0x40 /* in REG_HOUR */
49# define DS1307_BIT_PM 0x20 /* in REG_HOUR */
50# define DS1340_BIT_CENTURY_EN 0x80 /* in REG_HOUR */ 50# define DS1340_BIT_CENTURY_EN 0x80 /* in REG_HOUR */
51# define DS1340_BIT_CENTURY 0x40 /* in REG_HOUR */ 51# define DS1340_BIT_CENTURY 0x40 /* in REG_HOUR */
52#define DS1307_REG_WDAY 0x03 /* 01-07 */ 52#define DS1307_REG_WDAY 0x03 /* 01-07 */
@@ -56,11 +56,12 @@ I2C_CLIENT_INSMOD;
56#define DS1307_REG_YEAR 0x06 /* 00-99 */ 56#define DS1307_REG_YEAR 0x06 /* 00-99 */
57 57
58/* Other registers (control, status, alarms, trickle charge, NVRAM, etc) 58/* Other registers (control, status, alarms, trickle charge, NVRAM, etc)
59 * start at 7, and they differ a lot. Only control and status matter for RTC; 59 * start at 7, and they differ a LOT. Only control and status matter for
60 * be careful using them. 60 * basic RTC date and time functionality; be careful using them.
61 */ 61 */
62#define DS1307_REG_CONTROL 0x07 62#define DS1307_REG_CONTROL 0x07 /* or ds1338 */
63# define DS1307_BIT_OUT 0x80 63# define DS1307_BIT_OUT 0x80
64# define DS1338_BIT_OSF 0x20
64# define DS1307_BIT_SQWE 0x10 65# define DS1307_BIT_SQWE 0x10
65# define DS1307_BIT_RS1 0x02 66# define DS1307_BIT_RS1 0x02
66# define DS1307_BIT_RS0 0x01 67# define DS1307_BIT_RS0 0x01
@@ -71,6 +72,13 @@ I2C_CLIENT_INSMOD;
71# define DS1337_BIT_INTCN 0x04 72# define DS1337_BIT_INTCN 0x04
72# define DS1337_BIT_A2IE 0x02 73# define DS1337_BIT_A2IE 0x02
73# define DS1337_BIT_A1IE 0x01 74# define DS1337_BIT_A1IE 0x01
75#define DS1340_REG_CONTROL 0x07
76# define DS1340_BIT_OUT 0x80
77# define DS1340_BIT_FT 0x40
78# define DS1340_BIT_CALIB_SIGN 0x20
79# define DS1340_M_CALIBRATION 0x1f
80#define DS1340_REG_FLAG 0x09
81# define DS1340_BIT_OSF 0x80
74#define DS1337_REG_STATUS 0x0f 82#define DS1337_REG_STATUS 0x0f
75# define DS1337_BIT_OSF 0x80 83# define DS1337_BIT_OSF 0x80
76# define DS1337_BIT_A2I 0x02 84# define DS1337_BIT_A2I 0x02
@@ -84,21 +92,63 @@ struct ds1307 {
84 u8 regs[8]; 92 u8 regs[8];
85 enum ds_type type; 93 enum ds_type type;
86 struct i2c_msg msg[2]; 94 struct i2c_msg msg[2];
87 struct i2c_client client; 95 struct i2c_client *client;
96 struct i2c_client dev;
88 struct rtc_device *rtc; 97 struct rtc_device *rtc;
89}; 98};
90 99
100struct chip_desc {
101 char name[9];
102 unsigned nvram56:1;
103 unsigned alarm:1;
104 enum ds_type type;
105};
106
107static const struct chip_desc chips[] = { {
108 .name = "ds1307",
109 .type = ds_1307,
110 .nvram56 = 1,
111}, {
112 .name = "ds1337",
113 .type = ds_1337,
114 .alarm = 1,
115}, {
116 .name = "ds1338",
117 .type = ds_1338,
118 .nvram56 = 1,
119}, {
120 .name = "ds1339",
121 .type = ds_1339,
122 .alarm = 1,
123}, {
124 .name = "ds1340",
125 .type = ds_1340,
126}, {
127 .name = "m41t00",
128 .type = m41t00,
129}, };
130
131static inline const struct chip_desc *find_chip(const char *s)
132{
133 unsigned i;
134
135 for (i = 0; i < ARRAY_SIZE(chips); i++)
136 if (strnicmp(s, chips[i].name, sizeof chips[i].name) == 0)
137 return &chips[i];
138 return NULL;
139}
91 140
92static int ds1307_get_time(struct device *dev, struct rtc_time *t) 141static int ds1307_get_time(struct device *dev, struct rtc_time *t)
93{ 142{
94 struct ds1307 *ds1307 = dev_get_drvdata(dev); 143 struct ds1307 *ds1307 = dev_get_drvdata(dev);
95 int tmp; 144 int tmp;
96 145
97 /* read the RTC registers all at once */ 146 /* read the RTC date and time registers all at once */
98 ds1307->msg[1].flags = I2C_M_RD; 147 ds1307->msg[1].flags = I2C_M_RD;
99 ds1307->msg[1].len = 7; 148 ds1307->msg[1].len = 7;
100 149
101 tmp = i2c_transfer(ds1307->client.adapter, ds1307->msg, 2); 150 tmp = i2c_transfer(to_i2c_adapter(ds1307->client->dev.parent),
151 ds1307->msg, 2);
102 if (tmp != 2) { 152 if (tmp != 2) {
103 dev_err(dev, "%s error %d\n", "read", tmp); 153 dev_err(dev, "%s error %d\n", "read", tmp);
104 return -EIO; 154 return -EIO;
@@ -129,7 +179,8 @@ static int ds1307_get_time(struct device *dev, struct rtc_time *t)
129 t->tm_hour, t->tm_mday, 179 t->tm_hour, t->tm_mday,
130 t->tm_mon, t->tm_year, t->tm_wday); 180 t->tm_mon, t->tm_year, t->tm_wday);
131 181
132 return 0; 182 /* initial clock setting can be undefined */
183 return rtc_valid_tm(t);
133} 184}
134 185
135static int ds1307_set_time(struct device *dev, struct rtc_time *t) 186static int ds1307_set_time(struct device *dev, struct rtc_time *t)
@@ -157,11 +208,18 @@ static int ds1307_set_time(struct device *dev, struct rtc_time *t)
157 tmp = t->tm_year - 100; 208 tmp = t->tm_year - 100;
158 buf[DS1307_REG_YEAR] = BIN2BCD(tmp); 209 buf[DS1307_REG_YEAR] = BIN2BCD(tmp);
159 210
160 if (ds1307->type == ds_1337) 211 switch (ds1307->type) {
212 case ds_1337:
213 case ds_1339:
161 buf[DS1307_REG_MONTH] |= DS1337_BIT_CENTURY; 214 buf[DS1307_REG_MONTH] |= DS1337_BIT_CENTURY;
162 else if (ds1307->type == ds_1340) 215 break;
216 case ds_1340:
163 buf[DS1307_REG_HOUR] |= DS1340_BIT_CENTURY_EN 217 buf[DS1307_REG_HOUR] |= DS1340_BIT_CENTURY_EN
164 | DS1340_BIT_CENTURY; 218 | DS1340_BIT_CENTURY;
219 break;
220 default:
221 break;
222 }
165 223
166 ds1307->msg[1].flags = 0; 224 ds1307->msg[1].flags = 0;
167 ds1307->msg[1].len = 8; 225 ds1307->msg[1].len = 8;
@@ -170,7 +228,8 @@ static int ds1307_set_time(struct device *dev, struct rtc_time *t)
170 "write", buf[0], buf[1], buf[2], buf[3], 228 "write", buf[0], buf[1], buf[2], buf[3],
171 buf[4], buf[5], buf[6]); 229 buf[4], buf[5], buf[6]);
172 230
173 result = i2c_transfer(ds1307->client.adapter, &ds1307->msg[1], 1); 231 result = i2c_transfer(to_i2c_adapter(ds1307->client->dev.parent),
232 &ds1307->msg[1], 1);
174 if (result != 1) { 233 if (result != 1) {
175 dev_err(dev, "%s error %d\n", "write", tmp); 234 dev_err(dev, "%s error %d\n", "write", tmp);
176 return -EIO; 235 return -EIO;
@@ -185,25 +244,29 @@ static const struct rtc_class_ops ds13xx_rtc_ops = {
185 244
186static struct i2c_driver ds1307_driver; 245static struct i2c_driver ds1307_driver;
187 246
188static int __devinit 247static int __devinit ds1307_probe(struct i2c_client *client)
189ds1307_detect(struct i2c_adapter *adapter, int address, int kind)
190{ 248{
191 struct ds1307 *ds1307; 249 struct ds1307 *ds1307;
192 int err = -ENODEV; 250 int err = -ENODEV;
193 struct i2c_client *client;
194 int tmp; 251 int tmp;
195 252 const struct chip_desc *chip;
196 if (!(ds1307 = kzalloc(sizeof(struct ds1307), GFP_KERNEL))) { 253 struct i2c_adapter *adapter = to_i2c_adapter(client->dev.parent);
197 err = -ENOMEM; 254
198 goto exit; 255 chip = find_chip(client->name);
256 if (!chip) {
257 dev_err(&client->dev, "unknown chip type '%s'\n",
258 client->name);
259 return -ENODEV;
199 } 260 }
200 261
201 client = &ds1307->client; 262 if (!i2c_check_functionality(adapter,
202 client->addr = address; 263 I2C_FUNC_I2C | I2C_FUNC_SMBUS_WRITE_BYTE_DATA))
203 client->adapter = adapter; 264 return -EIO;
204 client->driver = &ds1307_driver; 265
205 client->flags = 0; 266 if (!(ds1307 = kzalloc(sizeof(struct ds1307), GFP_KERNEL)))
267 return -ENOMEM;
206 268
269 ds1307->client = client;
207 i2c_set_clientdata(client, ds1307); 270 i2c_set_clientdata(client, ds1307);
208 271
209 ds1307->msg[0].addr = client->addr; 272 ds1307->msg[0].addr = client->addr;
@@ -216,14 +279,16 @@ ds1307_detect(struct i2c_adapter *adapter, int address, int kind)
216 ds1307->msg[1].len = sizeof(ds1307->regs); 279 ds1307->msg[1].len = sizeof(ds1307->regs);
217 ds1307->msg[1].buf = ds1307->regs; 280 ds1307->msg[1].buf = ds1307->regs;
218 281
219 /* HACK: "force" implies "needs ds1337-style-oscillator setup" */ 282 ds1307->type = chip->type;
220 if (kind >= 0) {
221 ds1307->type = ds_1337;
222 283
284 switch (ds1307->type) {
285 case ds_1337:
286 case ds_1339:
223 ds1307->reg_addr = DS1337_REG_CONTROL; 287 ds1307->reg_addr = DS1337_REG_CONTROL;
224 ds1307->msg[1].len = 2; 288 ds1307->msg[1].len = 2;
225 289
226 tmp = i2c_transfer(client->adapter, ds1307->msg, 2); 290 /* get registers that the "rtc" read below won't read... */
291 tmp = i2c_transfer(adapter, ds1307->msg, 2);
227 if (tmp != 2) { 292 if (tmp != 2) {
228 pr_debug("read error %d\n", tmp); 293 pr_debug("read error %d\n", tmp);
229 err = -EIO; 294 err = -EIO;
@@ -233,19 +298,26 @@ ds1307_detect(struct i2c_adapter *adapter, int address, int kind)
233 ds1307->reg_addr = 0; 298 ds1307->reg_addr = 0;
234 ds1307->msg[1].len = sizeof(ds1307->regs); 299 ds1307->msg[1].len = sizeof(ds1307->regs);
235 300
236 /* oscillator is off; need to turn it on */ 301 /* oscillator off? turn it on, so clock can tick. */
237 if ((ds1307->regs[0] & DS1337_BIT_nEOSC) 302 if (ds1307->regs[0] & DS1337_BIT_nEOSC)
238 || (ds1307->regs[1] & DS1337_BIT_OSF)) { 303 i2c_smbus_write_byte_data(client, DS1337_REG_CONTROL,
239 printk(KERN_ERR "no ds1337 oscillator code\n"); 304 ds1307->regs[0] & ~DS1337_BIT_nEOSC);
240 goto exit_free; 305
306 /* oscillator fault? clear flag, and warn */
307 if (ds1307->regs[1] & DS1337_BIT_OSF) {
308 i2c_smbus_write_byte_data(client, DS1337_REG_STATUS,
309 ds1307->regs[1] & ~DS1337_BIT_OSF);
310 dev_warn(&client->dev, "SET TIME!\n");
241 } 311 }
242 } else 312 break;
243 ds1307->type = ds_1307; 313 default:
314 break;
315 }
244 316
245read_rtc: 317read_rtc:
246 /* read RTC registers */ 318 /* read RTC registers */
247 319
248 tmp = i2c_transfer(client->adapter, ds1307->msg, 2); 320 tmp = i2c_transfer(adapter, ds1307->msg, 2);
249 if (tmp != 2) { 321 if (tmp != 2) {
250 pr_debug("read error %d\n", tmp); 322 pr_debug("read error %d\n", tmp);
251 err = -EIO; 323 err = -EIO;
@@ -257,72 +329,80 @@ read_rtc:
257 * still a few values that are clearly out-of-range. 329 * still a few values that are clearly out-of-range.
258 */ 330 */
259 tmp = ds1307->regs[DS1307_REG_SECS]; 331 tmp = ds1307->regs[DS1307_REG_SECS];
260 if (tmp & DS1307_BIT_CH) { 332 switch (ds1307->type) {
261 if (ds1307->type && ds1307->type != ds_1307) { 333 case ds_1340:
262 pr_debug("not a ds1307?\n"); 334 /* FIXME read register with DS1340_BIT_OSF, use that to
263 goto exit_free; 335 * trigger the "set time" warning (*after* restarting the
264 } 336 * oscillator!) instead of this weaker ds1307/m41t00 test.
265 ds1307->type = ds_1307;
266
267 /* this partial initialization should work for ds1307,
268 * ds1338, ds1340, st m41t00, and more.
269 */ 337 */
270 dev_warn(&client->dev, "oscillator started; SET TIME!\n"); 338 case ds_1307:
271 i2c_smbus_write_byte_data(client, 0, 0); 339 case m41t00:
272 goto read_rtc; 340 /* clock halted? turn it on, so clock can tick. */
341 if (tmp & DS1307_BIT_CH) {
342 i2c_smbus_write_byte_data(client, DS1307_REG_SECS, 0);
343 dev_warn(&client->dev, "SET TIME!\n");
344 goto read_rtc;
345 }
346 break;
347 case ds_1338:
348 /* clock halted? turn it on, so clock can tick. */
349 if (tmp & DS1307_BIT_CH)
350 i2c_smbus_write_byte_data(client, DS1307_REG_SECS, 0);
351
352 /* oscillator fault? clear flag, and warn */
353 if (ds1307->regs[DS1307_REG_CONTROL] & DS1338_BIT_OSF) {
354 i2c_smbus_write_byte_data(client, DS1307_REG_CONTROL,
355 ds1307->regs[DS1337_REG_CONTROL]
356 & ~DS1338_BIT_OSF);
357 dev_warn(&client->dev, "SET TIME!\n");
358 goto read_rtc;
359 }
360 break;
361 case ds_1337:
362 case ds_1339:
363 break;
273 } 364 }
365
366 tmp = ds1307->regs[DS1307_REG_SECS];
274 tmp = BCD2BIN(tmp & 0x7f); 367 tmp = BCD2BIN(tmp & 0x7f);
275 if (tmp > 60) 368 if (tmp > 60)
276 goto exit_free; 369 goto exit_bad;
277 tmp = BCD2BIN(ds1307->regs[DS1307_REG_MIN] & 0x7f); 370 tmp = BCD2BIN(ds1307->regs[DS1307_REG_MIN] & 0x7f);
278 if (tmp > 60) 371 if (tmp > 60)
279 goto exit_free; 372 goto exit_bad;
280 373
281 tmp = BCD2BIN(ds1307->regs[DS1307_REG_MDAY] & 0x3f); 374 tmp = BCD2BIN(ds1307->regs[DS1307_REG_MDAY] & 0x3f);
282 if (tmp == 0 || tmp > 31) 375 if (tmp == 0 || tmp > 31)
283 goto exit_free; 376 goto exit_bad;
284 377
285 tmp = BCD2BIN(ds1307->regs[DS1307_REG_MONTH] & 0x1f); 378 tmp = BCD2BIN(ds1307->regs[DS1307_REG_MONTH] & 0x1f);
286 if (tmp == 0 || tmp > 12) 379 if (tmp == 0 || tmp > 12)
287 goto exit_free; 380 goto exit_bad;
288 381
289 /* force into in 24 hour mode (most chips) or
290 * disable century bit (ds1340)
291 */
292 tmp = ds1307->regs[DS1307_REG_HOUR]; 382 tmp = ds1307->regs[DS1307_REG_HOUR];
293 if (tmp & (1 << 6)) {
294 if (tmp & (1 << 5))
295 tmp = BCD2BIN(tmp & 0x1f) + 12;
296 else
297 tmp = BCD2BIN(tmp);
298 i2c_smbus_write_byte_data(client,
299 DS1307_REG_HOUR,
300 BIN2BCD(tmp));
301 }
302
303 /* FIXME chips like 1337 can generate alarm irqs too; those are
304 * worth exposing through the API (especially when the irq is
305 * wakeup-capable).
306 */
307
308 switch (ds1307->type) { 383 switch (ds1307->type) {
309 case unknown:
310 strlcpy(client->name, "unknown", I2C_NAME_SIZE);
311 break;
312 case ds_1307:
313 strlcpy(client->name, "ds1307", I2C_NAME_SIZE);
314 break;
315 case ds_1337:
316 strlcpy(client->name, "ds1337", I2C_NAME_SIZE);
317 break;
318 case ds_1340: 384 case ds_1340:
319 strlcpy(client->name, "ds1340", I2C_NAME_SIZE); 385 case m41t00:
386 /* NOTE: ignores century bits; fix before deploying
387 * systems that will run through year 2100.
388 */
320 break; 389 break;
321 } 390 default:
391 if (!(tmp & DS1307_BIT_12HR))
392 break;
322 393
323 /* Tell the I2C layer a new client has arrived */ 394 /* Be sure we're in 24 hour mode. Multi-master systems
324 if ((err = i2c_attach_client(client))) 395 * take note...
325 goto exit_free; 396 */
397 tmp = BCD2BIN(tmp & 0x1f);
398 if (tmp == 12)
399 tmp = 0;
400 if (ds1307->regs[DS1307_REG_HOUR] & DS1307_BIT_PM)
401 tmp += 12;
402 i2c_smbus_write_byte_data(client,
403 DS1307_REG_HOUR,
404 BIN2BCD(tmp));
405 }
326 406
327 ds1307->rtc = rtc_device_register(client->name, &client->dev, 407 ds1307->rtc = rtc_device_register(client->name, &client->dev,
328 &ds13xx_rtc_ops, THIS_MODULE); 408 &ds13xx_rtc_ops, THIS_MODULE);
@@ -330,46 +410,40 @@ read_rtc:
330 err = PTR_ERR(ds1307->rtc); 410 err = PTR_ERR(ds1307->rtc);
331 dev_err(&client->dev, 411 dev_err(&client->dev,
332 "unable to register the class device\n"); 412 "unable to register the class device\n");
333 goto exit_detach; 413 goto exit_free;
334 } 414 }
335 415
336 return 0; 416 return 0;
337 417
338exit_detach: 418exit_bad:
339 i2c_detach_client(client); 419 dev_dbg(&client->dev, "%s: %02x %02x %02x %02x %02x %02x %02x\n",
420 "bogus register",
421 ds1307->regs[0], ds1307->regs[1],
422 ds1307->regs[2], ds1307->regs[3],
423 ds1307->regs[4], ds1307->regs[5],
424 ds1307->regs[6]);
425
340exit_free: 426exit_free:
341 kfree(ds1307); 427 kfree(ds1307);
342exit:
343 return err; 428 return err;
344} 429}
345 430
346static int __devinit 431static int __devexit ds1307_remove(struct i2c_client *client)
347ds1307_attach_adapter(struct i2c_adapter *adapter)
348{
349 if (!i2c_check_functionality(adapter, I2C_FUNC_I2C))
350 return 0;
351 return i2c_probe(adapter, &addr_data, ds1307_detect);
352}
353
354static int __devexit ds1307_detach_client(struct i2c_client *client)
355{ 432{
356 int err;
357 struct ds1307 *ds1307 = i2c_get_clientdata(client); 433 struct ds1307 *ds1307 = i2c_get_clientdata(client);
358 434
359 rtc_device_unregister(ds1307->rtc); 435 rtc_device_unregister(ds1307->rtc);
360 if ((err = i2c_detach_client(client)))
361 return err;
362 kfree(ds1307); 436 kfree(ds1307);
363 return 0; 437 return 0;
364} 438}
365 439
366static struct i2c_driver ds1307_driver = { 440static struct i2c_driver ds1307_driver = {
367 .driver = { 441 .driver = {
368 .name = "ds1307", 442 .name = "rtc-ds1307",
369 .owner = THIS_MODULE, 443 .owner = THIS_MODULE,
370 }, 444 },
371 .attach_adapter = ds1307_attach_adapter, 445 .probe = ds1307_probe,
372 .detach_client = __devexit_p(ds1307_detach_client), 446 .remove = __devexit_p(ds1307_remove),
373}; 447};
374 448
375static int __init ds1307_init(void) 449static int __init ds1307_init(void)
diff --git a/drivers/rtc/rtc-m41t80.c b/drivers/rtc/rtc-m41t80.c
new file mode 100644
index 000000000000..80c4a8463065
--- /dev/null
+++ b/drivers/rtc/rtc-m41t80.c
@@ -0,0 +1,917 @@
1/*
2 * I2C client/driver for the ST M41T80 family of i2c rtc chips.
3 *
4 * Author: Alexander Bigga <ab@mycable.de>
5 *
6 * Based on m41t00.c by Mark A. Greer <mgreer@mvista.com>
7 *
8 * 2006 (c) mycable GmbH
9 *
10 * This program is free software; you can redistribute it and/or modify
11 * it under the terms of the GNU General Public License version 2 as
12 * published by the Free Software Foundation.
13 *
14 */
15
16#include <linux/module.h>
17#include <linux/init.h>
18#include <linux/slab.h>
19#include <linux/string.h>
20#include <linux/i2c.h>
21#include <linux/rtc.h>
22#include <linux/bcd.h>
23#ifdef CONFIG_RTC_DRV_M41T80_WDT
24#include <linux/miscdevice.h>
25#include <linux/watchdog.h>
26#include <linux/reboot.h>
27#include <linux/fs.h>
28#include <linux/ioctl.h>
29#endif
30
31#define M41T80_REG_SSEC 0
32#define M41T80_REG_SEC 1
33#define M41T80_REG_MIN 2
34#define M41T80_REG_HOUR 3
35#define M41T80_REG_WDAY 4
36#define M41T80_REG_DAY 5
37#define M41T80_REG_MON 6
38#define M41T80_REG_YEAR 7
39#define M41T80_REG_ALARM_MON 0xa
40#define M41T80_REG_ALARM_DAY 0xb
41#define M41T80_REG_ALARM_HOUR 0xc
42#define M41T80_REG_ALARM_MIN 0xd
43#define M41T80_REG_ALARM_SEC 0xe
44#define M41T80_REG_FLAGS 0xf
45#define M41T80_REG_SQW 0x13
46
47#define M41T80_DATETIME_REG_SIZE (M41T80_REG_YEAR + 1)
48#define M41T80_ALARM_REG_SIZE \
49 (M41T80_REG_ALARM_SEC + 1 - M41T80_REG_ALARM_MON)
50
51#define M41T80_SEC_ST (1 << 7) /* ST: Stop Bit */
52#define M41T80_ALMON_AFE (1 << 7) /* AFE: AF Enable Bit */
53#define M41T80_ALMON_SQWE (1 << 6) /* SQWE: SQW Enable Bit */
54#define M41T80_ALHOUR_HT (1 << 6) /* HT: Halt Update Bit */
55#define M41T80_FLAGS_AF (1 << 6) /* AF: Alarm Flag Bit */
56#define M41T80_FLAGS_BATT_LOW (1 << 4) /* BL: Battery Low Bit */
57
58#define M41T80_FEATURE_HT (1 << 0)
59#define M41T80_FEATURE_BL (1 << 1)
60
61#define DRV_VERSION "0.05"
62
63struct m41t80_chip_info {
64 const char *name;
65 u8 features;
66};
67
68static const struct m41t80_chip_info m41t80_chip_info_tbl[] = {
69 {
70 .name = "m41t80",
71 .features = 0,
72 },
73 {
74 .name = "m41t81",
75 .features = M41T80_FEATURE_HT,
76 },
77 {
78 .name = "m41t81s",
79 .features = M41T80_FEATURE_HT | M41T80_FEATURE_BL,
80 },
81 {
82 .name = "m41t82",
83 .features = M41T80_FEATURE_HT | M41T80_FEATURE_BL,
84 },
85 {
86 .name = "m41t83",
87 .features = M41T80_FEATURE_HT | M41T80_FEATURE_BL,
88 },
89 {
90 .name = "m41st84",
91 .features = M41T80_FEATURE_HT | M41T80_FEATURE_BL,
92 },
93 {
94 .name = "m41st85",
95 .features = M41T80_FEATURE_HT | M41T80_FEATURE_BL,
96 },
97 {
98 .name = "m41st87",
99 .features = M41T80_FEATURE_HT | M41T80_FEATURE_BL,
100 },
101};
102
/* Per-client driver state, attached via i2c_set_clientdata(). */
struct m41t80_data {
	const struct m41t80_chip_info *chip;	/* entry in m41t80_chip_info_tbl */
	struct rtc_device *rtc;			/* registered RTC class device */
};
107
108static int m41t80_get_datetime(struct i2c_client *client,
109 struct rtc_time *tm)
110{
111 u8 buf[M41T80_DATETIME_REG_SIZE], dt_addr[1] = { M41T80_REG_SEC };
112 struct i2c_msg msgs[] = {
113 {
114 .addr = client->addr,
115 .flags = 0,
116 .len = 1,
117 .buf = dt_addr,
118 },
119 {
120 .addr = client->addr,
121 .flags = I2C_M_RD,
122 .len = M41T80_DATETIME_REG_SIZE - M41T80_REG_SEC,
123 .buf = buf + M41T80_REG_SEC,
124 },
125 };
126
127 if (i2c_transfer(client->adapter, msgs, 2) < 0) {
128 dev_err(&client->dev, "read error\n");
129 return -EIO;
130 }
131
132 tm->tm_sec = BCD2BIN(buf[M41T80_REG_SEC] & 0x7f);
133 tm->tm_min = BCD2BIN(buf[M41T80_REG_MIN] & 0x7f);
134 tm->tm_hour = BCD2BIN(buf[M41T80_REG_HOUR] & 0x3f);
135 tm->tm_mday = BCD2BIN(buf[M41T80_REG_DAY] & 0x3f);
136 tm->tm_wday = buf[M41T80_REG_WDAY] & 0x07;
137 tm->tm_mon = BCD2BIN(buf[M41T80_REG_MON] & 0x1f) - 1;
138
139 /* assume 20YY not 19YY, and ignore the Century Bit */
140 tm->tm_year = BCD2BIN(buf[M41T80_REG_YEAR]) + 100;
141 return 0;
142}
143
144/* Sets the given date and time to the real time clock. */
145static int m41t80_set_datetime(struct i2c_client *client, struct rtc_time *tm)
146{
147 u8 wbuf[1 + M41T80_DATETIME_REG_SIZE];
148 u8 *buf = &wbuf[1];
149 u8 dt_addr[1] = { M41T80_REG_SEC };
150 struct i2c_msg msgs_in[] = {
151 {
152 .addr = client->addr,
153 .flags = 0,
154 .len = 1,
155 .buf = dt_addr,
156 },
157 {
158 .addr = client->addr,
159 .flags = I2C_M_RD,
160 .len = M41T80_DATETIME_REG_SIZE - M41T80_REG_SEC,
161 .buf = buf + M41T80_REG_SEC,
162 },
163 };
164 struct i2c_msg msgs[] = {
165 {
166 .addr = client->addr,
167 .flags = 0,
168 .len = 1 + M41T80_DATETIME_REG_SIZE,
169 .buf = wbuf,
170 },
171 };
172
173 /* Read current reg values into buf[1..7] */
174 if (i2c_transfer(client->adapter, msgs_in, 2) < 0) {
175 dev_err(&client->dev, "read error\n");
176 return -EIO;
177 }
178
179 wbuf[0] = 0; /* offset into rtc's regs */
180 /* Merge time-data and register flags into buf[0..7] */
181 buf[M41T80_REG_SSEC] = 0;
182 buf[M41T80_REG_SEC] =
183 BIN2BCD(tm->tm_sec) | (buf[M41T80_REG_SEC] & ~0x7f);
184 buf[M41T80_REG_MIN] =
185 BIN2BCD(tm->tm_min) | (buf[M41T80_REG_MIN] & ~0x7f);
186 buf[M41T80_REG_HOUR] =
187 BIN2BCD(tm->tm_hour) | (buf[M41T80_REG_HOUR] & ~0x3f) ;
188 buf[M41T80_REG_WDAY] =
189 (tm->tm_wday & 0x07) | (buf[M41T80_REG_WDAY] & ~0x07);
190 buf[M41T80_REG_DAY] =
191 BIN2BCD(tm->tm_mday) | (buf[M41T80_REG_DAY] & ~0x3f);
192 buf[M41T80_REG_MON] =
193 BIN2BCD(tm->tm_mon + 1) | (buf[M41T80_REG_MON] & ~0x1f);
194 /* assume 20YY not 19YY */
195 buf[M41T80_REG_YEAR] = BIN2BCD(tm->tm_year % 100);
196
197 if (i2c_transfer(client->adapter, msgs, 1) != 1) {
198 dev_err(&client->dev, "write error\n");
199 return -EIO;
200 }
201 return 0;
202}
203
#if defined(CONFIG_RTC_INTF_PROC) || defined(CONFIG_RTC_INTF_PROC_MODULE)
/*
 * /proc reporting: show battery condition on chips that have a
 * battery-low flag.  Always returns 0.
 *
 * Fix: i2c_smbus_read_byte_data() returns a negative errno on failure,
 * but the original stored the result in a u8 — an I2C error would have
 * been reported as a (bogus) battery state.  Keep the value in an int
 * and only print when the read succeeded.
 */
static int m41t80_rtc_proc(struct device *dev, struct seq_file *seq)
{
	struct i2c_client *client = to_i2c_client(dev);
	struct m41t80_data *clientdata = i2c_get_clientdata(client);
	int reg;

	if (clientdata->chip->features & M41T80_FEATURE_BL) {
		reg = i2c_smbus_read_byte_data(client, M41T80_REG_FLAGS);
		if (reg >= 0)
			seq_printf(seq, "battery\t\t: %s\n",
				   (reg & M41T80_FLAGS_BATT_LOW) ?
				   "exhausted" : "ok");
	}
	return 0;
}
#else
#define m41t80_rtc_proc NULL
#endif
221
/* rtc_class_ops adapter: recover the i2c client and read the time. */
static int m41t80_rtc_read_time(struct device *dev, struct rtc_time *tm)
{
	struct i2c_client *client = to_i2c_client(dev);

	return m41t80_get_datetime(client, tm);
}
226
/* rtc_class_ops adapter: recover the i2c client and set the time. */
static int m41t80_rtc_set_time(struct device *dev, struct rtc_time *tm)
{
	struct i2c_client *client = to_i2c_client(dev);

	return m41t80_set_datetime(client, tm);
}
231
#if defined(CONFIG_RTC_INTF_DEV) || defined(CONFIG_RTC_INTF_DEV_MODULE)
/*
 * Device ioctl: only alarm-interrupt enable/disable is supported.
 * The AFE bit in the alarm-month register is toggled with a
 * read-modify-write so its neighbours (e.g. SQWE) are preserved.
 * Returns 0, -ENOIOCTLCMD for unsupported commands, or -EIO.
 */
static int
m41t80_rtc_ioctl(struct device *dev, unsigned int cmd, unsigned long arg)
{
	struct i2c_client *client = to_i2c_client(dev);
	int flags;

	if (cmd != RTC_AIE_ON && cmd != RTC_AIE_OFF)
		return -ENOIOCTLCMD;

	flags = i2c_smbus_read_byte_data(client, M41T80_REG_ALARM_MON);
	if (flags < 0)
		return -EIO;

	if (cmd == RTC_AIE_ON)
		flags |= M41T80_ALMON_AFE;
	else
		flags &= ~M41T80_ALMON_AFE;

	if (i2c_smbus_write_byte_data(client, M41T80_REG_ALARM_MON, flags) < 0)
		return -EIO;
	return 0;
}
#else
#define m41t80_rtc_ioctl NULL
#endif
267
/*
 * Program the chip's alarm registers from @t.
 *
 * rtc_wkalrm fields that are negative mean "don't care"; those get the
 * per-field repeat/mask bit (0x80, or 0x40 in the day register for the
 * month) instead of a BCD value.  AFE (alarm enable) is written last,
 * and only when t->enabled is set.
 */
static int m41t80_rtc_set_alarm(struct device *dev, struct rtc_wkalrm *t)
{
	struct i2c_client *client = to_i2c_client(dev);
	/* wbuf[0] carries the start-register address for the block
	 * write; the alarm register image lives at wbuf[1..]. */
	u8 wbuf[1 + M41T80_ALARM_REG_SIZE];
	u8 *buf = &wbuf[1];
	/* biased pointer so reg[] is indexed by absolute register
	 * number (reg[M41T80_REG_ALARM_x]) rather than 0-based */
	u8 *reg = buf - M41T80_REG_ALARM_MON;
	u8 dt_addr[1] = { M41T80_REG_ALARM_MON };
	/* set the chip's register pointer, then read the alarm block */
	struct i2c_msg msgs_in[] = {
		{
			.addr = client->addr,
			.flags = 0,
			.len = 1,
			.buf = dt_addr,
		},
		{
			.addr = client->addr,
			.flags = I2C_M_RD,
			.len = M41T80_ALARM_REG_SIZE,
			.buf = buf,
		},
	};
	/* single write: start address followed by the new alarm image */
	struct i2c_msg msgs[] = {
		{
			.addr = client->addr,
			.flags = 0,
			.len = 1 + M41T80_ALARM_REG_SIZE,
			.buf = wbuf,
		},
	};

	if (i2c_transfer(client->adapter, msgs_in, 2) < 0) {
		dev_err(&client->dev, "read error\n");
		return -EIO;
	}
	/* clear time fields while preserving the chip's control bits */
	reg[M41T80_REG_ALARM_MON] &= ~(0x1f | M41T80_ALMON_AFE);
	reg[M41T80_REG_ALARM_DAY] = 0;
	reg[M41T80_REG_ALARM_HOUR] &= ~(0x3f | 0x80);
	reg[M41T80_REG_ALARM_MIN] = 0;
	reg[M41T80_REG_ALARM_SEC] = 0;

	wbuf[0] = M41T80_REG_ALARM_MON; /* offset into rtc's regs */
	reg[M41T80_REG_ALARM_SEC] |= t->time.tm_sec >= 0 ?
		BIN2BCD(t->time.tm_sec) : 0x80;
	reg[M41T80_REG_ALARM_MIN] |= t->time.tm_min >= 0 ?
		BIN2BCD(t->time.tm_min) : 0x80;
	reg[M41T80_REG_ALARM_HOUR] |= t->time.tm_hour >= 0 ?
		BIN2BCD(t->time.tm_hour) : 0x80;
	reg[M41T80_REG_ALARM_DAY] |= t->time.tm_mday >= 0 ?
		BIN2BCD(t->time.tm_mday) : 0x80;
	if (t->time.tm_mon >= 0)
		reg[M41T80_REG_ALARM_MON] |= BIN2BCD(t->time.tm_mon + 1);
	else
		reg[M41T80_REG_ALARM_DAY] |= 0x40; /* repeat every month */

	if (i2c_transfer(client->adapter, msgs, 1) != 1) {
		dev_err(&client->dev, "write error\n");
		return -EIO;
	}

	if (t->enabled) {
		/* enable the alarm only after the time is in place */
		reg[M41T80_REG_ALARM_MON] |= M41T80_ALMON_AFE;
		if (i2c_smbus_write_byte_data(client, M41T80_REG_ALARM_MON,
					      reg[M41T80_REG_ALARM_MON]) < 0) {
			dev_err(&client->dev, "write error\n");
			return -EIO;
		}
	}
	return 0;
}
337
/*
 * Read the alarm time back from the chip.
 *
 * Fields whose repeat/mask bit is set on the chip are reported as -1
 * ("don't care"), per rtc_wkalrm convention.  The read covers one byte
 * past the alarm block so the flags register (AF bit) is fetched in the
 * same transfer.
 */
static int m41t80_rtc_read_alarm(struct device *dev, struct rtc_wkalrm *t)
{
	struct i2c_client *client = to_i2c_client(dev);
	u8 buf[M41T80_ALARM_REG_SIZE + 1]; /* all alarm regs and flags */
	u8 dt_addr[1] = { M41T80_REG_ALARM_MON };
	/* biased pointer: index reg[] by absolute register number */
	u8 *reg = buf - M41T80_REG_ALARM_MON;
	struct i2c_msg msgs[] = {
		{
			.addr = client->addr,
			.flags = 0,
			.len = 1,
			.buf = dt_addr,
		},
		{
			.addr = client->addr,
			.flags = I2C_M_RD,
			.len = M41T80_ALARM_REG_SIZE + 1,
			.buf = buf,
		},
	};

	if (i2c_transfer(client->adapter, msgs, 2) < 0) {
		dev_err(&client->dev, "read error\n");
		return -EIO;
	}
	/* default every field to "don't care", then fill in the ones
	 * whose mask bit is clear on the chip */
	t->time.tm_sec = -1;
	t->time.tm_min = -1;
	t->time.tm_hour = -1;
	t->time.tm_mday = -1;
	t->time.tm_mon = -1;
	if (!(reg[M41T80_REG_ALARM_SEC] & 0x80))
		t->time.tm_sec = BCD2BIN(reg[M41T80_REG_ALARM_SEC] & 0x7f);
	if (!(reg[M41T80_REG_ALARM_MIN] & 0x80))
		t->time.tm_min = BCD2BIN(reg[M41T80_REG_ALARM_MIN] & 0x7f);
	if (!(reg[M41T80_REG_ALARM_HOUR] & 0x80))
		t->time.tm_hour = BCD2BIN(reg[M41T80_REG_ALARM_HOUR] & 0x3f);
	if (!(reg[M41T80_REG_ALARM_DAY] & 0x80))
		t->time.tm_mday = BCD2BIN(reg[M41T80_REG_ALARM_DAY] & 0x3f);
	/* month repeat bit lives in the day register (0x40) */
	if (!(reg[M41T80_REG_ALARM_DAY] & 0x40))
		t->time.tm_mon = BCD2BIN(reg[M41T80_REG_ALARM_MON] & 0x1f) - 1;
	t->time.tm_year = -1;	/* the chip's alarm has no year field */
	t->time.tm_wday = -1;
	t->time.tm_yday = -1;
	t->time.tm_isdst = -1;
	t->enabled = !!(reg[M41T80_REG_ALARM_MON] & M41T80_ALMON_AFE);
	t->pending = !!(reg[M41T80_REG_FLAGS] & M41T80_FLAGS_AF);
	return 0;
}
386
387static struct rtc_class_ops m41t80_rtc_ops = {
388 .read_time = m41t80_rtc_read_time,
389 .set_time = m41t80_rtc_set_time,
390 .read_alarm = m41t80_rtc_read_alarm,
391 .set_alarm = m41t80_rtc_set_alarm,
392 .proc = m41t80_rtc_proc,
393 .ioctl = m41t80_rtc_ioctl,
394};
395
396#if defined(CONFIG_RTC_INTF_SYSFS) || defined(CONFIG_RTC_INTF_SYSFS_MODULE)
397static ssize_t m41t80_sysfs_show_flags(struct device *dev,
398 struct device_attribute *attr, char *buf)
399{
400 struct i2c_client *client = to_i2c_client(dev);
401 int val;
402
403 val = i2c_smbus_read_byte_data(client, M41T80_REG_FLAGS);
404 if (val < 0)
405 return -EIO;
406 return sprintf(buf, "%#x\n", val);
407}
408static DEVICE_ATTR(flags, S_IRUGO, m41t80_sysfs_show_flags, NULL);
409
410static ssize_t m41t80_sysfs_show_sqwfreq(struct device *dev,
411 struct device_attribute *attr, char *buf)
412{
413 struct i2c_client *client = to_i2c_client(dev);
414 int val;
415
416 val = i2c_smbus_read_byte_data(client, M41T80_REG_SQW);
417 if (val < 0)
418 return -EIO;
419 val = (val >> 4) & 0xf;
420 switch (val) {
421 case 0:
422 break;
423 case 1:
424 val = 32768;
425 break;
426 default:
427 val = 32768 >> val;
428 }
429 return sprintf(buf, "%d\n", val);
430}
/*
 * sysfs "sqwfreq" (write): set the square-wave output frequency.
 * Accepts 0 (off) or a power of two up to 32768 Hz; the value is
 * mapped to the chip's 4-bit encoding (1 -> 32768 Hz, n -> 32768>>n).
 * NOTE(review): simple_strtoul() ignores trailing garbage, so e.g.
 * "512x" is accepted as 512 - confirm that is acceptable.
 */
static ssize_t m41t80_sysfs_set_sqwfreq(struct device *dev,
					struct device_attribute *attr,
					const char *buf, size_t count)
{
	struct i2c_client *client = to_i2c_client(dev);
	int almon, sqw;
	int val = simple_strtoul(buf, NULL, 0);

	if (val) {
		if (!is_power_of_2(val))
			return -EINVAL;
		val = ilog2(val);
		/* 2^15 = 32768 Hz is encoded as 1; 2^1..2^13 map to
		 * 14..2; 2^14 has no encoding */
		if (val == 15)
			val = 1;
		else if (val < 14)
			val = 15 - val;
		else
			return -EINVAL;
	}
	/* disable SQW, set SQW frequency & re-enable */
	almon = i2c_smbus_read_byte_data(client, M41T80_REG_ALARM_MON);
	if (almon < 0)
		return -EIO;
	sqw = i2c_smbus_read_byte_data(client, M41T80_REG_SQW);
	if (sqw < 0)
		return -EIO;
	/* frequency code lives in the high nibble of the SQW register */
	sqw = (sqw & 0x0f) | (val << 4);
	if (i2c_smbus_write_byte_data(client, M41T80_REG_ALARM_MON,
				      almon & ~M41T80_ALMON_SQWE) < 0 ||
	    i2c_smbus_write_byte_data(client, M41T80_REG_SQW, sqw) < 0)
		return -EIO;
	/* re-enable the output only for a non-zero frequency */
	if (val && i2c_smbus_write_byte_data(client, M41T80_REG_ALARM_MON,
					     almon | M41T80_ALMON_SQWE) < 0)
		return -EIO;
	return count;
}
static DEVICE_ATTR(sqwfreq, S_IRUGO | S_IWUSR,
		   m41t80_sysfs_show_sqwfreq, m41t80_sysfs_set_sqwfreq);
469
/* sysfs attributes exported on the i2c client's device node */
static struct attribute *attrs[] = {
	&dev_attr_flags.attr,
	&dev_attr_sqwfreq.attr,
	NULL,
};
static struct attribute_group attr_group = {
	.attrs = attrs,
};
478
/* Create the sysfs attribute group on the client device. */
static int m41t80_sysfs_register(struct device *dev)
{
	return sysfs_create_group(&dev->kobj, &attr_group);
}
#else
/* sysfs interface disabled: stub always succeeds */
static int m41t80_sysfs_register(struct device *dev)
{
	return 0;
}
#endif
489
490#ifdef CONFIG_RTC_DRV_M41T80_WDT
491/*
492 *****************************************************************************
493 *
494 * Watchdog Driver
495 *
496 *****************************************************************************
497 */
/* i2c client used by the watchdog file operations (set in probe) */
static struct i2c_client *save_client;

/* Default margin */
#define WD_TIMO 60 /* 1..31 seconds */

static int wdt_margin = WD_TIMO;	/* current timeout, in seconds */
module_param(wdt_margin, int, 0);
MODULE_PARM_DESC(wdt_margin, "Watchdog timeout in seconds (default 60s)");

static unsigned long wdt_is_open;	/* bit 0: exclusive-open flag */
static int boot_flag;			/* reported by WDIOC_GETBOOTSTATUS */
509
/**
 * wdt_ping:
 *
 * Reload counter one with the watchdog timeout. We don't bother reloading
 * the cascade counter.
 */
static void wdt_ping(void)
{
	unsigned char i2c_data[2];
	struct i2c_msg msgs1[1] = {
		{
			.addr = save_client->addr,
			.flags = 0,
			.len = 2,
			.buf = i2c_data,
		},
	};
	i2c_data[0] = 0x09; /* watchdog register */

	if (wdt_margin > 31)
		i2c_data[1] = (wdt_margin & 0xFC) | 0x83; /* resolution = 4s */
	else
		/*
		 * WDS = 1 (0x80), multiplier = WD_TIMO, resolution = 1s (0x02)
		 */
		i2c_data[1] = wdt_margin<<2 | 0x82;

	/* NOTE(review): the i2c_transfer() result is discarded, so a
	 * failed ping goes unnoticed - confirm this is intentional. */
	i2c_transfer(save_client->adapter, msgs1, 1);
}
539
540/**
541 * wdt_disable:
542 *
543 * disables watchdog.
544 */
545static void wdt_disable(void)
546{
547 unsigned char i2c_data[2], i2c_buf[0x10];
548 struct i2c_msg msgs0[2] = {
549 {
550 .addr = save_client->addr,
551 .flags = 0,
552 .len = 1,
553 .buf = i2c_data,
554 },
555 {
556 .addr = save_client->addr,
557 .flags = I2C_M_RD,
558 .len = 1,
559 .buf = i2c_buf,
560 },
561 };
562 struct i2c_msg msgs1[1] = {
563 {
564 .addr = save_client->addr,
565 .flags = 0,
566 .len = 2,
567 .buf = i2c_data,
568 },
569 };
570
571 i2c_data[0] = 0x09;
572 i2c_transfer(save_client->adapter, msgs0, 2);
573
574 i2c_data[0] = 0x09;
575 i2c_data[1] = 0x00;
576 i2c_transfer(save_client->adapter, msgs1, 1);
577}
578
579/**
580 * wdt_write:
581 * @file: file handle to the watchdog
582 * @buf: buffer to write (unused as data does not matter here
583 * @count: count of bytes
584 * @ppos: pointer to the position to write. No seeks allowed
585 *
586 * A write to a watchdog device is defined as a keepalive signal. Any
587 * write of data will do, as we we don't define content meaning.
588 */
589static ssize_t wdt_write(struct file *file, const char __user *buf,
590 size_t count, loff_t *ppos)
591{
592 /* Can't seek (pwrite) on this device
593 if (ppos != &file->f_pos)
594 return -ESPIPE;
595 */
596 if (count) {
597 wdt_ping();
598 return 1;
599 }
600 return 0;
601}
602
603static ssize_t wdt_read(struct file *file, char __user *buf,
604 size_t count, loff_t *ppos)
605{
606 return 0;
607}
608
/**
 * wdt_ioctl:
 * @inode: inode of the device
 * @file: file handle to the device
 * @cmd: watchdog command
 * @arg: argument pointer
 *
 * The watchdog API defines a common set of functions for all watchdogs
 * according to their available features. We only actually usefully support
 * querying capabilities and current status.
 */
static int wdt_ioctl(struct inode *inode, struct file *file, unsigned int cmd,
		     unsigned long arg)
{
	int new_margin, rv;
	static struct watchdog_info ident = {
		.options = WDIOF_POWERUNDER | WDIOF_KEEPALIVEPING |
			WDIOF_SETTIMEOUT,
		.firmware_version = 1,
		.identity = "M41T80 WTD"
	};

	switch (cmd) {
	case WDIOC_GETSUPPORT:
		return copy_to_user((struct watchdog_info __user *)arg, &ident,
				    sizeof(ident)) ? -EFAULT : 0;

	case WDIOC_GETSTATUS:
	case WDIOC_GETBOOTSTATUS:
		return put_user(boot_flag, (int __user *)arg);
	case WDIOC_KEEPALIVE:
		wdt_ping();
		return 0;
	case WDIOC_SETTIMEOUT:
		if (get_user(new_margin, (int __user *)arg))
			return -EFAULT;
		/* Arbitrary, can't find the card's limits */
		if (new_margin < 1 || new_margin > 124)
			return -EINVAL;
		wdt_margin = new_margin;
		wdt_ping();
		/* Fall */
		/* intentional fallthrough: report the new timeout back */
	case WDIOC_GETTIMEOUT:
		return put_user(wdt_margin, (int __user *)arg);

	case WDIOC_SETOPTIONS:
		if (copy_from_user(&rv, (int __user *)arg, sizeof(int)))
			return -EFAULT;

		if (rv & WDIOS_DISABLECARD) {
			printk(KERN_INFO
			       "rtc-m41t80: disable watchdog\n");
			wdt_disable();
		}

		if (rv & WDIOS_ENABLECARD) {
			printk(KERN_INFO
			       "rtc-m41t80: enable watchdog\n");
			wdt_ping();
		}

		/* NOTE(review): -EINVAL is returned even after acting on
		 * the options - odd-looking but common in old watchdog
		 * drivers; confirm before changing. */
		return -EINVAL;
	}
	return -ENOTTY;
}
674
675/**
676 * wdt_open:
677 * @inode: inode of device
678 * @file: file handle to device
679 *
680 */
681static int wdt_open(struct inode *inode, struct file *file)
682{
683 if (MINOR(inode->i_rdev) == WATCHDOG_MINOR) {
684 if (test_and_set_bit(0, &wdt_is_open))
685 return -EBUSY;
686 /*
687 * Activate
688 */
689 wdt_is_open = 1;
690 return 0;
691 }
692 return -ENODEV;
693}
694
695/**
696 * wdt_close:
697 * @inode: inode to board
698 * @file: file handle to board
699 *
700 */
701static int wdt_release(struct inode *inode, struct file *file)
702{
703 if (MINOR(inode->i_rdev) == WATCHDOG_MINOR)
704 clear_bit(0, &wdt_is_open);
705 return 0;
706}
707
708/**
709 * notify_sys:
710 * @this: our notifier block
711 * @code: the event being reported
712 * @unused: unused
713 *
714 * Our notifier is called on system shutdowns. We want to turn the card
715 * off at reboot otherwise the machine will reboot again during memory
716 * test or worse yet during the following fsck. This would suck, in fact
717 * trust me - if it happens it does suck.
718 */
719static int wdt_notify_sys(struct notifier_block *this, unsigned long code,
720 void *unused)
721{
722 if (code == SYS_DOWN || code == SYS_HALT)
723 /* Disable Watchdog */
724 wdt_disable();
725 return NOTIFY_DONE;
726}
727
/* character-device operations backing /dev/watchdog */
static const struct file_operations wdt_fops = {
	.owner = THIS_MODULE,
	.read = wdt_read,
	.ioctl = wdt_ioctl,
	.write = wdt_write,
	.open = wdt_open,
	.release = wdt_release,
};
736
/* misc device registered as the standard watchdog minor */
static struct miscdevice wdt_dev = {
	.minor = WATCHDOG_MINOR,
	.name = "watchdog",
	.fops = &wdt_fops,
};
742
/*
 * The WDT card needs to learn about soft shutdowns in order to
 * turn the timebomb registers off.
 */
static struct notifier_block wdt_notifier = {
	.notifier_call = wdt_notify_sys,
};
750#endif /* CONFIG_RTC_DRV_M41T80_WDT */
751
752/*
753 *****************************************************************************
754 *
755 * Driver Interface
756 *
757 *****************************************************************************
758 */
/*
 * Bind a supported M41T8x chip: identify the variant by client name,
 * register the RTC class device, clear the HT (halt-update) and ST
 * (stop) bits left over from power loss, and set up sysfs plus the
 * optional watchdog.
 */
static int m41t80_probe(struct i2c_client *client)
{
	int i, rc = 0;
	struct rtc_device *rtc = NULL;
	struct rtc_time tm;
	const struct m41t80_chip_info *chip;
	struct m41t80_data *clientdata = NULL;

	if (!i2c_check_functionality(client->adapter, I2C_FUNC_I2C
				     | I2C_FUNC_SMBUS_BYTE_DATA)) {
		rc = -ENODEV;
		goto exit;
	}

	dev_info(&client->dev,
		 "chip found, driver version " DRV_VERSION "\n");

	/* the chip variant is selected by the i2c client name */
	chip = NULL;
	for (i = 0; i < ARRAY_SIZE(m41t80_chip_info_tbl); i++) {
		if (!strcmp(m41t80_chip_info_tbl[i].name, client->name)) {
			chip = &m41t80_chip_info_tbl[i];
			break;
		}
	}
	if (!chip) {
		dev_err(&client->dev, "%s is not supported\n", client->name);
		rc = -ENODEV;
		goto exit;
	}

	clientdata = kzalloc(sizeof(*clientdata), GFP_KERNEL);
	if (!clientdata) {
		rc = -ENOMEM;
		goto exit;
	}

	rtc = rtc_device_register(client->name, &client->dev,
				  &m41t80_rtc_ops, THIS_MODULE);
	if (IS_ERR(rtc)) {
		rc = PTR_ERR(rtc);
		rtc = NULL;	/* so the exit path skips unregister */
		goto exit;
	}

	clientdata->rtc = rtc;
	clientdata->chip = chip;
	i2c_set_clientdata(client, clientdata);

	/* Make sure HT (Halt Update) bit is cleared */
	rc = i2c_smbus_read_byte_data(client, M41T80_REG_ALARM_HOUR);
	if (rc < 0)
		goto ht_err;

	if (rc & M41T80_ALHOUR_HT) {
		/* HT set means the clock was latched at power-down;
		 * on HT-capable parts log the latched timestamp */
		if (chip->features & M41T80_FEATURE_HT) {
			m41t80_get_datetime(client, &tm);
			dev_info(&client->dev, "HT bit was set!\n");
			dev_info(&client->dev,
				 "Power Down at "
				 "%04i-%02i-%02i %02i:%02i:%02i\n",
				 tm.tm_year + 1900,
				 tm.tm_mon + 1, tm.tm_mday, tm.tm_hour,
				 tm.tm_min, tm.tm_sec);
		}
		if (i2c_smbus_write_byte_data(client,
					      M41T80_REG_ALARM_HOUR,
					      rc & ~M41T80_ALHOUR_HT) < 0)
			goto ht_err;
	}

	/* Make sure ST (stop) bit is cleared */
	rc = i2c_smbus_read_byte_data(client, M41T80_REG_SEC);
	if (rc < 0)
		goto st_err;

	if (rc & M41T80_SEC_ST) {
		if (i2c_smbus_write_byte_data(client, M41T80_REG_SEC,
					      rc & ~M41T80_SEC_ST) < 0)
			goto st_err;
	}

	rc = m41t80_sysfs_register(&client->dev);
	if (rc)
		goto exit;

#ifdef CONFIG_RTC_DRV_M41T80_WDT
	/* the watchdog is only present on HT-capable variants */
	if (chip->features & M41T80_FEATURE_HT) {
		rc = misc_register(&wdt_dev);
		if (rc)
			goto exit;
		rc = register_reboot_notifier(&wdt_notifier);
		if (rc) {
			misc_deregister(&wdt_dev);
			goto exit;
		}
		save_client = client;
	}
#endif
	return 0;

st_err:
	rc = -EIO;
	dev_err(&client->dev, "Can't clear ST bit\n");
	goto exit;
ht_err:
	rc = -EIO;
	dev_err(&client->dev, "Can't clear HT bit\n");
	goto exit;

exit:
	/* NOTE(review): after i2c_set_clientdata() the failure path
	 * frees clientdata but leaves the stale pointer in the client;
	 * harmless only because remove() is not called after a failed
	 * probe - confirm. */
	if (rtc)
		rtc_device_unregister(rtc);
	kfree(clientdata);
	return rc;
}
874
875static int m41t80_remove(struct i2c_client *client)
876{
877 struct m41t80_data *clientdata = i2c_get_clientdata(client);
878 struct rtc_device *rtc = clientdata->rtc;
879
880#ifdef CONFIG_RTC_DRV_M41T80_WDT
881 if (clientdata->chip->features & M41T80_FEATURE_HT) {
882 misc_deregister(&wdt_dev);
883 unregister_reboot_notifier(&wdt_notifier);
884 }
885#endif
886 if (rtc)
887 rtc_device_unregister(rtc);
888 kfree(clientdata);
889
890 return 0;
891}
892
/* i2c driver glue; variant matching is done by name in probe() */
static struct i2c_driver m41t80_driver = {
	.driver = {
		.name = "m41t80",
	},
	.probe = m41t80_probe,
	.remove = m41t80_remove,
};
900
/* module init/exit: just (de)register the i2c driver */
static int __init m41t80_rtc_init(void)
{
	return i2c_add_driver(&m41t80_driver);
}

static void __exit m41t80_rtc_exit(void)
{
	i2c_del_driver(&m41t80_driver);
}

MODULE_AUTHOR("Alexander Bigga <ab@mycable.de>");
MODULE_DESCRIPTION("ST Microelectronics M41T80 series RTC I2C Client Driver");
MODULE_LICENSE("GPL");
MODULE_VERSION(DRV_VERSION);

module_init(m41t80_rtc_init);
module_exit(m41t80_rtc_exit);
diff --git a/drivers/rtc/rtc-m48t59.c b/drivers/rtc/rtc-m48t59.c
new file mode 100644
index 000000000000..33b752350ab5
--- /dev/null
+++ b/drivers/rtc/rtc-m48t59.c
@@ -0,0 +1,491 @@
1/*
2 * ST M48T59 RTC driver
3 *
4 * Copyright (c) 2007 Wind River Systems, Inc.
5 *
6 * Author: Mark Zhan <rongkai.zhan@windriver.com>
7 *
8 * This program is free software; you can redistribute it and/or modify
9 * it under the terms of the GNU General Public License version 2 as
10 * published by the Free Software Foundation.
11 */
12
13#include <linux/kernel.h>
14#include <linux/module.h>
15#include <linux/init.h>
16#include <linux/io.h>
17#include <linux/device.h>
18#include <linux/platform_device.h>
19#include <linux/rtc.h>
20#include <linux/rtc/m48t59.h>
21#include <linux/bcd.h>
22
23#ifndef NO_IRQ
24#define NO_IRQ (-1)
25#endif
26
27#define M48T59_READ(reg) pdata->read_byte(dev, reg)
28#define M48T59_WRITE(val, reg) pdata->write_byte(dev, reg, val)
29
30#define M48T59_SET_BITS(mask, reg) \
31 M48T59_WRITE((M48T59_READ(reg) | (mask)), (reg))
32#define M48T59_CLEAR_BITS(mask, reg) \
33 M48T59_WRITE((M48T59_READ(reg) & ~(mask)), (reg))
34
35struct m48t59_private {
36 void __iomem *ioaddr;
37 unsigned int size; /* iomem size */
38 unsigned int irq;
39 struct rtc_device *rtc;
40 spinlock_t lock; /* serialize the NVRAM and RTC access */
41};
42
43/*
44 * This is the generic access method when the chip is memory-mapped
45 */
46static void
47m48t59_mem_writeb(struct device *dev, u32 ofs, u8 val)
48{
49 struct platform_device *pdev = to_platform_device(dev);
50 struct m48t59_private *m48t59 = platform_get_drvdata(pdev);
51
52 writeb(val, m48t59->ioaddr+ofs);
53}
54
55static u8
56m48t59_mem_readb(struct device *dev, u32 ofs)
57{
58 struct platform_device *pdev = to_platform_device(dev);
59 struct m48t59_private *m48t59 = platform_get_drvdata(pdev);
60
61 return readb(m48t59->ioaddr+ofs);
62}
63
/*
 * NOTE: M48T59 only uses BCD mode
 *
 * Reading is done with the READ control bit set, which latches the
 * clock registers so the transfer is consistent.
 */
static int m48t59_rtc_read_time(struct device *dev, struct rtc_time *tm)
{
	struct platform_device *pdev = to_platform_device(dev);
	struct m48t59_plat_data *pdata = pdev->dev.platform_data;
	struct m48t59_private *m48t59 = platform_get_drvdata(pdev);
	unsigned long flags;
	u8 val;

	spin_lock_irqsave(&m48t59->lock, flags);
	/* Issue the READ command */
	M48T59_SET_BITS(M48T59_CNTL_READ, M48T59_CNTL);

	tm->tm_year = BCD2BIN(M48T59_READ(M48T59_YEAR));
	/* tm_mon is 0-11 */
	tm->tm_mon = BCD2BIN(M48T59_READ(M48T59_MONTH)) - 1;
	tm->tm_mday = BCD2BIN(M48T59_READ(M48T59_MDAY));

	val = M48T59_READ(M48T59_WDAY);
	if ((val & M48T59_WDAY_CEB) && (val & M48T59_WDAY_CB)) {
		dev_dbg(dev, "Century bit is enabled\n");
		tm->tm_year += 100;	/* one century */
	}

	tm->tm_wday = BCD2BIN(val & 0x07);
	tm->tm_hour = BCD2BIN(M48T59_READ(M48T59_HOUR) & 0x3F);
	tm->tm_min = BCD2BIN(M48T59_READ(M48T59_MIN) & 0x7F);
	tm->tm_sec = BCD2BIN(M48T59_READ(M48T59_SEC) & 0x7F);

	/* Clear the READ bit */
	M48T59_CLEAR_BITS(M48T59_CNTL_READ, M48T59_CNTL);
	spin_unlock_irqrestore(&m48t59->lock, flags);

	/* NOTE(review): tm_mon is printed 0-based here while the year
	 * gets +1900 - debug output only, but inconsistent */
	dev_dbg(dev, "RTC read time %04d-%02d-%02d %02d/%02d/%02d\n",
		tm->tm_year + 1900, tm->tm_mon, tm->tm_mday,
		tm->tm_hour, tm->tm_min, tm->tm_sec);
	return 0;
}
104
/*
 * Write @tm to the clock registers (BCD).  The WRITE control bit is
 * held while updating so the clock does not tick mid-update.
 */
static int m48t59_rtc_set_time(struct device *dev, struct rtc_time *tm)
{
	struct platform_device *pdev = to_platform_device(dev);
	struct m48t59_plat_data *pdata = pdev->dev.platform_data;
	struct m48t59_private *m48t59 = platform_get_drvdata(pdev);
	unsigned long flags;
	u8 val = 0;

	dev_dbg(dev, "RTC set time %04d-%02d-%02d %02d/%02d/%02d\n",
		tm->tm_year + 1900, tm->tm_mon, tm->tm_mday,
		tm->tm_hour, tm->tm_min, tm->tm_sec);

	spin_lock_irqsave(&m48t59->lock, flags);
	/* Issue the WRITE command */
	M48T59_SET_BITS(M48T59_CNTL_WRITE, M48T59_CNTL);

	M48T59_WRITE((BIN2BCD(tm->tm_sec) & 0x7F), M48T59_SEC);
	M48T59_WRITE((BIN2BCD(tm->tm_min) & 0x7F), M48T59_MIN);
	M48T59_WRITE((BIN2BCD(tm->tm_hour) & 0x3F), M48T59_HOUR);
	M48T59_WRITE((BIN2BCD(tm->tm_mday) & 0x3F), M48T59_MDAY);
	/* tm_mon is 0-11 */
	M48T59_WRITE((BIN2BCD(tm->tm_mon + 1) & 0x1F), M48T59_MONTH);
	M48T59_WRITE(BIN2BCD(tm->tm_year % 100), M48T59_YEAR);

	/* years >= 2000 (tm_year >= 100) set the century bits */
	if (tm->tm_year/100)
		val = (M48T59_WDAY_CEB | M48T59_WDAY_CB);
	val |= (BIN2BCD(tm->tm_wday) & 0x07);
	M48T59_WRITE(val, M48T59_WDAY);

	/* Clear the WRITE bit */
	M48T59_CLEAR_BITS(M48T59_CNTL_WRITE, M48T59_CNTL);
	spin_unlock_irqrestore(&m48t59->lock, flags);
	return 0;
}
139
/*
 * Read alarm time and date in RTC
 *
 * The chip's alarm has no year/month fields; those are filled in from
 * the clock registers so the returned rtc_time is fully populated.
 */
static int m48t59_rtc_readalarm(struct device *dev, struct rtc_wkalrm *alrm)
{
	struct platform_device *pdev = to_platform_device(dev);
	struct m48t59_plat_data *pdata = pdev->dev.platform_data;
	struct m48t59_private *m48t59 = platform_get_drvdata(pdev);
	struct rtc_time *tm = &alrm->time;
	unsigned long flags;
	u8 val;

	/* If no irq, we don't support ALARM */
	if (m48t59->irq == NO_IRQ)
		return -EIO;

	spin_lock_irqsave(&m48t59->lock, flags);
	/* Issue the READ command */
	M48T59_SET_BITS(M48T59_CNTL_READ, M48T59_CNTL);

	tm->tm_year = BCD2BIN(M48T59_READ(M48T59_YEAR));
	/* tm_mon is 0-11 */
	tm->tm_mon = BCD2BIN(M48T59_READ(M48T59_MONTH)) - 1;

	val = M48T59_READ(M48T59_WDAY);
	if ((val & M48T59_WDAY_CEB) && (val & M48T59_WDAY_CB))
		tm->tm_year += 100;	/* one century */

	tm->tm_mday = BCD2BIN(M48T59_READ(M48T59_ALARM_DATE));
	tm->tm_hour = BCD2BIN(M48T59_READ(M48T59_ALARM_HOUR));
	tm->tm_min = BCD2BIN(M48T59_READ(M48T59_ALARM_MIN));
	tm->tm_sec = BCD2BIN(M48T59_READ(M48T59_ALARM_SEC));

	/* Clear the READ bit */
	M48T59_CLEAR_BITS(M48T59_CNTL_READ, M48T59_CNTL);
	spin_unlock_irqrestore(&m48t59->lock, flags);

	dev_dbg(dev, "RTC read alarm time %04d-%02d-%02d %02d/%02d/%02d\n",
		tm->tm_year + 1900, tm->tm_mon, tm->tm_mday,
		tm->tm_hour, tm->tm_min, tm->tm_sec);
	return 0;
}
182
/*
 * Set alarm time and date in RTC
 *
 * Out-of-range fields would normally become 0xff ("always match"),
 * but an out-of-range mday is substituted with the chip's current
 * day-of-month instead.
 */
static int m48t59_rtc_setalarm(struct device *dev, struct rtc_wkalrm *alrm)
{
	struct platform_device *pdev = to_platform_device(dev);
	struct m48t59_plat_data *pdata = pdev->dev.platform_data;
	struct m48t59_private *m48t59 = platform_get_drvdata(pdev);
	struct rtc_time *tm = &alrm->time;
	u8 mday, hour, min, sec;
	unsigned long flags;

	/* If no irq, we don't support ALARM */
	if (m48t59->irq == NO_IRQ)
		return -EIO;

	/*
	 * 0xff means "always match"
	 */
	mday = tm->tm_mday;
	mday = (mday >= 1 && mday <= 31) ? BIN2BCD(mday) : 0xff;
	if (mday == 0xff)
		mday = M48T59_READ(M48T59_MDAY);

	hour = tm->tm_hour;
	hour = (hour < 24) ? BIN2BCD(hour) : 0x00;

	min = tm->tm_min;
	min = (min < 60) ? BIN2BCD(min) : 0x00;

	sec = tm->tm_sec;
	sec = (sec < 60) ? BIN2BCD(sec) : 0x00;

	spin_lock_irqsave(&m48t59->lock, flags);
	/* Issue the WRITE command */
	M48T59_SET_BITS(M48T59_CNTL_WRITE, M48T59_CNTL);

	M48T59_WRITE(mday, M48T59_ALARM_DATE);
	M48T59_WRITE(hour, M48T59_ALARM_HOUR);
	M48T59_WRITE(min, M48T59_ALARM_MIN);
	M48T59_WRITE(sec, M48T59_ALARM_SEC);

	/* Clear the WRITE bit */
	M48T59_CLEAR_BITS(M48T59_CNTL_WRITE, M48T59_CNTL);
	spin_unlock_irqrestore(&m48t59->lock, flags);

	dev_dbg(dev, "RTC set alarm time %04d-%02d-%02d %02d/%02d/%02d\n",
		tm->tm_year + 1900, tm->tm_mon, tm->tm_mday,
		tm->tm_hour, tm->tm_min, tm->tm_sec);
	return 0;
}
234
/*
 * Handle commands from user-space
 *
 * Only alarm-interrupt enable/disable is supported; everything else is
 * left to the RTC core (-ENOIOCTLCMD).
 */
static int m48t59_rtc_ioctl(struct device *dev, unsigned int cmd,
			    unsigned long arg)
{
	struct platform_device *pdev = to_platform_device(dev);
	struct m48t59_plat_data *pdata = pdev->dev.platform_data;
	struct m48t59_private *m48t59 = platform_get_drvdata(pdev);
	unsigned long flags;
	int ret = 0;

	spin_lock_irqsave(&m48t59->lock, flags);
	switch (cmd) {
	case RTC_AIE_OFF:	/* alarm interrupt off */
		M48T59_WRITE(0x00, M48T59_INTR);
		break;
	case RTC_AIE_ON:	/* alarm interrupt on */
		M48T59_WRITE(M48T59_INTR_AFE, M48T59_INTR);
		break;
	default:
		ret = -ENOIOCTLCMD;
		break;
	}
	spin_unlock_irqrestore(&m48t59->lock, flags);

	return ret;
}
263
/* /proc reporting: expose the battery-low flag from the flags register. */
static int m48t59_rtc_proc(struct device *dev, struct seq_file *seq)
{
	struct platform_device *pdev = to_platform_device(dev);
	struct m48t59_plat_data *pdata = pdev->dev.platform_data;
	struct m48t59_private *m48t59 = platform_get_drvdata(pdev);
	unsigned long flags;
	u8 val;

	spin_lock_irqsave(&m48t59->lock, flags);
	val = M48T59_READ(M48T59_FLAGS);
	spin_unlock_irqrestore(&m48t59->lock, flags);

	seq_printf(seq, "battery\t\t: %s\n",
		   (val & M48T59_FLAGS_BF) ? "low" : "normal");
	return 0;
}
280
/*
 * IRQ handler for the RTC
 *
 * Shared handler: only claims the interrupt when the alarm flag (AF)
 * is set; reading the flags register also acknowledges it on the chip.
 */
static irqreturn_t m48t59_rtc_interrupt(int irq, void *dev_id)
{
	struct device *dev = (struct device *)dev_id;
	struct platform_device *pdev = to_platform_device(dev);
	struct m48t59_plat_data *pdata = pdev->dev.platform_data;
	struct m48t59_private *m48t59 = platform_get_drvdata(pdev);
	u8 event;

	spin_lock(&m48t59->lock);
	event = M48T59_READ(M48T59_FLAGS);
	spin_unlock(&m48t59->lock);

	if (event & M48T59_FLAGS_AF) {
		/* forward one alarm event to the RTC core */
		rtc_update_irq(m48t59->rtc, 1, (RTC_AF | RTC_IRQF));
		return IRQ_HANDLED;
	}

	return IRQ_NONE;
}
303
/* RTC class operations (alarm ops return -EIO when no IRQ is wired) */
static const struct rtc_class_ops m48t59_rtc_ops = {
	.ioctl		= m48t59_rtc_ioctl,
	.read_time	= m48t59_rtc_read_time,
	.set_time	= m48t59_rtc_set_time,
	.read_alarm	= m48t59_rtc_readalarm,
	.set_alarm	= m48t59_rtc_setalarm,
	.proc		= m48t59_rtc_proc,
};
312
313static ssize_t m48t59_nvram_read(struct kobject *kobj,
314 struct bin_attribute *bin_attr,
315 char *buf, loff_t pos, size_t size)
316{
317 struct device *dev = container_of(kobj, struct device, kobj);
318 struct platform_device *pdev = to_platform_device(dev);
319 struct m48t59_plat_data *pdata = pdev->dev.platform_data;
320 struct m48t59_private *m48t59 = platform_get_drvdata(pdev);
321 ssize_t cnt = 0;
322 unsigned long flags;
323
324 for (; size > 0 && pos < M48T59_NVRAM_SIZE; cnt++, size--) {
325 spin_lock_irqsave(&m48t59->lock, flags);
326 *buf++ = M48T59_READ(cnt);
327 spin_unlock_irqrestore(&m48t59->lock, flags);
328 }
329
330 return cnt;
331}
332
333static ssize_t m48t59_nvram_write(struct kobject *kobj,
334 struct bin_attribute *bin_attr,
335 char *buf, loff_t pos, size_t size)
336{
337 struct device *dev = container_of(kobj, struct device, kobj);
338 struct platform_device *pdev = to_platform_device(dev);
339 struct m48t59_plat_data *pdata = pdev->dev.platform_data;
340 struct m48t59_private *m48t59 = platform_get_drvdata(pdev);
341 ssize_t cnt = 0;
342 unsigned long flags;
343
344 for (; size > 0 && pos < M48T59_NVRAM_SIZE; cnt++, size--) {
345 spin_lock_irqsave(&m48t59->lock, flags);
346 M48T59_WRITE(*buf++, cnt);
347 spin_unlock_irqrestore(&m48t59->lock, flags);
348 }
349
350 return cnt;
351}
352
353static struct bin_attribute m48t59_nvram_attr = {
354 .attr = {
355 .name = "nvram",
356 .mode = S_IRUGO | S_IWUGO,
357 .owner = THIS_MODULE,
358 },
359 .read = m48t59_nvram_read,
360 .write = m48t59_nvram_write,
361};
362
363static int __devinit m48t59_rtc_probe(struct platform_device *pdev)
364{
365 struct m48t59_plat_data *pdata = pdev->dev.platform_data;
366 struct m48t59_private *m48t59 = NULL;
367 struct resource *res;
368 int ret = -ENOMEM;
369
370 /* This chip could be memory-mapped or I/O-mapped */
371 res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
372 if (!res) {
373 res = platform_get_resource(pdev, IORESOURCE_IO, 0);
374 if (!res)
375 return -EINVAL;
376 }
377
378 if (res->flags & IORESOURCE_IO) {
379 /* If we are I/O-mapped, the platform should provide
380 * the operations accessing chip registers.
381 */
382 if (!pdata || !pdata->write_byte || !pdata->read_byte)
383 return -EINVAL;
384 } else if (res->flags & IORESOURCE_MEM) {
385 /* we are memory-mapped */
386 if (!pdata) {
387 pdata = kzalloc(sizeof(*pdata), GFP_KERNEL);
388 if (!pdata)
389 return -ENOMEM;
390 /* Ensure we only kmalloc platform data once */
391 pdev->dev.platform_data = pdata;
392 }
393
394 /* Try to use the generic memory read/write ops */
395 if (!pdata->write_byte)
396 pdata->write_byte = m48t59_mem_writeb;
397 if (!pdata->read_byte)
398 pdata->read_byte = m48t59_mem_readb;
399 }
400
401 m48t59 = kzalloc(sizeof(*m48t59), GFP_KERNEL);
402 if (!m48t59)
403 return -ENOMEM;
404
405 m48t59->size = res->end - res->start + 1;
406 m48t59->ioaddr = ioremap(res->start, m48t59->size);
407 if (!m48t59->ioaddr)
408 goto out;
409
410 /* Try to get irq number. We also can work in
411 * the mode without IRQ.
412 */
413 m48t59->irq = platform_get_irq(pdev, 0);
414 if (m48t59->irq < 0)
415 m48t59->irq = NO_IRQ;
416
417 if (m48t59->irq != NO_IRQ) {
418 ret = request_irq(m48t59->irq, m48t59_rtc_interrupt,
419 IRQF_SHARED, "rtc-m48t59", &pdev->dev);
420 if (ret)
421 goto out;
422 }
423
424 m48t59->rtc = rtc_device_register("m48t59", &pdev->dev,
425 &m48t59_rtc_ops, THIS_MODULE);
426 if (IS_ERR(m48t59->rtc)) {
427 ret = PTR_ERR(m48t59->rtc);
428 goto out;
429 }
430
431 ret = sysfs_create_bin_file(&pdev->dev.kobj, &m48t59_nvram_attr);
432 if (ret)
433 goto out;
434
435 spin_lock_init(&m48t59->lock);
436 platform_set_drvdata(pdev, m48t59);
437 return 0;
438
439out:
440 if (!IS_ERR(m48t59->rtc))
441 rtc_device_unregister(m48t59->rtc);
442 if (m48t59->irq != NO_IRQ)
443 free_irq(m48t59->irq, &pdev->dev);
444 if (m48t59->ioaddr)
445 iounmap(m48t59->ioaddr);
446 if (m48t59)
447 kfree(m48t59);
448 return ret;
449}
450
/* Unbind: remove sysfs file, RTC device, IRQ, mapping, and state. */
static int __devexit m48t59_rtc_remove(struct platform_device *pdev)
{
	struct m48t59_private *m48t59 = platform_get_drvdata(pdev);

	sysfs_remove_bin_file(&pdev->dev.kobj, &m48t59_nvram_attr);
	/* NOTE(review): IS_ERR(NULL) is false, so this relies on rtc
	 * being valid whenever remove() runs (probe() fails otherwise);
	 * confirm no path leaves rtc NULL here. */
	if (!IS_ERR(m48t59->rtc))
		rtc_device_unregister(m48t59->rtc);
	if (m48t59->ioaddr)
		iounmap(m48t59->ioaddr);
	if (m48t59->irq != NO_IRQ)
		free_irq(m48t59->irq, &pdev->dev);
	platform_set_drvdata(pdev, NULL);
	kfree(m48t59);
	return 0;
}
466
/* platform driver glue; matched by the "rtc-m48t59" device name */
static struct platform_driver m48t59_rtc_platdrv = {
	.driver = {
		.name	= "rtc-m48t59",
		.owner	= THIS_MODULE,
	},
	.probe	= m48t59_rtc_probe,
	.remove	= __devexit_p(m48t59_rtc_remove),
};
475
/* module init/exit: just (de)register the platform driver */
static int __init m48t59_rtc_init(void)
{
	return platform_driver_register(&m48t59_rtc_platdrv);
}

static void __exit m48t59_rtc_exit(void)
{
	platform_driver_unregister(&m48t59_rtc_platdrv);
}

module_init(m48t59_rtc_init);
module_exit(m48t59_rtc_exit);

MODULE_AUTHOR("Mark Zhan <rongkai.zhan@windriver.com>");
MODULE_DESCRIPTION("M48T59 RTC driver");
MODULE_LICENSE("GPL");
diff --git a/drivers/rtc/rtc-rs5c372.c b/drivers/rtc/rtc-rs5c372.c
index 09bbe575647b..6b67b5097927 100644
--- a/drivers/rtc/rtc-rs5c372.c
+++ b/drivers/rtc/rtc-rs5c372.c
@@ -13,13 +13,7 @@
13#include <linux/rtc.h> 13#include <linux/rtc.h>
14#include <linux/bcd.h> 14#include <linux/bcd.h>
15 15
16#define DRV_VERSION "0.4" 16#define DRV_VERSION "0.5"
17
18/* Addresses to scan */
19static unsigned short normal_i2c[] = { /* 0x32,*/ I2C_CLIENT_END };
20
21/* Insmod parameters */
22I2C_CLIENT_INSMOD;
23 17
24 18
25/* 19/*
@@ -88,9 +82,6 @@ struct rs5c372 {
88 unsigned has_irq:1; 82 unsigned has_irq:1;
89 char buf[17]; 83 char buf[17];
90 char *regs; 84 char *regs;
91
92 /* on conversion to a "new style" i2c driver, this vanishes */
93 struct i2c_client dev;
94}; 85};
95 86
96static int rs5c_get_regs(struct rs5c372 *rs5c) 87static int rs5c_get_regs(struct rs5c372 *rs5c)
@@ -483,25 +474,35 @@ static int rs5c_sysfs_register(struct device *dev)
483 return err; 474 return err;
484} 475}
485 476
477static void rs5c_sysfs_unregister(struct device *dev)
478{
479 device_remove_file(dev, &dev_attr_trim);
480 device_remove_file(dev, &dev_attr_osc);
481}
482
486#else 483#else
487static int rs5c_sysfs_register(struct device *dev) 484static int rs5c_sysfs_register(struct device *dev)
488{ 485{
489 return 0; 486 return 0;
490} 487}
488
489static void rs5c_sysfs_unregister(struct device *dev)
490{
491 /* nothing */
492}
491#endif /* SYSFS */ 493#endif /* SYSFS */
492 494
493static struct i2c_driver rs5c372_driver; 495static struct i2c_driver rs5c372_driver;
494 496
495static int rs5c372_probe(struct i2c_adapter *adapter, int address, int kind) 497static int rs5c372_probe(struct i2c_client *client)
496{ 498{
497 int err = 0; 499 int err = 0;
498 struct i2c_client *client;
499 struct rs5c372 *rs5c372; 500 struct rs5c372 *rs5c372;
500 struct rtc_time tm; 501 struct rtc_time tm;
501 502
502 dev_dbg(&adapter->dev, "%s\n", __FUNCTION__); 503 dev_dbg(&client->dev, "%s\n", __FUNCTION__);
503 504
504 if (!i2c_check_functionality(adapter, I2C_FUNC_I2C)) { 505 if (!i2c_check_functionality(client->adapter, I2C_FUNC_I2C)) {
505 err = -ENODEV; 506 err = -ENODEV;
506 goto exit; 507 goto exit;
507 } 508 }
@@ -514,35 +515,22 @@ static int rs5c372_probe(struct i2c_adapter *adapter, int address, int kind)
514 /* we read registers 0x0f then 0x00-0x0f; skip the first one */ 515 /* we read registers 0x0f then 0x00-0x0f; skip the first one */
515 rs5c372->regs=&rs5c372->buf[1]; 516 rs5c372->regs=&rs5c372->buf[1];
516 517
517 /* On conversion to a "new style" i2c driver, we'll be handed
518 * the i2c_client (we won't create it)
519 */
520 client = &rs5c372->dev;
521 rs5c372->client = client; 518 rs5c372->client = client;
522
523 /* I2C client */
524 client->addr = address;
525 client->driver = &rs5c372_driver;
526 client->adapter = adapter;
527
528 strlcpy(client->name, rs5c372_driver.driver.name, I2C_NAME_SIZE);
529
530 i2c_set_clientdata(client, rs5c372); 519 i2c_set_clientdata(client, rs5c372);
531 520
532 /* Inform the i2c layer */
533 if ((err = i2c_attach_client(client)))
534 goto exit_kfree;
535
536 err = rs5c_get_regs(rs5c372); 521 err = rs5c_get_regs(rs5c372);
537 if (err < 0) 522 if (err < 0)
538 goto exit_detach; 523 goto exit_kfree;
539 524
540 /* For "new style" drivers, irq is in i2c_client and chip type 525 if (strcmp(client->name, "rs5c372a") == 0)
541 * info comes from i2c_client.dev.platform_data. Meanwhile: 526 rs5c372->type = rtc_rs5c372a;
542 * 527 else if (strcmp(client->name, "rs5c372b") == 0)
543 * STICK BOARD-SPECIFIC SETUP CODE RIGHT HERE 528 rs5c372->type = rtc_rs5c372b;
544 */ 529 else if (strcmp(client->name, "rv5c386") == 0)
545 if (rs5c372->type == rtc_undef) { 530 rs5c372->type = rtc_rv5c386;
531 else if (strcmp(client->name, "rv5c387a") == 0)
532 rs5c372->type = rtc_rv5c387a;
533 else {
546 rs5c372->type = rtc_rs5c372b; 534 rs5c372->type = rtc_rs5c372b;
547 dev_warn(&client->dev, "assuming rs5c372b\n"); 535 dev_warn(&client->dev, "assuming rs5c372b\n");
548 } 536 }
@@ -567,7 +555,7 @@ static int rs5c372_probe(struct i2c_adapter *adapter, int address, int kind)
567 break; 555 break;
568 default: 556 default:
569 dev_err(&client->dev, "unknown RTC type\n"); 557 dev_err(&client->dev, "unknown RTC type\n");
570 goto exit_detach; 558 goto exit_kfree;
571 } 559 }
572 560
573 /* if the oscillator lost power and no other software (like 561 /* if the oscillator lost power and no other software (like
@@ -601,7 +589,7 @@ static int rs5c372_probe(struct i2c_adapter *adapter, int address, int kind)
601 589
602 if ((i2c_master_send(client, buf, 3)) != 3) { 590 if ((i2c_master_send(client, buf, 3)) != 3) {
603 dev_err(&client->dev, "setup error\n"); 591 dev_err(&client->dev, "setup error\n");
604 goto exit_detach; 592 goto exit_kfree;
605 } 593 }
606 rs5c372->regs[RS5C_REG_CTRL1] = buf[1]; 594 rs5c372->regs[RS5C_REG_CTRL1] = buf[1];
607 rs5c372->regs[RS5C_REG_CTRL2] = buf[2]; 595 rs5c372->regs[RS5C_REG_CTRL2] = buf[2];
@@ -621,14 +609,14 @@ static int rs5c372_probe(struct i2c_adapter *adapter, int address, int kind)
621 rs5c372->time24 ? "24hr" : "am/pm" 609 rs5c372->time24 ? "24hr" : "am/pm"
622 ); 610 );
623 611
624 /* FIXME when client->irq exists, use it to register alarm irq */ 612 /* REVISIT use client->irq to register alarm irq ... */
625 613
626 rs5c372->rtc = rtc_device_register(rs5c372_driver.driver.name, 614 rs5c372->rtc = rtc_device_register(rs5c372_driver.driver.name,
627 &client->dev, &rs5c372_rtc_ops, THIS_MODULE); 615 &client->dev, &rs5c372_rtc_ops, THIS_MODULE);
628 616
629 if (IS_ERR(rs5c372->rtc)) { 617 if (IS_ERR(rs5c372->rtc)) {
630 err = PTR_ERR(rs5c372->rtc); 618 err = PTR_ERR(rs5c372->rtc);
631 goto exit_detach; 619 goto exit_kfree;
632 } 620 }
633 621
634 err = rs5c_sysfs_register(&client->dev); 622 err = rs5c_sysfs_register(&client->dev);
@@ -640,9 +628,6 @@ static int rs5c372_probe(struct i2c_adapter *adapter, int address, int kind)
640exit_devreg: 628exit_devreg:
641 rtc_device_unregister(rs5c372->rtc); 629 rtc_device_unregister(rs5c372->rtc);
642 630
643exit_detach:
644 i2c_detach_client(client);
645
646exit_kfree: 631exit_kfree:
647 kfree(rs5c372); 632 kfree(rs5c372);
648 633
@@ -650,24 +635,12 @@ exit:
650 return err; 635 return err;
651} 636}
652 637
653static int rs5c372_attach(struct i2c_adapter *adapter) 638static int rs5c372_remove(struct i2c_client *client)
654{ 639{
655 return i2c_probe(adapter, &addr_data, rs5c372_probe);
656}
657
658static int rs5c372_detach(struct i2c_client *client)
659{
660 int err;
661 struct rs5c372 *rs5c372 = i2c_get_clientdata(client); 640 struct rs5c372 *rs5c372 = i2c_get_clientdata(client);
662 641
663 if (rs5c372->rtc) 642 rtc_device_unregister(rs5c372->rtc);
664 rtc_device_unregister(rs5c372->rtc); 643 rs5c_sysfs_unregister(&client->dev);
665
666 /* REVISIT properly destroy the sysfs files ... */
667
668 if ((err = i2c_detach_client(client)))
669 return err;
670
671 kfree(rs5c372); 644 kfree(rs5c372);
672 return 0; 645 return 0;
673} 646}
@@ -676,8 +649,8 @@ static struct i2c_driver rs5c372_driver = {
676 .driver = { 649 .driver = {
677 .name = "rtc-rs5c372", 650 .name = "rtc-rs5c372",
678 }, 651 },
679 .attach_adapter = &rs5c372_attach, 652 .probe = rs5c372_probe,
680 .detach_client = &rs5c372_detach, 653 .remove = rs5c372_remove,
681}; 654};
682 655
683static __init int rs5c372_init(void) 656static __init int rs5c372_init(void)
diff --git a/drivers/s390/block/dcssblk.c b/drivers/s390/block/dcssblk.c
index 1340451ea408..35765f6a86e0 100644
--- a/drivers/s390/block/dcssblk.c
+++ b/drivers/s390/block/dcssblk.c
@@ -747,14 +747,9 @@ dcssblk_check_params(void)
747static void __exit 747static void __exit
748dcssblk_exit(void) 748dcssblk_exit(void)
749{ 749{
750 int rc;
751
752 PRINT_DEBUG("DCSSBLOCK EXIT...\n"); 750 PRINT_DEBUG("DCSSBLOCK EXIT...\n");
753 s390_root_dev_unregister(dcssblk_root_dev); 751 s390_root_dev_unregister(dcssblk_root_dev);
754 rc = unregister_blkdev(dcssblk_major, DCSSBLK_NAME); 752 unregister_blkdev(dcssblk_major, DCSSBLK_NAME);
755 if (rc) {
756 PRINT_ERR("unregister_blkdev() failed!\n");
757 }
758 PRINT_DEBUG("...finished!\n"); 753 PRINT_DEBUG("...finished!\n");
759} 754}
760 755
diff --git a/drivers/sbus/char/jsflash.c b/drivers/sbus/char/jsflash.c
index 512857a23169..5157a2abc58d 100644
--- a/drivers/sbus/char/jsflash.c
+++ b/drivers/sbus/char/jsflash.c
@@ -619,8 +619,7 @@ static void __exit jsflash_cleanup_module(void)
619 jsf0.busy = 0; 619 jsf0.busy = 0;
620 620
621 misc_deregister(&jsf_dev); 621 misc_deregister(&jsf_dev);
622 if (unregister_blkdev(JSFD_MAJOR, "jsfd") != 0) 622 unregister_blkdev(JSFD_MAJOR, "jsfd");
623 printk("jsfd: cleanup_module failed\n");
624 blk_cleanup_queue(jsf_queue); 623 blk_cleanup_queue(jsf_queue);
625} 624}
626 625
diff --git a/drivers/scsi/libsas/sas_scsi_host.c b/drivers/scsi/libsas/sas_scsi_host.c
index d70ddfda93fc..9c5342e7a69c 100644
--- a/drivers/scsi/libsas/sas_scsi_host.c
+++ b/drivers/scsi/libsas/sas_scsi_host.c
@@ -40,6 +40,7 @@
40 40
41#include <linux/err.h> 41#include <linux/err.h>
42#include <linux/blkdev.h> 42#include <linux/blkdev.h>
43#include <linux/freezer.h>
43#include <linux/scatterlist.h> 44#include <linux/scatterlist.h>
44 45
45/* ---------- SCSI Host glue ---------- */ 46/* ---------- SCSI Host glue ---------- */
@@ -868,8 +869,6 @@ static int sas_queue_thread(void *_sas_ha)
868{ 869{
869 struct sas_ha_struct *sas_ha = _sas_ha; 870 struct sas_ha_struct *sas_ha = _sas_ha;
870 871
871 current->flags |= PF_NOFREEZE;
872
873 while (1) { 872 while (1) {
874 set_current_state(TASK_INTERRUPTIBLE); 873 set_current_state(TASK_INTERRUPTIBLE);
875 schedule(); 874 schedule();
diff --git a/drivers/scsi/scsi_error.c b/drivers/scsi/scsi_error.c
index 9adb64ac054c..8a525abda30f 100644
--- a/drivers/scsi/scsi_error.c
+++ b/drivers/scsi/scsi_error.c
@@ -19,6 +19,7 @@
19#include <linux/timer.h> 19#include <linux/timer.h>
20#include <linux/string.h> 20#include <linux/string.h>
21#include <linux/kernel.h> 21#include <linux/kernel.h>
22#include <linux/freezer.h>
22#include <linux/kthread.h> 23#include <linux/kthread.h>
23#include <linux/interrupt.h> 24#include <linux/interrupt.h>
24#include <linux/blkdev.h> 25#include <linux/blkdev.h>
@@ -1516,8 +1517,6 @@ int scsi_error_handler(void *data)
1516{ 1517{
1517 struct Scsi_Host *shost = data; 1518 struct Scsi_Host *shost = data;
1518 1519
1519 current->flags |= PF_NOFREEZE;
1520
1521 /* 1520 /*
1522 * We use TASK_INTERRUPTIBLE so that the thread is not 1521 * We use TASK_INTERRUPTIBLE so that the thread is not
1523 * counted against the load average as a running process. 1522 * counted against the load average as a running process.
diff --git a/drivers/serial/Kconfig b/drivers/serial/Kconfig
index cab42cbd920d..7fa413ddccf5 100644
--- a/drivers/serial/Kconfig
+++ b/drivers/serial/Kconfig
@@ -338,6 +338,34 @@ config SERIAL_AMBA_PL011_CONSOLE
338 your boot loader (lilo or loadlin) about how to pass options to the 338 your boot loader (lilo or loadlin) about how to pass options to the
339 kernel at boot time.) 339 kernel at boot time.)
340 340
341config SERIAL_SB1250_DUART
342 tristate "BCM1xxx on-chip DUART serial support"
343 depends on SIBYTE_SB1xxx_SOC=y
344 select SERIAL_CORE
345 default y
346 ---help---
347 Support for the asynchronous serial interface (DUART) included in
348 the BCM1250 and derived System-On-a-Chip (SOC) devices. Note that
349 the letter D in DUART stands for "dual", which is how the device
350 is implemented. Depending on the SOC configuration there may be
351 one or more DUARTs available of which all are handled.
352
353 If unsure, say Y. To compile this driver as a module, choose M here:
354 the module will be called sb1250-duart.
355
356config SERIAL_SB1250_DUART_CONSOLE
357 bool "Support for console on a BCM1xxx DUART serial port"
358 depends on SERIAL_SB1250_DUART=y
359 select SERIAL_CORE_CONSOLE
360 default y
361 ---help---
362 If you say Y here, it will be possible to use a serial port as the
363 system console (the system console is the device which receives all
364 kernel messages and warnings and which allows logins in single user
365 mode).
366
367 If unsure, say Y.
368
341config SERIAL_ATMEL 369config SERIAL_ATMEL
342 bool "AT91 / AT32 on-chip serial port support" 370 bool "AT91 / AT32 on-chip serial port support"
343 depends on (ARM && ARCH_AT91) || AVR32 371 depends on (ARM && ARCH_AT91) || AVR32
diff --git a/drivers/serial/Makefile b/drivers/serial/Makefile
index 08ad0d978183..c48cdd61b736 100644
--- a/drivers/serial/Makefile
+++ b/drivers/serial/Makefile
@@ -51,6 +51,7 @@ obj-$(CONFIG_SERIAL_MPC52xx) += mpc52xx_uart.o
51obj-$(CONFIG_SERIAL_ICOM) += icom.o 51obj-$(CONFIG_SERIAL_ICOM) += icom.o
52obj-$(CONFIG_SERIAL_M32R_SIO) += m32r_sio.o 52obj-$(CONFIG_SERIAL_M32R_SIO) += m32r_sio.o
53obj-$(CONFIG_SERIAL_MPSC) += mpsc.o 53obj-$(CONFIG_SERIAL_MPSC) += mpsc.o
54obj-$(CONFIG_SERIAL_SB1250_DUART) += sb1250-duart.o
54obj-$(CONFIG_ETRAX_SERIAL) += crisv10.o 55obj-$(CONFIG_ETRAX_SERIAL) += crisv10.o
55obj-$(CONFIG_SERIAL_JSM) += jsm/ 56obj-$(CONFIG_SERIAL_JSM) += jsm/
56obj-$(CONFIG_SERIAL_TXX9) += serial_txx9.o 57obj-$(CONFIG_SERIAL_TXX9) += serial_txx9.o
diff --git a/drivers/serial/sb1250-duart.c b/drivers/serial/sb1250-duart.c
new file mode 100644
index 000000000000..1d9d7285172a
--- /dev/null
+++ b/drivers/serial/sb1250-duart.c
@@ -0,0 +1,972 @@
1/*
2 * drivers/serial/sb1250-duart.c
3 *
4 * Support for the asynchronous serial interface (DUART) included
5 * in the BCM1250 and derived System-On-a-Chip (SOC) devices.
6 *
7 * Copyright (c) 2007 Maciej W. Rozycki
8 *
9 * Derived from drivers/char/sb1250_duart.c for which the following
10 * copyright applies:
11 *
12 * Copyright (c) 2000, 2001, 2002, 2003, 2004 Broadcom Corporation
13 *
14 * This program is free software; you can redistribute it and/or
15 * modify it under the terms of the GNU General Public License
16 * as published by the Free Software Foundation; either version
17 * 2 of the License, or (at your option) any later version.
18 *
19 * References:
20 *
21 * "BCM1250/BCM1125/BCM1125H User Manual", Broadcom Corporation
22 */
23
24#if defined(CONFIG_SERIAL_SB1250_DUART_CONSOLE) && defined(CONFIG_MAGIC_SYSRQ)
25#define SUPPORT_SYSRQ
26#endif
27
28#include <linux/console.h>
29#include <linux/delay.h>
30#include <linux/errno.h>
31#include <linux/init.h>
32#include <linux/interrupt.h>
33#include <linux/ioport.h>
34#include <linux/kernel.h>
35#include <linux/major.h>
36#include <linux/serial.h>
37#include <linux/serial_core.h>
38#include <linux/spinlock.h>
39#include <linux/sysrq.h>
40#include <linux/tty.h>
41#include <linux/types.h>
42
43#include <asm/atomic.h>
44#include <asm/io.h>
45#include <asm/war.h>
46
47#include <asm/sibyte/sb1250.h>
48#include <asm/sibyte/sb1250_uart.h>
49#include <asm/sibyte/swarm.h>
50
51
52#if defined(CONFIG_SIBYTE_BCM1x55) || defined(CONFIG_SIBYTE_BCM1x80)
53#include <asm/sibyte/bcm1480_regs.h>
54#include <asm/sibyte/bcm1480_int.h>
55
56#define SBD_CHANREGS(line) A_BCM1480_DUART_CHANREG((line), 0)
57#define SBD_CTRLREGS(line) A_BCM1480_DUART_CTRLREG((line), 0)
58#define SBD_INT(line) (K_BCM1480_INT_UART_0 + (line))
59
60#elif defined(CONFIG_SIBYTE_SB1250) || defined(CONFIG_SIBYTE_BCM112X)
61#include <asm/sibyte/sb1250_regs.h>
62#include <asm/sibyte/sb1250_int.h>
63
64#define SBD_CHANREGS(line) A_DUART_CHANREG((line), 0)
65#define SBD_CTRLREGS(line) A_DUART_CTRLREG(0)
66#define SBD_INT(line) (K_INT_UART_0 + (line))
67
68#else
69#error invalid SB1250 UART configuration
70
71#endif
72
73
74MODULE_AUTHOR("Maciej W. Rozycki <macro@linux-mips.org>");
75MODULE_DESCRIPTION("BCM1xxx on-chip DUART serial driver");
76MODULE_LICENSE("GPL");
77
78
79#define DUART_MAX_CHIP 2
80#define DUART_MAX_SIDE 2
81
82/*
83 * Per-port state.
84 */
85struct sbd_port {
86 struct sbd_duart *duart;
87 struct uart_port port;
88 unsigned char __iomem *memctrl;
89 int tx_stopped;
90 int initialised;
91};
92
93/*
94 * Per-DUART state for the shared register space.
95 */
96struct sbd_duart {
97 struct sbd_port sport[2];
98 unsigned long mapctrl;
99 atomic_t map_guard;
100};
101
102#define to_sport(uport) container_of(uport, struct sbd_port, port)
103
104static struct sbd_duart sbd_duarts[DUART_MAX_CHIP];
105
106#define __unused __attribute__((__unused__))
107
108
109/*
110 * Reading and writing SB1250 DUART registers.
111 *
112 * There are three register spaces: two per-channel ones and
113 * a shared one. We have to define accessors appropriately.
114 * All registers are 64-bit and all but the Baud Rate Clock
115 * registers only define 8 least significant bits. There is
116 * also a workaround to take into account. Raw accessors use
117 * the full register width, but cooked ones truncate it
118 * intentionally so that the rest of the driver does not care.
119 */
120static u64 __read_sbdchn(struct sbd_port *sport, int reg)
121{
122 void __iomem *csr = sport->port.membase + reg;
123
124 return __raw_readq(csr);
125}
126
127static u64 __read_sbdshr(struct sbd_port *sport, int reg)
128{
129 void __iomem *csr = sport->memctrl + reg;
130
131 return __raw_readq(csr);
132}
133
134static void __write_sbdchn(struct sbd_port *sport, int reg, u64 value)
135{
136 void __iomem *csr = sport->port.membase + reg;
137
138 __raw_writeq(value, csr);
139}
140
141static void __write_sbdshr(struct sbd_port *sport, int reg, u64 value)
142{
143 void __iomem *csr = sport->memctrl + reg;
144
145 __raw_writeq(value, csr);
146}
147
148/*
149 * In bug 1956, we get glitches that can mess up uart registers. This
150 * "read-mode-reg after any register access" is an accepted workaround.
151 */
152static void __war_sbd1956(struct sbd_port *sport)
153{
154 __read_sbdchn(sport, R_DUART_MODE_REG_1);
155 __read_sbdchn(sport, R_DUART_MODE_REG_2);
156}
157
158static unsigned char read_sbdchn(struct sbd_port *sport, int reg)
159{
160 unsigned char retval;
161
162 retval = __read_sbdchn(sport, reg);
163 if (SIBYTE_1956_WAR)
164 __war_sbd1956(sport);
165 return retval;
166}
167
168static unsigned char read_sbdshr(struct sbd_port *sport, int reg)
169{
170 unsigned char retval;
171
172 retval = __read_sbdshr(sport, reg);
173 if (SIBYTE_1956_WAR)
174 __war_sbd1956(sport);
175 return retval;
176}
177
178static void write_sbdchn(struct sbd_port *sport, int reg, unsigned int value)
179{
180 __write_sbdchn(sport, reg, value);
181 if (SIBYTE_1956_WAR)
182 __war_sbd1956(sport);
183}
184
185static void write_sbdshr(struct sbd_port *sport, int reg, unsigned int value)
186{
187 __write_sbdshr(sport, reg, value);
188 if (SIBYTE_1956_WAR)
189 __war_sbd1956(sport);
190}
191
192
193static int sbd_receive_ready(struct sbd_port *sport)
194{
195 return read_sbdchn(sport, R_DUART_STATUS) & M_DUART_RX_RDY;
196}
197
198static int sbd_receive_drain(struct sbd_port *sport)
199{
200 int loops = 10000;
201
202 while (sbd_receive_ready(sport) && loops--)
203 read_sbdchn(sport, R_DUART_RX_HOLD);
204 return loops;
205}
206
207static int __unused sbd_transmit_ready(struct sbd_port *sport)
208{
209 return read_sbdchn(sport, R_DUART_STATUS) & M_DUART_TX_RDY;
210}
211
212static int __unused sbd_transmit_drain(struct sbd_port *sport)
213{
214 int loops = 10000;
215
216 while (!sbd_transmit_ready(sport) && loops--)
217 udelay(2);
218 return loops;
219}
220
221static int sbd_transmit_empty(struct sbd_port *sport)
222{
223 return read_sbdchn(sport, R_DUART_STATUS) & M_DUART_TX_EMT;
224}
225
226static int sbd_line_drain(struct sbd_port *sport)
227{
228 int loops = 10000;
229
230 while (!sbd_transmit_empty(sport) && loops--)
231 udelay(2);
232 return loops;
233}
234
235
236static unsigned int sbd_tx_empty(struct uart_port *uport)
237{
238 struct sbd_port *sport = to_sport(uport);
239
240 return sbd_transmit_empty(sport) ? TIOCSER_TEMT : 0;
241}
242
243static unsigned int sbd_get_mctrl(struct uart_port *uport)
244{
245 struct sbd_port *sport = to_sport(uport);
246 unsigned int mctrl, status;
247
248 status = read_sbdshr(sport, R_DUART_IN_PORT);
249 status >>= (uport->line) % 2;
250 mctrl = (!(status & M_DUART_IN_PIN0_VAL) ? TIOCM_CTS : 0) |
251 (!(status & M_DUART_IN_PIN4_VAL) ? TIOCM_CAR : 0) |
252 (!(status & M_DUART_RIN0_PIN) ? TIOCM_RNG : 0) |
253 (!(status & M_DUART_IN_PIN2_VAL) ? TIOCM_DSR : 0);
254 return mctrl;
255}
256
257static void sbd_set_mctrl(struct uart_port *uport, unsigned int mctrl)
258{
259 struct sbd_port *sport = to_sport(uport);
260 unsigned int clr = 0, set = 0, mode2;
261
262 if (mctrl & TIOCM_DTR)
263 set |= M_DUART_SET_OPR2;
264 else
265 clr |= M_DUART_CLR_OPR2;
266 if (mctrl & TIOCM_RTS)
267 set |= M_DUART_SET_OPR0;
268 else
269 clr |= M_DUART_CLR_OPR0;
270 clr <<= (uport->line) % 2;
271 set <<= (uport->line) % 2;
272
273 mode2 = read_sbdchn(sport, R_DUART_MODE_REG_2);
274 mode2 &= ~M_DUART_CHAN_MODE;
275 if (mctrl & TIOCM_LOOP)
276 mode2 |= V_DUART_CHAN_MODE_LCL_LOOP;
277 else
278 mode2 |= V_DUART_CHAN_MODE_NORMAL;
279
280 write_sbdshr(sport, R_DUART_CLEAR_OPR, clr);
281 write_sbdshr(sport, R_DUART_SET_OPR, set);
282 write_sbdchn(sport, R_DUART_MODE_REG_2, mode2);
283}
284
285static void sbd_stop_tx(struct uart_port *uport)
286{
287 struct sbd_port *sport = to_sport(uport);
288
289 write_sbdchn(sport, R_DUART_CMD, M_DUART_TX_DIS);
290 sport->tx_stopped = 1;
291};
292
293static void sbd_start_tx(struct uart_port *uport)
294{
295 struct sbd_port *sport = to_sport(uport);
296 unsigned int mask;
297
298 /* Enable tx interrupts. */
299 mask = read_sbdshr(sport, R_DUART_IMRREG((uport->line) % 2));
300 mask |= M_DUART_IMR_TX;
301 write_sbdshr(sport, R_DUART_IMRREG((uport->line) % 2), mask);
302
303 /* Go!, go!, go!... */
304 write_sbdchn(sport, R_DUART_CMD, M_DUART_TX_EN);
305 sport->tx_stopped = 0;
306};
307
308static void sbd_stop_rx(struct uart_port *uport)
309{
310 struct sbd_port *sport = to_sport(uport);
311
312 write_sbdshr(sport, R_DUART_IMRREG((uport->line) % 2), 0);
313};
314
315static void sbd_enable_ms(struct uart_port *uport)
316{
317 struct sbd_port *sport = to_sport(uport);
318
319 write_sbdchn(sport, R_DUART_AUXCTL_X,
320 M_DUART_CIN_CHNG_ENA | M_DUART_CTS_CHNG_ENA);
321}
322
323static void sbd_break_ctl(struct uart_port *uport, int break_state)
324{
325 struct sbd_port *sport = to_sport(uport);
326
327 if (break_state == -1)
328 write_sbdchn(sport, R_DUART_CMD, V_DUART_MISC_CMD_START_BREAK);
329 else
330 write_sbdchn(sport, R_DUART_CMD, V_DUART_MISC_CMD_STOP_BREAK);
331}
332
333
334static void sbd_receive_chars(struct sbd_port *sport)
335{
336 struct uart_port *uport = &sport->port;
337 struct uart_icount *icount;
338 unsigned int status, ch, flag;
339 int count;
340
341 for (count = 16; count; count--) {
342 status = read_sbdchn(sport, R_DUART_STATUS);
343 if (!(status & M_DUART_RX_RDY))
344 break;
345
346 ch = read_sbdchn(sport, R_DUART_RX_HOLD);
347
348 flag = TTY_NORMAL;
349
350 icount = &uport->icount;
351 icount->rx++;
352
353 if (unlikely(status &
354 (M_DUART_RCVD_BRK | M_DUART_FRM_ERR |
355 M_DUART_PARITY_ERR | M_DUART_OVRUN_ERR))) {
356 if (status & M_DUART_RCVD_BRK) {
357 icount->brk++;
358 if (uart_handle_break(uport))
359 continue;
360 } else if (status & M_DUART_FRM_ERR)
361 icount->frame++;
362 else if (status & M_DUART_PARITY_ERR)
363 icount->parity++;
364 if (status & M_DUART_OVRUN_ERR)
365 icount->overrun++;
366
367 status &= uport->read_status_mask;
368 if (status & M_DUART_RCVD_BRK)
369 flag = TTY_BREAK;
370 else if (status & M_DUART_FRM_ERR)
371 flag = TTY_FRAME;
372 else if (status & M_DUART_PARITY_ERR)
373 flag = TTY_PARITY;
374 }
375
376 if (uart_handle_sysrq_char(uport, ch))
377 continue;
378
379 uart_insert_char(uport, status, M_DUART_OVRUN_ERR, ch, flag);
380 }
381
382 tty_flip_buffer_push(uport->info->tty);
383}
384
385static void sbd_transmit_chars(struct sbd_port *sport)
386{
387 struct uart_port *uport = &sport->port;
388 struct circ_buf *xmit = &sport->port.info->xmit;
389 unsigned int mask;
390 int stop_tx;
391
392 /* XON/XOFF chars. */
393 if (sport->port.x_char) {
394 write_sbdchn(sport, R_DUART_TX_HOLD, sport->port.x_char);
395 sport->port.icount.tx++;
396 sport->port.x_char = 0;
397 return;
398 }
399
400 /* If nothing to do or stopped or hardware stopped. */
401 stop_tx = (uart_circ_empty(xmit) || uart_tx_stopped(&sport->port));
402
403 /* Send char. */
404 if (!stop_tx) {
405 write_sbdchn(sport, R_DUART_TX_HOLD, xmit->buf[xmit->tail]);
406 xmit->tail = (xmit->tail + 1) & (UART_XMIT_SIZE - 1);
407 sport->port.icount.tx++;
408
409 if (uart_circ_chars_pending(xmit) < WAKEUP_CHARS)
410 uart_write_wakeup(&sport->port);
411 }
412
413 /* Are we are done? */
414 if (stop_tx || uart_circ_empty(xmit)) {
415 /* Disable tx interrupts. */
416 mask = read_sbdshr(sport, R_DUART_IMRREG((uport->line) % 2));
417 mask &= ~M_DUART_IMR_TX;
418 write_sbdshr(sport, R_DUART_IMRREG((uport->line) % 2), mask);
419 }
420}
421
422static void sbd_status_handle(struct sbd_port *sport)
423{
424 struct uart_port *uport = &sport->port;
425 unsigned int delta;
426
427 delta = read_sbdshr(sport, R_DUART_INCHREG((uport->line) % 2));
428 delta >>= (uport->line) % 2;
429
430 if (delta & (M_DUART_IN_PIN0_VAL << S_DUART_IN_PIN_CHNG))
431 uart_handle_cts_change(uport, !(delta & M_DUART_IN_PIN0_VAL));
432
433 if (delta & (M_DUART_IN_PIN2_VAL << S_DUART_IN_PIN_CHNG))
434 uport->icount.dsr++;
435
436 if (delta & ((M_DUART_IN_PIN2_VAL | M_DUART_IN_PIN0_VAL) <<
437 S_DUART_IN_PIN_CHNG))
438 wake_up_interruptible(&uport->info->delta_msr_wait);
439}
440
441static irqreturn_t sbd_interrupt(int irq, void *dev_id)
442{
443 struct sbd_port *sport = dev_id;
444 struct uart_port *uport = &sport->port;
445 irqreturn_t status = IRQ_NONE;
446 unsigned int intstat;
447 int count;
448
449 for (count = 16; count; count--) {
450 intstat = read_sbdshr(sport,
451 R_DUART_ISRREG((uport->line) % 2));
452 intstat &= read_sbdshr(sport,
453 R_DUART_IMRREG((uport->line) % 2));
454 intstat &= M_DUART_ISR_ALL;
455 if (!intstat)
456 break;
457
458 if (intstat & M_DUART_ISR_RX)
459 sbd_receive_chars(sport);
460 if (intstat & M_DUART_ISR_IN)
461 sbd_status_handle(sport);
462 if (intstat & M_DUART_ISR_TX)
463 sbd_transmit_chars(sport);
464
465 status = IRQ_HANDLED;
466 }
467
468 return status;
469}
470
471
472static int sbd_startup(struct uart_port *uport)
473{
474 struct sbd_port *sport = to_sport(uport);
475 unsigned int mode1;
476 int ret;
477
478 ret = request_irq(sport->port.irq, sbd_interrupt,
479 IRQF_SHARED, "sb1250-duart", sport);
480 if (ret)
481 return ret;
482
483 /* Clear the receive FIFO. */
484 sbd_receive_drain(sport);
485
486 /* Clear the interrupt registers. */
487 write_sbdchn(sport, R_DUART_CMD, V_DUART_MISC_CMD_RESET_BREAK_INT);
488 read_sbdshr(sport, R_DUART_INCHREG((uport->line) % 2));
489
490 /* Set rx/tx interrupt to FIFO available. */
491 mode1 = read_sbdchn(sport, R_DUART_MODE_REG_1);
492 mode1 &= ~(M_DUART_RX_IRQ_SEL_RXFULL | M_DUART_TX_IRQ_SEL_TXEMPT);
493 write_sbdchn(sport, R_DUART_MODE_REG_1, mode1);
494
495 /* Disable tx, enable rx. */
496 write_sbdchn(sport, R_DUART_CMD, M_DUART_TX_DIS | M_DUART_RX_EN);
497 sport->tx_stopped = 1;
498
499 /* Enable interrupts. */
500 write_sbdshr(sport, R_DUART_IMRREG((uport->line) % 2),
501 M_DUART_IMR_IN | M_DUART_IMR_RX);
502
503 return 0;
504}
505
506static void sbd_shutdown(struct uart_port *uport)
507{
508 struct sbd_port *sport = to_sport(uport);
509
510 write_sbdchn(sport, R_DUART_CMD, M_DUART_TX_DIS | M_DUART_RX_DIS);
511 sport->tx_stopped = 1;
512 free_irq(sport->port.irq, sport);
513}
514
515
516static void sbd_init_port(struct sbd_port *sport)
517{
518 struct uart_port *uport = &sport->port;
519
520 if (sport->initialised)
521 return;
522
523 /* There is no DUART reset feature, so just set some sane defaults. */
524 write_sbdchn(sport, R_DUART_CMD, V_DUART_MISC_CMD_RESET_TX);
525 write_sbdchn(sport, R_DUART_CMD, V_DUART_MISC_CMD_RESET_RX);
526 write_sbdchn(sport, R_DUART_MODE_REG_1, V_DUART_BITS_PER_CHAR_8);
527 write_sbdchn(sport, R_DUART_MODE_REG_2, 0);
528 write_sbdchn(sport, R_DUART_FULL_CTL,
529 V_DUART_INT_TIME(0) | V_DUART_SIG_FULL(15));
530 write_sbdchn(sport, R_DUART_OPCR_X, 0);
531 write_sbdchn(sport, R_DUART_AUXCTL_X, 0);
532 write_sbdshr(sport, R_DUART_IMRREG((uport->line) % 2), 0);
533
534 sport->initialised = 1;
535}
536
537static void sbd_set_termios(struct uart_port *uport, struct ktermios *termios,
538 struct ktermios *old_termios)
539{
540 struct sbd_port *sport = to_sport(uport);
541 unsigned int mode1 = 0, mode2 = 0, aux = 0;
542 unsigned int mode1mask = 0, mode2mask = 0, auxmask = 0;
543 unsigned int oldmode1, oldmode2, oldaux;
544 unsigned int baud, brg;
545 unsigned int command;
546
547 mode1mask |= ~(M_DUART_PARITY_MODE | M_DUART_PARITY_TYPE_ODD |
548 M_DUART_BITS_PER_CHAR);
549 mode2mask |= ~M_DUART_STOP_BIT_LEN_2;
550 auxmask |= ~M_DUART_CTS_CHNG_ENA;
551
552 /* Byte size. */
553 switch (termios->c_cflag & CSIZE) {
554 case CS5:
555 case CS6:
556 /* Unsupported, leave unchanged. */
557 mode1mask |= M_DUART_PARITY_MODE;
558 break;
559 case CS7:
560 mode1 |= V_DUART_BITS_PER_CHAR_7;
561 break;
562 case CS8:
563 default:
564 mode1 |= V_DUART_BITS_PER_CHAR_8;
565 break;
566 }
567
568 /* Parity and stop bits. */
569 if (termios->c_cflag & CSTOPB)
570 mode2 |= M_DUART_STOP_BIT_LEN_2;
571 else
572 mode2 |= M_DUART_STOP_BIT_LEN_1;
573 if (termios->c_cflag & PARENB)
574 mode1 |= V_DUART_PARITY_MODE_ADD;
575 else
576 mode1 |= V_DUART_PARITY_MODE_NONE;
577 if (termios->c_cflag & PARODD)
578 mode1 |= M_DUART_PARITY_TYPE_ODD;
579 else
580 mode1 |= M_DUART_PARITY_TYPE_EVEN;
581
582 baud = uart_get_baud_rate(uport, termios, old_termios, 1200, 5000000);
583 brg = V_DUART_BAUD_RATE(baud);
584 /* The actual lower bound is 1221bps, so compensate. */
585 if (brg > M_DUART_CLK_COUNTER)
586 brg = M_DUART_CLK_COUNTER;
587
588 uart_update_timeout(uport, termios->c_cflag, baud);
589
590 uport->read_status_mask = M_DUART_OVRUN_ERR;
591 if (termios->c_iflag & INPCK)
592 uport->read_status_mask |= M_DUART_FRM_ERR |
593 M_DUART_PARITY_ERR;
594 if (termios->c_iflag & (BRKINT | PARMRK))
595 uport->read_status_mask |= M_DUART_RCVD_BRK;
596
597 uport->ignore_status_mask = 0;
598 if (termios->c_iflag & IGNPAR)
599 uport->ignore_status_mask |= M_DUART_FRM_ERR |
600 M_DUART_PARITY_ERR;
601 if (termios->c_iflag & IGNBRK) {
602 uport->ignore_status_mask |= M_DUART_RCVD_BRK;
603 if (termios->c_iflag & IGNPAR)
604 uport->ignore_status_mask |= M_DUART_OVRUN_ERR;
605 }
606
607 if (termios->c_cflag & CREAD)
608 command = M_DUART_RX_EN;
609 else
610 command = M_DUART_RX_DIS;
611
612 if (termios->c_cflag & CRTSCTS)
613 aux |= M_DUART_CTS_CHNG_ENA;
614 else
615 aux &= ~M_DUART_CTS_CHNG_ENA;
616
617 spin_lock(&uport->lock);
618
619 if (sport->tx_stopped)
620 command |= M_DUART_TX_DIS;
621 else
622 command |= M_DUART_TX_EN;
623
624 oldmode1 = read_sbdchn(sport, R_DUART_MODE_REG_1) & mode1mask;
625 oldmode2 = read_sbdchn(sport, R_DUART_MODE_REG_2) & mode2mask;
626 oldaux = read_sbdchn(sport, R_DUART_AUXCTL_X) & auxmask;
627
628 if (!sport->tx_stopped)
629 sbd_line_drain(sport);
630 write_sbdchn(sport, R_DUART_CMD, M_DUART_TX_DIS | M_DUART_RX_DIS);
631
632 write_sbdchn(sport, R_DUART_MODE_REG_1, mode1 | oldmode1);
633 write_sbdchn(sport, R_DUART_MODE_REG_2, mode2 | oldmode2);
634 write_sbdchn(sport, R_DUART_CLK_SEL, brg);
635 write_sbdchn(sport, R_DUART_AUXCTL_X, aux | oldaux);
636
637 write_sbdchn(sport, R_DUART_CMD, command);
638
639 spin_unlock(&uport->lock);
640}
641
642
643static const char *sbd_type(struct uart_port *uport)
644{
645 return "SB1250 DUART";
646}
647
648static void sbd_release_port(struct uart_port *uport)
649{
650 struct sbd_port *sport = to_sport(uport);
651 struct sbd_duart *duart = sport->duart;
652 int map_guard;
653
654 iounmap(sport->memctrl);
655 sport->memctrl = NULL;
656 iounmap(uport->membase);
657 uport->membase = NULL;
658
659 map_guard = atomic_add_return(-1, &duart->map_guard);
660 if (!map_guard)
661 release_mem_region(duart->mapctrl, DUART_CHANREG_SPACING);
662 release_mem_region(uport->mapbase, DUART_CHANREG_SPACING);
663}
664
665static int sbd_map_port(struct uart_port *uport)
666{
667 static const char *err = KERN_ERR "sbd: Cannot map MMIO\n";
668 struct sbd_port *sport = to_sport(uport);
669 struct sbd_duart *duart = sport->duart;
670
671 if (!uport->membase)
672 uport->membase = ioremap_nocache(uport->mapbase,
673 DUART_CHANREG_SPACING);
674 if (!uport->membase) {
675 printk(err);
676 return -ENOMEM;
677 }
678
679 if (!sport->memctrl)
680 sport->memctrl = ioremap_nocache(duart->mapctrl,
681 DUART_CHANREG_SPACING);
682 if (!sport->memctrl) {
683 printk(err);
684 iounmap(uport->membase);
685 uport->membase = NULL;
686 return -ENOMEM;
687 }
688
689 return 0;
690}
691
692static int sbd_request_port(struct uart_port *uport)
693{
694 static const char *err = KERN_ERR
695 "sbd: Unable to reserve MMIO resource\n";
696 struct sbd_duart *duart = to_sport(uport)->duart;
697 int map_guard;
698 int ret = 0;
699
700 if (!request_mem_region(uport->mapbase, DUART_CHANREG_SPACING,
701 "sb1250-duart")) {
702 printk(err);
703 return -EBUSY;
704 }
705 map_guard = atomic_add_return(1, &duart->map_guard);
706 if (map_guard == 1) {
707 if (!request_mem_region(duart->mapctrl, DUART_CHANREG_SPACING,
708 "sb1250-duart")) {
709 atomic_add(-1, &duart->map_guard);
710 printk(err);
711 ret = -EBUSY;
712 }
713 }
714 if (!ret) {
715 ret = sbd_map_port(uport);
716 if (ret) {
717 map_guard = atomic_add_return(-1, &duart->map_guard);
718 if (!map_guard)
719 release_mem_region(duart->mapctrl,
720 DUART_CHANREG_SPACING);
721 }
722 }
723 if (ret) {
724 release_mem_region(uport->mapbase, DUART_CHANREG_SPACING);
725 return ret;
726 }
727 return 0;
728}
729
730static void sbd_config_port(struct uart_port *uport, int flags)
731{
732 struct sbd_port *sport = to_sport(uport);
733
734 if (flags & UART_CONFIG_TYPE) {
735 if (sbd_request_port(uport))
736 return;
737
738 uport->type = PORT_SB1250_DUART;
739
740 sbd_init_port(sport);
741 }
742}
743
744static int sbd_verify_port(struct uart_port *uport, struct serial_struct *ser)
745{
746 int ret = 0;
747
748 if (ser->type != PORT_UNKNOWN && ser->type != PORT_SB1250_DUART)
749 ret = -EINVAL;
750 if (ser->irq != uport->irq)
751 ret = -EINVAL;
752 if (ser->baud_base != uport->uartclk / 16)
753 ret = -EINVAL;
754 return ret;
755}
756
757
/* Hooks wiring the SB1250 DUART into the serial core. */
static struct uart_ops sbd_ops = {
	.tx_empty = sbd_tx_empty,
	.set_mctrl = sbd_set_mctrl,
	.get_mctrl = sbd_get_mctrl,
	.stop_tx = sbd_stop_tx,
	.start_tx = sbd_start_tx,
	.stop_rx = sbd_stop_rx,
	.enable_ms = sbd_enable_ms,
	.break_ctl = sbd_break_ctl,
	.startup = sbd_startup,
	.shutdown = sbd_shutdown,
	.set_termios = sbd_set_termios,
	.type = sbd_type,
	.release_port = sbd_release_port,
	.request_port = sbd_request_port,
	.config_port = sbd_config_port,
	.verify_port = sbd_verify_port,
};
776
/* Initialize SB1250 DUART port structures. */
static void __init sbd_probe_duarts(void)
{
	static int probed;	/* one-shot guard; probing runs only once */
	int chip, side;
	int max_lines, line;

	if (probed)
		return;

	/* Set the number of available units based on the SOC type. */
	switch (soc_type) {
	case K_SYS_SOC_TYPE_BCM1x55:
	case K_SYS_SOC_TYPE_BCM1x80:
		max_lines = 4;
		break;
	default:
		/* Assume at least two serial ports at the normal address. */
		max_lines = 2;
		break;
	}

	probed = 1;

	/*
	 * Walk chips and their sides, assigning one uart_port per line
	 * until max_lines lines have been set up.
	 */
	for (chip = 0, line = 0; chip < DUART_MAX_CHIP && line < max_lines;
	     chip++) {
		/* Shared control-register block for this chip. */
		sbd_duarts[chip].mapctrl = SBD_CTRLREGS(line);

		for (side = 0; side < DUART_MAX_SIDE && line < max_lines;
		     side++, line++) {
			struct sbd_port *sport = &sbd_duarts[chip].sport[side];
			struct uart_port *uport = &sport->port;

			/* A non-NULL ->duart marks the port as present. */
			sport->duart = &sbd_duarts[chip];

			uport->irq = SBD_INT(line);
			uport->uartclk = 100000000 / 20 * 16;
			uport->fifosize = 16;
			uport->iotype = UPIO_MEM;
			uport->flags = UPF_BOOT_AUTOCONF;
			uport->ops = &sbd_ops;
			uport->line = line;
			uport->mapbase = SBD_CHANREGS(line);
		}
	}
}
823
824
825#ifdef CONFIG_SERIAL_SB1250_DUART_CONSOLE
826/*
827 * Serial console stuff. Very basic, polling driver for doing serial
828 * console output. The console_sem is held by the caller, so we
829 * shouldn't be interrupted for more console activity.
830 */
/*
 * Polled output of a single console character: drain the transmitter
 * first so the holding register is free, then write the character.
 */
static void sbd_console_putchar(struct uart_port *uport, int ch)
{
	struct sbd_port *sport = to_sport(uport);

	sbd_transmit_drain(sport);
	write_sbdchn(sport, R_DUART_TX_HOLD, ch);
}
838
/*
 * Console write: mask TX interrupts, force the transmitter on, emit the
 * string by polling, then restore the saved interrupt mask and (if the
 * port had TX stopped) the disabled transmitter state.
 */
static void sbd_console_write(struct console *co, const char *s,
			      unsigned int count)
{
	/* co->index is a flat line number; split it into chip and side. */
	int chip = co->index / DUART_MAX_SIDE;
	int side = co->index % DUART_MAX_SIDE;
	struct sbd_port *sport = &sbd_duarts[chip].sport[side];
	struct uart_port *uport = &sport->port;
	unsigned long flags;
	unsigned int mask;

	/* Disable transmit interrupts and enable the transmitter. */
	spin_lock_irqsave(&uport->lock, flags);
	/* IMRREG is indexed by the side of the chip (line % 2). */
	mask = read_sbdshr(sport, R_DUART_IMRREG((uport->line) % 2));
	write_sbdshr(sport, R_DUART_IMRREG((uport->line) % 2),
		     mask & ~M_DUART_IMR_TX);
	write_sbdchn(sport, R_DUART_CMD, M_DUART_TX_EN);
	spin_unlock_irqrestore(&uport->lock, flags);

	uart_console_write(&sport->port, s, count, sbd_console_putchar);

	/* Restore transmit interrupts and the transmitter enable. */
	spin_lock_irqsave(&uport->lock, flags);
	/* Let queued characters leave the line before touching TX state. */
	sbd_line_drain(sport);
	if (sport->tx_stopped)
		write_sbdchn(sport, R_DUART_CMD, M_DUART_TX_DIS);
	write_sbdshr(sport, R_DUART_IMRREG((uport->line) % 2), mask);
	spin_unlock_irqrestore(&uport->lock, flags);
}
867
868static int __init sbd_console_setup(struct console *co, char *options)
869{
870 int chip = co->index / DUART_MAX_SIDE;
871 int side = co->index % DUART_MAX_SIDE;
872 struct sbd_port *sport = &sbd_duarts[chip].sport[side];
873 struct uart_port *uport = &sport->port;
874 int baud = 115200;
875 int bits = 8;
876 int parity = 'n';
877 int flow = 'n';
878 int ret;
879
880 if (!sport->duart)
881 return -ENXIO;
882
883 ret = sbd_map_port(uport);
884 if (ret)
885 return ret;
886
887 sbd_init_port(sport);
888
889 if (options)
890 uart_parse_options(options, &baud, &parity, &bits, &flow);
891 return uart_set_options(uport, co, baud, parity, bits, flow);
892}
893
/* Forward declaration: the console's .data must point at the driver. */
static struct uart_driver sbd_reg;
static struct console sbd_console = {
	.name = "duart",
	.write = sbd_console_write,
	.device = uart_console_device,
	.setup = sbd_console_setup,
	.flags = CON_PRINTBUFFER,
	.index = -1,		/* -1: pick the line from "console=" */
	.data = &sbd_reg
};
904
/* Early boot hook: probe the DUART ports and register the console. */
static int __init sbd_serial_console_init(void)
{
	sbd_probe_duarts();
	register_console(&sbd_console);

	return 0;
}

console_initcall(sbd_serial_console_init);
914
915#define SERIAL_SB1250_DUART_CONSOLE &sbd_console
916#else
917#define SERIAL_SB1250_DUART_CONSOLE NULL
918#endif /* CONFIG_SERIAL_SB1250_DUART_CONSOLE */
919
920
/* The uart_driver instance registered with the serial core. */
static struct uart_driver sbd_reg = {
	.owner = THIS_MODULE,
	.driver_name = "serial",
	.dev_name = "duart",
	.major = TTY_MAJOR,
	.minor = SB1250_DUART_MINOR_BASE,
	.nr = DUART_MAX_CHIP * DUART_MAX_SIDE,
	.cons = SERIAL_SB1250_DUART_CONSOLE,
};
930
931/* Set up the driver and register it. */
932static int __init sbd_init(void)
933{
934 int i, ret;
935
936 sbd_probe_duarts();
937
938 ret = uart_register_driver(&sbd_reg);
939 if (ret)
940 return ret;
941
942 for (i = 0; i < DUART_MAX_CHIP * DUART_MAX_SIDE; i++) {
943 struct sbd_duart *duart = &sbd_duarts[i / DUART_MAX_SIDE];
944 struct sbd_port *sport = &duart->sport[i % DUART_MAX_SIDE];
945 struct uart_port *uport = &sport->port;
946
947 if (sport->duart)
948 uart_add_one_port(&sbd_reg, uport);
949 }
950
951 return 0;
952}
953
954/* Unload the driver. Unregister stuff, get ready to go away. */
955static void __exit sbd_exit(void)
956{
957 int i;
958
959 for (i = DUART_MAX_CHIP * DUART_MAX_SIDE - 1; i >= 0; i--) {
960 struct sbd_duart *duart = &sbd_duarts[i / DUART_MAX_SIDE];
961 struct sbd_port *sport = &duart->sport[i % DUART_MAX_SIDE];
962 struct uart_port *uport = &sport->port;
963
964 if (sport->duart)
965 uart_remove_one_port(&sbd_reg, uport);
966 }
967
968 uart_unregister_driver(&sbd_reg);
969}
970
971module_init(sbd_init);
972module_exit(sbd_exit);
diff --git a/drivers/spi/Kconfig b/drivers/spi/Kconfig
index 5e3f748f2693..b91571122daa 100644
--- a/drivers/spi/Kconfig
+++ b/drivers/spi/Kconfig
@@ -107,6 +107,15 @@ config SPI_IMX
107 This enables using the Freescale iMX SPI controller in master 107 This enables using the Freescale iMX SPI controller in master
108 mode. 108 mode.
109 109
110config SPI_LM70_LLP
111 tristate "Parallel port adapter for LM70 eval board (DEVELOPMENT)"
112 depends on SPI_MASTER && PARPORT && EXPERIMENTAL
113 select SPI_BITBANG
114 help
115 This driver supports the NS LM70 LLP Evaluation Board,
116 which interfaces to an LM70 temperature sensor using
117 a parallel port.
118
110config SPI_MPC52xx_PSC 119config SPI_MPC52xx_PSC
111 tristate "Freescale MPC52xx PSC SPI controller" 120 tristate "Freescale MPC52xx PSC SPI controller"
112 depends on SPI_MASTER && PPC_MPC52xx && EXPERIMENTAL 121 depends on SPI_MASTER && PPC_MPC52xx && EXPERIMENTAL
@@ -133,6 +142,12 @@ config SPI_OMAP_UWIRE
133 help 142 help
134 This hooks up to the MicroWire controller on OMAP1 chips. 143 This hooks up to the MicroWire controller on OMAP1 chips.
135 144
145config SPI_OMAP24XX
146 tristate "McSPI driver for OMAP24xx"
147 depends on SPI_MASTER && ARCH_OMAP24XX
148 help
149 SPI master controller for OMAP24xx Multichannel SPI
150 (McSPI) modules.
136 151
137config SPI_PXA2XX 152config SPI_PXA2XX
138 tristate "PXA2xx SSP SPI master" 153 tristate "PXA2xx SSP SPI master"
@@ -145,17 +160,36 @@ config SPI_PXA2XX
145config SPI_S3C24XX 160config SPI_S3C24XX
146 tristate "Samsung S3C24XX series SPI" 161 tristate "Samsung S3C24XX series SPI"
147 depends on SPI_MASTER && ARCH_S3C2410 && EXPERIMENTAL 162 depends on SPI_MASTER && ARCH_S3C2410 && EXPERIMENTAL
163 select SPI_BITBANG
148 help 164 help
149 SPI driver for Samsung S3C24XX series ARM SoCs 165 SPI driver for Samsung S3C24XX series ARM SoCs
150 166
151config SPI_S3C24XX_GPIO 167config SPI_S3C24XX_GPIO
152 tristate "Samsung S3C24XX series SPI by GPIO" 168 tristate "Samsung S3C24XX series SPI by GPIO"
153 depends on SPI_MASTER && ARCH_S3C2410 && SPI_BITBANG && EXPERIMENTAL 169 depends on SPI_MASTER && ARCH_S3C2410 && EXPERIMENTAL
170 select SPI_BITBANG
154 help 171 help
155 SPI driver for Samsung S3C24XX series ARM SoCs using 172 SPI driver for Samsung S3C24XX series ARM SoCs using
156 GPIO lines to provide the SPI bus. This can be used where 173 GPIO lines to provide the SPI bus. This can be used where
157 the inbuilt hardware cannot provide the transfer mode, or 174 the inbuilt hardware cannot provide the transfer mode, or
158 where the board is using non hardware connected pins. 175 where the board is using non hardware connected pins.
176
177config SPI_TXX9
178 tristate "Toshiba TXx9 SPI controller"
179 depends on SPI_MASTER && GENERIC_GPIO && CPU_TX49XX
180 help
181 SPI driver for Toshiba TXx9 MIPS SoCs
182
183config SPI_XILINX
184 tristate "Xilinx SPI controller"
185 depends on SPI_MASTER && XILINX_VIRTEX && EXPERIMENTAL
186 select SPI_BITBANG
187 help
188 This exposes the SPI controller IP from the Xilinx EDK.
189
190 See the "OPB Serial Peripheral Interface (SPI) (v1.00e)"
191 Product Specification document (DS464) for hardware details.
192
159# 193#
160# Add new SPI master controllers in alphabetical order above this line 194# Add new SPI master controllers in alphabetical order above this line
161# 195#
@@ -187,6 +221,15 @@ config SPI_SPIDEV
187 Note that this application programming interface is EXPERIMENTAL 221 Note that this application programming interface is EXPERIMENTAL
188 and hence SUBJECT TO CHANGE WITHOUT NOTICE while it stabilizes. 222 and hence SUBJECT TO CHANGE WITHOUT NOTICE while it stabilizes.
189 223
224config SPI_TLE62X0
225 tristate "Infineon TLE62X0 (for power switching)"
226 depends on SPI_MASTER && SYSFS
227 help
228 SPI driver for Infineon TLE62X0 series line driver chips,
229 such as the TLE6220, TLE6230 and TLE6240. This provides a
230 sysfs interface, with each line presented as a kind of GPIO
231 exposing both switch control and diagnostic feedback.
232
190# 233#
191# Add new SPI protocol masters in alphabetical order above this line 234# Add new SPI protocol masters in alphabetical order above this line
192# 235#
diff --git a/drivers/spi/Makefile b/drivers/spi/Makefile
index 5788d867de84..41fbac45c323 100644
--- a/drivers/spi/Makefile
+++ b/drivers/spi/Makefile
@@ -17,17 +17,22 @@ obj-$(CONFIG_SPI_BITBANG) += spi_bitbang.o
17obj-$(CONFIG_SPI_AU1550) += au1550_spi.o 17obj-$(CONFIG_SPI_AU1550) += au1550_spi.o
18obj-$(CONFIG_SPI_BUTTERFLY) += spi_butterfly.o 18obj-$(CONFIG_SPI_BUTTERFLY) += spi_butterfly.o
19obj-$(CONFIG_SPI_IMX) += spi_imx.o 19obj-$(CONFIG_SPI_IMX) += spi_imx.o
20obj-$(CONFIG_SPI_LM70_LLP) += spi_lm70llp.o
20obj-$(CONFIG_SPI_PXA2XX) += pxa2xx_spi.o 21obj-$(CONFIG_SPI_PXA2XX) += pxa2xx_spi.o
21obj-$(CONFIG_SPI_OMAP_UWIRE) += omap_uwire.o 22obj-$(CONFIG_SPI_OMAP_UWIRE) += omap_uwire.o
23obj-$(CONFIG_SPI_OMAP24XX) += omap2_mcspi.o
22obj-$(CONFIG_SPI_MPC52xx_PSC) += mpc52xx_psc_spi.o 24obj-$(CONFIG_SPI_MPC52xx_PSC) += mpc52xx_psc_spi.o
23obj-$(CONFIG_SPI_MPC83xx) += spi_mpc83xx.o 25obj-$(CONFIG_SPI_MPC83xx) += spi_mpc83xx.o
24obj-$(CONFIG_SPI_S3C24XX_GPIO) += spi_s3c24xx_gpio.o 26obj-$(CONFIG_SPI_S3C24XX_GPIO) += spi_s3c24xx_gpio.o
25obj-$(CONFIG_SPI_S3C24XX) += spi_s3c24xx.o 27obj-$(CONFIG_SPI_S3C24XX) += spi_s3c24xx.o
28obj-$(CONFIG_SPI_TXX9) += spi_txx9.o
29obj-$(CONFIG_SPI_XILINX) += xilinx_spi.o
26# ... add above this line ... 30# ... add above this line ...
27 31
28# SPI protocol drivers (device/link on bus) 32# SPI protocol drivers (device/link on bus)
29obj-$(CONFIG_SPI_AT25) += at25.o 33obj-$(CONFIG_SPI_AT25) += at25.o
30obj-$(CONFIG_SPI_SPIDEV) += spidev.o 34obj-$(CONFIG_SPI_SPIDEV) += spidev.o
35obj-$(CONFIG_SPI_TLE62X0) += tle62x0.o
31# ... add above this line ... 36# ... add above this line ...
32 37
33# SPI slave controller drivers (upstream link) 38# SPI slave controller drivers (upstream link)
diff --git a/drivers/spi/atmel_spi.c b/drivers/spi/atmel_spi.c
index 8b2601de3630..ad144054da30 100644
--- a/drivers/spi/atmel_spi.c
+++ b/drivers/spi/atmel_spi.c
@@ -46,6 +46,7 @@ struct atmel_spi {
46 struct clk *clk; 46 struct clk *clk;
47 struct platform_device *pdev; 47 struct platform_device *pdev;
48 unsigned new_1:1; 48 unsigned new_1:1;
49 struct spi_device *stay;
49 50
50 u8 stopping; 51 u8 stopping;
51 struct list_head queue; 52 struct list_head queue;
@@ -62,29 +63,62 @@ struct atmel_spi {
62/* 63/*
63 * Earlier SPI controllers (e.g. on at91rm9200) have a design bug whereby 64 * Earlier SPI controllers (e.g. on at91rm9200) have a design bug whereby
64 * they assume that spi slave device state will not change on deselect, so 65 * they assume that spi slave device state will not change on deselect, so
65 * that automagic deselection is OK. Not so! Workaround uses nCSx pins 66 * that automagic deselection is OK. ("NPCSx rises if no data is to be
66 * as GPIOs; or newer controllers have CSAAT and friends. 67 * transmitted") Not so! Workaround uses nCSx pins as GPIOs; or newer
68 * controllers have CSAAT and friends.
67 * 69 *
68 * Since the CSAAT functionality is a bit weird on newer controllers 70 * Since the CSAAT functionality is a bit weird on newer controllers as
69 * as well, we use GPIO to control nCSx pins on all controllers. 71 * well, we use GPIO to control nCSx pins on all controllers, updating
72 * MR.PCS to avoid confusing the controller. Using GPIOs also lets us
73 * support active-high chipselects despite the controller's belief that
74 * only active-low devices/systems exists.
75 *
76 * However, at91rm9200 has a second erratum whereby nCS0 doesn't work
77 * right when driven with GPIO. ("Mode Fault does not allow more than one
78 * Master on Chip Select 0.") No workaround exists for that ... so for
79 * nCS0 on that chip, we (a) don't use the GPIO, (b) can't support CS_HIGH,
80 * and (c) will trigger that first erratum in some cases.
70 */ 81 */
71 82
72static inline void cs_activate(struct spi_device *spi) 83static void cs_activate(struct atmel_spi *as, struct spi_device *spi)
73{ 84{
74 unsigned gpio = (unsigned) spi->controller_data; 85 unsigned gpio = (unsigned) spi->controller_data;
75 unsigned active = spi->mode & SPI_CS_HIGH; 86 unsigned active = spi->mode & SPI_CS_HIGH;
87 u32 mr;
88
89 mr = spi_readl(as, MR);
90 mr = SPI_BFINS(PCS, ~(1 << spi->chip_select), mr);
76 91
77 dev_dbg(&spi->dev, "activate %u%s\n", gpio, active ? " (high)" : ""); 92 dev_dbg(&spi->dev, "activate %u%s, mr %08x\n",
78 gpio_set_value(gpio, active); 93 gpio, active ? " (high)" : "",
94 mr);
95
96 if (!(cpu_is_at91rm9200() && spi->chip_select == 0))
97 gpio_set_value(gpio, active);
98 spi_writel(as, MR, mr);
79} 99}
80 100
81static inline void cs_deactivate(struct spi_device *spi) 101static void cs_deactivate(struct atmel_spi *as, struct spi_device *spi)
82{ 102{
83 unsigned gpio = (unsigned) spi->controller_data; 103 unsigned gpio = (unsigned) spi->controller_data;
84 unsigned active = spi->mode & SPI_CS_HIGH; 104 unsigned active = spi->mode & SPI_CS_HIGH;
105 u32 mr;
85 106
86 dev_dbg(&spi->dev, "DEactivate %u%s\n", gpio, active ? " (low)" : ""); 107 /* only deactivate *this* device; sometimes transfers to
87 gpio_set_value(gpio, !active); 108 * another device may be active when this routine is called.
109 */
110 mr = spi_readl(as, MR);
111 if (~SPI_BFEXT(PCS, mr) & (1 << spi->chip_select)) {
112 mr = SPI_BFINS(PCS, 0xf, mr);
113 spi_writel(as, MR, mr);
114 }
115
116 dev_dbg(&spi->dev, "DEactivate %u%s, mr %08x\n",
117 gpio, active ? " (low)" : "",
118 mr);
119
120 if (!(cpu_is_at91rm9200() && spi->chip_select == 0))
121 gpio_set_value(gpio, !active);
88} 122}
89 123
90/* 124/*
@@ -140,6 +174,7 @@ static void atmel_spi_next_xfer(struct spi_master *master,
140 174
141 /* REVISIT: when xfer->delay_usecs == 0, the PDC "next transfer" 175 /* REVISIT: when xfer->delay_usecs == 0, the PDC "next transfer"
142 * mechanism might help avoid the IRQ latency between transfers 176 * mechanism might help avoid the IRQ latency between transfers
177 * (and improve the nCS0 errata handling on at91rm9200 chips)
143 * 178 *
144 * We're also waiting for ENDRX before we start the next 179 * We're also waiting for ENDRX before we start the next
145 * transfer because we need to handle some difficult timing 180 * transfer because we need to handle some difficult timing
@@ -169,33 +204,62 @@ static void atmel_spi_next_message(struct spi_master *master)
169{ 204{
170 struct atmel_spi *as = spi_master_get_devdata(master); 205 struct atmel_spi *as = spi_master_get_devdata(master);
171 struct spi_message *msg; 206 struct spi_message *msg;
172 u32 mr; 207 struct spi_device *spi;
173 208
174 BUG_ON(as->current_transfer); 209 BUG_ON(as->current_transfer);
175 210
176 msg = list_entry(as->queue.next, struct spi_message, queue); 211 msg = list_entry(as->queue.next, struct spi_message, queue);
212 spi = msg->spi;
177 213
178 /* Select the chip */ 214 dev_dbg(master->cdev.dev, "start message %p for %s\n",
179 mr = spi_readl(as, MR); 215 msg, spi->dev.bus_id);
180 mr = SPI_BFINS(PCS, ~(1 << msg->spi->chip_select), mr); 216
181 spi_writel(as, MR, mr); 217 /* select chip if it's not still active */
182 cs_activate(msg->spi); 218 if (as->stay) {
219 if (as->stay != spi) {
220 cs_deactivate(as, as->stay);
221 cs_activate(as, spi);
222 }
223 as->stay = NULL;
224 } else
225 cs_activate(as, spi);
183 226
184 atmel_spi_next_xfer(master, msg); 227 atmel_spi_next_xfer(master, msg);
185} 228}
186 229
187static void 230/*
231 * For DMA, tx_buf/tx_dma have the same relationship as rx_buf/rx_dma:
232 * - The buffer is either valid for CPU access, else NULL
233 * - If the buffer is valid, so is its DMA addresss
234 *
235 * This driver manages the dma addresss unless message->is_dma_mapped.
236 */
237static int
188atmel_spi_dma_map_xfer(struct atmel_spi *as, struct spi_transfer *xfer) 238atmel_spi_dma_map_xfer(struct atmel_spi *as, struct spi_transfer *xfer)
189{ 239{
240 struct device *dev = &as->pdev->dev;
241
190 xfer->tx_dma = xfer->rx_dma = INVALID_DMA_ADDRESS; 242 xfer->tx_dma = xfer->rx_dma = INVALID_DMA_ADDRESS;
191 if (xfer->tx_buf) 243 if (xfer->tx_buf) {
192 xfer->tx_dma = dma_map_single(&as->pdev->dev, 244 xfer->tx_dma = dma_map_single(dev,
193 (void *) xfer->tx_buf, xfer->len, 245 (void *) xfer->tx_buf, xfer->len,
194 DMA_TO_DEVICE); 246 DMA_TO_DEVICE);
195 if (xfer->rx_buf) 247 if (dma_mapping_error(xfer->tx_dma))
196 xfer->rx_dma = dma_map_single(&as->pdev->dev, 248 return -ENOMEM;
249 }
250 if (xfer->rx_buf) {
251 xfer->rx_dma = dma_map_single(dev,
197 xfer->rx_buf, xfer->len, 252 xfer->rx_buf, xfer->len,
198 DMA_FROM_DEVICE); 253 DMA_FROM_DEVICE);
254 if (dma_mapping_error(xfer->tx_dma)) {
255 if (xfer->tx_buf)
256 dma_unmap_single(dev,
257 xfer->tx_dma, xfer->len,
258 DMA_TO_DEVICE);
259 return -ENOMEM;
260 }
261 }
262 return 0;
199} 263}
200 264
201static void atmel_spi_dma_unmap_xfer(struct spi_master *master, 265static void atmel_spi_dma_unmap_xfer(struct spi_master *master,
@@ -211,9 +275,13 @@ static void atmel_spi_dma_unmap_xfer(struct spi_master *master,
211 275
212static void 276static void
213atmel_spi_msg_done(struct spi_master *master, struct atmel_spi *as, 277atmel_spi_msg_done(struct spi_master *master, struct atmel_spi *as,
214 struct spi_message *msg, int status) 278 struct spi_message *msg, int status, int stay)
215{ 279{
216 cs_deactivate(msg->spi); 280 if (!stay || status < 0)
281 cs_deactivate(as, msg->spi);
282 else
283 as->stay = msg->spi;
284
217 list_del(&msg->queue); 285 list_del(&msg->queue);
218 msg->status = status; 286 msg->status = status;
219 287
@@ -303,7 +371,7 @@ atmel_spi_interrupt(int irq, void *dev_id)
303 /* Clear any overrun happening while cleaning up */ 371 /* Clear any overrun happening while cleaning up */
304 spi_readl(as, SR); 372 spi_readl(as, SR);
305 373
306 atmel_spi_msg_done(master, as, msg, -EIO); 374 atmel_spi_msg_done(master, as, msg, -EIO, 0);
307 } else if (pending & SPI_BIT(ENDRX)) { 375 } else if (pending & SPI_BIT(ENDRX)) {
308 ret = IRQ_HANDLED; 376 ret = IRQ_HANDLED;
309 377
@@ -321,12 +389,13 @@ atmel_spi_interrupt(int irq, void *dev_id)
321 389
322 if (msg->transfers.prev == &xfer->transfer_list) { 390 if (msg->transfers.prev == &xfer->transfer_list) {
323 /* report completed message */ 391 /* report completed message */
324 atmel_spi_msg_done(master, as, msg, 0); 392 atmel_spi_msg_done(master, as, msg, 0,
393 xfer->cs_change);
325 } else { 394 } else {
326 if (xfer->cs_change) { 395 if (xfer->cs_change) {
327 cs_deactivate(msg->spi); 396 cs_deactivate(as, msg->spi);
328 udelay(1); 397 udelay(1);
329 cs_activate(msg->spi); 398 cs_activate(as, msg->spi);
330 } 399 }
331 400
332 /* 401 /*
@@ -350,6 +419,7 @@ atmel_spi_interrupt(int irq, void *dev_id)
350 return ret; 419 return ret;
351} 420}
352 421
422/* the spi->mode bits understood by this driver: */
353#define MODEBITS (SPI_CPOL | SPI_CPHA | SPI_CS_HIGH) 423#define MODEBITS (SPI_CPOL | SPI_CPHA | SPI_CS_HIGH)
354 424
355static int atmel_spi_setup(struct spi_device *spi) 425static int atmel_spi_setup(struct spi_device *spi)
@@ -388,6 +458,14 @@ static int atmel_spi_setup(struct spi_device *spi)
388 return -EINVAL; 458 return -EINVAL;
389 } 459 }
390 460
461 /* see notes above re chipselect */
462 if (cpu_is_at91rm9200()
463 && spi->chip_select == 0
464 && (spi->mode & SPI_CS_HIGH)) {
465 dev_dbg(&spi->dev, "setup: can't be active-high\n");
466 return -EINVAL;
467 }
468
391 /* speed zero convention is used by some upper layers */ 469 /* speed zero convention is used by some upper layers */
392 bus_hz = clk_get_rate(as->clk); 470 bus_hz = clk_get_rate(as->clk);
393 if (spi->max_speed_hz) { 471 if (spi->max_speed_hz) {
@@ -397,8 +475,9 @@ static int atmel_spi_setup(struct spi_device *spi)
397 scbr = ((bus_hz + spi->max_speed_hz - 1) 475 scbr = ((bus_hz + spi->max_speed_hz - 1)
398 / spi->max_speed_hz); 476 / spi->max_speed_hz);
399 if (scbr >= (1 << SPI_SCBR_SIZE)) { 477 if (scbr >= (1 << SPI_SCBR_SIZE)) {
400 dev_dbg(&spi->dev, "setup: %d Hz too slow, scbr %u\n", 478 dev_dbg(&spi->dev,
401 spi->max_speed_hz, scbr); 479 "setup: %d Hz too slow, scbr %u; min %ld Hz\n",
480 spi->max_speed_hz, scbr, bus_hz/255);
402 return -EINVAL; 481 return -EINVAL;
403 } 482 }
404 } else 483 } else
@@ -423,6 +502,14 @@ static int atmel_spi_setup(struct spi_device *spi)
423 return ret; 502 return ret;
424 spi->controller_state = (void *)npcs_pin; 503 spi->controller_state = (void *)npcs_pin;
425 gpio_direction_output(npcs_pin, !(spi->mode & SPI_CS_HIGH)); 504 gpio_direction_output(npcs_pin, !(spi->mode & SPI_CS_HIGH));
505 } else {
506 unsigned long flags;
507
508 spin_lock_irqsave(&as->lock, flags);
509 if (as->stay == spi)
510 as->stay = NULL;
511 cs_deactivate(as, spi);
512 spin_unlock_irqrestore(&as->lock, flags);
426 } 513 }
427 514
428 dev_dbg(&spi->dev, 515 dev_dbg(&spi->dev,
@@ -464,14 +551,22 @@ static int atmel_spi_transfer(struct spi_device *spi, struct spi_message *msg)
464 dev_dbg(&spi->dev, "no protocol options yet\n"); 551 dev_dbg(&spi->dev, "no protocol options yet\n");
465 return -ENOPROTOOPT; 552 return -ENOPROTOOPT;
466 } 553 }
467 }
468 554
469 /* scrub dcache "early" */ 555 /*
470 if (!msg->is_dma_mapped) { 556 * DMA map early, for performance (empties dcache ASAP) and
471 list_for_each_entry(xfer, &msg->transfers, transfer_list) 557 * better fault reporting. This is a DMA-only driver.
472 atmel_spi_dma_map_xfer(as, xfer); 558 *
559 * NOTE that if dma_unmap_single() ever starts to do work on
560 * platforms supported by this driver, we would need to clean
561 * up mappings for previously-mapped transfers.
562 */
563 if (!msg->is_dma_mapped) {
564 if (atmel_spi_dma_map_xfer(as, xfer) < 0)
565 return -ENOMEM;
566 }
473 } 567 }
474 568
569#ifdef VERBOSE
475 list_for_each_entry(xfer, &msg->transfers, transfer_list) { 570 list_for_each_entry(xfer, &msg->transfers, transfer_list) {
476 dev_dbg(controller, 571 dev_dbg(controller,
477 " xfer %p: len %u tx %p/%08x rx %p/%08x\n", 572 " xfer %p: len %u tx %p/%08x rx %p/%08x\n",
@@ -479,6 +574,7 @@ static int atmel_spi_transfer(struct spi_device *spi, struct spi_message *msg)
479 xfer->tx_buf, xfer->tx_dma, 574 xfer->tx_buf, xfer->tx_dma,
480 xfer->rx_buf, xfer->rx_dma); 575 xfer->rx_buf, xfer->rx_dma);
481 } 576 }
577#endif
482 578
483 msg->status = -EINPROGRESS; 579 msg->status = -EINPROGRESS;
484 msg->actual_length = 0; 580 msg->actual_length = 0;
@@ -494,8 +590,21 @@ static int atmel_spi_transfer(struct spi_device *spi, struct spi_message *msg)
494 590
495static void atmel_spi_cleanup(struct spi_device *spi) 591static void atmel_spi_cleanup(struct spi_device *spi)
496{ 592{
497 if (spi->controller_state) 593 struct atmel_spi *as = spi_master_get_devdata(spi->master);
498 gpio_free((unsigned int)spi->controller_data); 594 unsigned gpio = (unsigned) spi->controller_data;
595 unsigned long flags;
596
597 if (!spi->controller_state)
598 return;
599
600 spin_lock_irqsave(&as->lock, flags);
601 if (as->stay == spi) {
602 as->stay = NULL;
603 cs_deactivate(as, spi);
604 }
605 spin_unlock_irqrestore(&as->lock, flags);
606
607 gpio_free(gpio);
499} 608}
500 609
501/*-------------------------------------------------------------------------*/ 610/*-------------------------------------------------------------------------*/
@@ -536,6 +645,10 @@ static int __init atmel_spi_probe(struct platform_device *pdev)
536 645
537 as = spi_master_get_devdata(master); 646 as = spi_master_get_devdata(master);
538 647
648 /*
649 * Scratch buffer is used for throwaway rx and tx data.
650 * It's coherent to minimize dcache pollution.
651 */
539 as->buffer = dma_alloc_coherent(&pdev->dev, BUFFER_SIZE, 652 as->buffer = dma_alloc_coherent(&pdev->dev, BUFFER_SIZE,
540 &as->buffer_dma, GFP_KERNEL); 653 &as->buffer_dma, GFP_KERNEL);
541 if (!as->buffer) 654 if (!as->buffer)
diff --git a/drivers/spi/au1550_spi.c b/drivers/spi/au1550_spi.c
index ae2b1af0dba4..c47a650183a1 100644
--- a/drivers/spi/au1550_spi.c
+++ b/drivers/spi/au1550_spi.c
@@ -280,6 +280,9 @@ static int au1550_spi_setupxfer(struct spi_device *spi, struct spi_transfer *t)
280 return 0; 280 return 0;
281} 281}
282 282
283/* the spi->mode bits understood by this driver: */
284#define MODEBITS (SPI_CPOL | SPI_CPHA | SPI_CS_HIGH | SPI_LSB_FIRST)
285
283static int au1550_spi_setup(struct spi_device *spi) 286static int au1550_spi_setup(struct spi_device *spi)
284{ 287{
285 struct au1550_spi *hw = spi_master_get_devdata(spi->master); 288 struct au1550_spi *hw = spi_master_get_devdata(spi->master);
@@ -292,6 +295,12 @@ static int au1550_spi_setup(struct spi_device *spi)
292 return -EINVAL; 295 return -EINVAL;
293 } 296 }
294 297
298 if (spi->mode & ~MODEBITS) {
299 dev_dbg(&spi->dev, "setup: unsupported mode bits %x\n",
300 spi->mode & ~MODEBITS);
301 return -EINVAL;
302 }
303
295 if (spi->max_speed_hz == 0) 304 if (spi->max_speed_hz == 0)
296 spi->max_speed_hz = hw->freq_max; 305 spi->max_speed_hz = hw->freq_max;
297 if (spi->max_speed_hz > hw->freq_max 306 if (spi->max_speed_hz > hw->freq_max
diff --git a/drivers/spi/mpc52xx_psc_spi.c b/drivers/spi/mpc52xx_psc_spi.c
index 11f36bef3057..d2a4b2bdb07b 100644
--- a/drivers/spi/mpc52xx_psc_spi.c
+++ b/drivers/spi/mpc52xx_psc_spi.c
@@ -270,6 +270,9 @@ static void mpc52xx_psc_spi_work(struct work_struct *work)
270 spin_unlock_irq(&mps->lock); 270 spin_unlock_irq(&mps->lock);
271} 271}
272 272
273/* the spi->mode bits understood by this driver: */
274#define MODEBITS (SPI_CPOL | SPI_CPHA | SPI_CS_HIGH | SPI_LSB_FIRST)
275
273static int mpc52xx_psc_spi_setup(struct spi_device *spi) 276static int mpc52xx_psc_spi_setup(struct spi_device *spi)
274{ 277{
275 struct mpc52xx_psc_spi *mps = spi_master_get_devdata(spi->master); 278 struct mpc52xx_psc_spi *mps = spi_master_get_devdata(spi->master);
@@ -279,6 +282,12 @@ static int mpc52xx_psc_spi_setup(struct spi_device *spi)
279 if (spi->bits_per_word%8) 282 if (spi->bits_per_word%8)
280 return -EINVAL; 283 return -EINVAL;
281 284
285 if (spi->mode & ~MODEBITS) {
286 dev_dbg(&spi->dev, "setup: unsupported mode bits %x\n",
287 spi->mode & ~MODEBITS);
288 return -EINVAL;
289 }
290
282 if (!cs) { 291 if (!cs) {
283 cs = kzalloc(sizeof *cs, GFP_KERNEL); 292 cs = kzalloc(sizeof *cs, GFP_KERNEL);
284 if (!cs) 293 if (!cs)
diff --git a/drivers/spi/omap2_mcspi.c b/drivers/spi/omap2_mcspi.c
new file mode 100644
index 000000000000..6b357cdb9ea3
--- /dev/null
+++ b/drivers/spi/omap2_mcspi.c
@@ -0,0 +1,1081 @@
1/*
2 * OMAP2 McSPI controller driver
3 *
4 * Copyright (C) 2005, 2006 Nokia Corporation
5 * Author: Samuel Ortiz <samuel.ortiz@nokia.com> and
6 * Juha Yrjölä <juha.yrjola@nokia.com>
7 *
8 * This program is free software; you can redistribute it and/or modify
9 * it under the terms of the GNU General Public License as published by
10 * the Free Software Foundation; either version 2 of the License, or
11 * (at your option) any later version.
12 *
13 * This program is distributed in the hope that it will be useful,
14 * but WITHOUT ANY WARRANTY; without even the implied warranty of
15 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
16 * GNU General Public License for more details.
17 *
18 * You should have received a copy of the GNU General Public License
19 * along with this program; if not, write to the Free Software
20 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
21 *
22 */
23
24#include <linux/kernel.h>
25#include <linux/init.h>
26#include <linux/interrupt.h>
27#include <linux/module.h>
28#include <linux/device.h>
29#include <linux/delay.h>
30#include <linux/dma-mapping.h>
31#include <linux/platform_device.h>
32#include <linux/err.h>
33#include <linux/clk.h>
34#include <linux/io.h>
35
36#include <linux/spi/spi.h>
37
38#include <asm/arch/dma.h>
39#include <asm/arch/clock.h>
40
41
42#define OMAP2_MCSPI_MAX_FREQ 48000000
43
44#define OMAP2_MCSPI_REVISION 0x00
45#define OMAP2_MCSPI_SYSCONFIG 0x10
46#define OMAP2_MCSPI_SYSSTATUS 0x14
47#define OMAP2_MCSPI_IRQSTATUS 0x18
48#define OMAP2_MCSPI_IRQENABLE 0x1c
49#define OMAP2_MCSPI_WAKEUPENABLE 0x20
50#define OMAP2_MCSPI_SYST 0x24
51#define OMAP2_MCSPI_MODULCTRL 0x28
52
53/* per-channel banks, 0x14 bytes each, first is: */
54#define OMAP2_MCSPI_CHCONF0 0x2c
55#define OMAP2_MCSPI_CHSTAT0 0x30
56#define OMAP2_MCSPI_CHCTRL0 0x34
57#define OMAP2_MCSPI_TX0 0x38
58#define OMAP2_MCSPI_RX0 0x3c
59
60/* per-register bitmasks: */
61
62#define OMAP2_MCSPI_SYSCONFIG_AUTOIDLE (1 << 0)
63#define OMAP2_MCSPI_SYSCONFIG_SOFTRESET (1 << 1)
64
65#define OMAP2_MCSPI_SYSSTATUS_RESETDONE (1 << 0)
66
67#define OMAP2_MCSPI_MODULCTRL_SINGLE (1 << 0)
68#define OMAP2_MCSPI_MODULCTRL_MS (1 << 2)
69#define OMAP2_MCSPI_MODULCTRL_STEST (1 << 3)
70
71#define OMAP2_MCSPI_CHCONF_PHA (1 << 0)
72#define OMAP2_MCSPI_CHCONF_POL (1 << 1)
73#define OMAP2_MCSPI_CHCONF_CLKD_MASK (0x0f << 2)
74#define OMAP2_MCSPI_CHCONF_EPOL (1 << 6)
75#define OMAP2_MCSPI_CHCONF_WL_MASK (0x1f << 7)
76#define OMAP2_MCSPI_CHCONF_TRM_RX_ONLY (0x01 << 12)
77#define OMAP2_MCSPI_CHCONF_TRM_TX_ONLY (0x02 << 12)
78#define OMAP2_MCSPI_CHCONF_TRM_MASK (0x03 << 12)
79#define OMAP2_MCSPI_CHCONF_DMAW (1 << 14)
80#define OMAP2_MCSPI_CHCONF_DMAR (1 << 15)
81#define OMAP2_MCSPI_CHCONF_DPE0 (1 << 16)
82#define OMAP2_MCSPI_CHCONF_DPE1 (1 << 17)
83#define OMAP2_MCSPI_CHCONF_IS (1 << 18)
84#define OMAP2_MCSPI_CHCONF_TURBO (1 << 19)
85#define OMAP2_MCSPI_CHCONF_FORCE (1 << 20)
86
87#define OMAP2_MCSPI_CHSTAT_RXS (1 << 0)
88#define OMAP2_MCSPI_CHSTAT_TXS (1 << 1)
89#define OMAP2_MCSPI_CHSTAT_EOT (1 << 2)
90
91#define OMAP2_MCSPI_CHCTRL_EN (1 << 0)
92
93
94/* We have 2 DMA channels per CS, one for RX and one for TX */
95struct omap2_mcspi_dma {
96 int dma_tx_channel;
97 int dma_rx_channel;
98
99 int dma_tx_sync_dev;
100 int dma_rx_sync_dev;
101
102 struct completion dma_tx_completion;
103 struct completion dma_rx_completion;
104};
105
106/* use PIO for small transfers, avoiding DMA setup/teardown overhead and
107 * cache operations; better heuristics consider wordsize and bitrate.
108 */
109#define DMA_MIN_BYTES 8
110
111
112struct omap2_mcspi {
113 struct work_struct work;
114 /* lock protects queue and registers */
115 spinlock_t lock;
116 struct list_head msg_queue;
117 struct spi_master *master;
118 struct clk *ick;
119 struct clk *fck;
120 /* Virtual base address of the controller */
121 void __iomem *base;
122 /* SPI1 has 4 channels, while SPI2 has 2 */
123 struct omap2_mcspi_dma *dma_channels;
124};
125
126struct omap2_mcspi_cs {
127 void __iomem *base;
128 int word_len;
129};
130
131static struct workqueue_struct *omap2_mcspi_wq;
132
133#define MOD_REG_BIT(val, mask, set) do { \
134 if (set) \
135 val |= mask; \
136 else \
137 val &= ~mask; \
138} while (0)
139
140static inline void mcspi_write_reg(struct spi_master *master,
141 int idx, u32 val)
142{
143 struct omap2_mcspi *mcspi = spi_master_get_devdata(master);
144
145 __raw_writel(val, mcspi->base + idx);
146}
147
148static inline u32 mcspi_read_reg(struct spi_master *master, int idx)
149{
150 struct omap2_mcspi *mcspi = spi_master_get_devdata(master);
151
152 return __raw_readl(mcspi->base + idx);
153}
154
155static inline void mcspi_write_cs_reg(const struct spi_device *spi,
156 int idx, u32 val)
157{
158 struct omap2_mcspi_cs *cs = spi->controller_state;
159
160 __raw_writel(val, cs->base + idx);
161}
162
163static inline u32 mcspi_read_cs_reg(const struct spi_device *spi, int idx)
164{
165 struct omap2_mcspi_cs *cs = spi->controller_state;
166
167 return __raw_readl(cs->base + idx);
168}
169
170static void omap2_mcspi_set_dma_req(const struct spi_device *spi,
171 int is_read, int enable)
172{
173 u32 l, rw;
174
175 l = mcspi_read_cs_reg(spi, OMAP2_MCSPI_CHCONF0);
176
177 if (is_read) /* 1 is read, 0 write */
178 rw = OMAP2_MCSPI_CHCONF_DMAR;
179 else
180 rw = OMAP2_MCSPI_CHCONF_DMAW;
181
182 MOD_REG_BIT(l, rw, enable);
183 mcspi_write_cs_reg(spi, OMAP2_MCSPI_CHCONF0, l);
184}
185
186static void omap2_mcspi_set_enable(const struct spi_device *spi, int enable)
187{
188 u32 l;
189
190 l = enable ? OMAP2_MCSPI_CHCTRL_EN : 0;
191 mcspi_write_cs_reg(spi, OMAP2_MCSPI_CHCTRL0, l);
192}
193
194static void omap2_mcspi_force_cs(struct spi_device *spi, int cs_active)
195{
196 u32 l;
197
198 l = mcspi_read_cs_reg(spi, OMAP2_MCSPI_CHCONF0);
199 MOD_REG_BIT(l, OMAP2_MCSPI_CHCONF_FORCE, cs_active);
200 mcspi_write_cs_reg(spi, OMAP2_MCSPI_CHCONF0, l);
201}
202
203static void omap2_mcspi_set_master_mode(struct spi_master *master)
204{
205 u32 l;
206
207 /* setup when switching from (reset default) slave mode
208 * to single-channel master mode
209 */
210 l = mcspi_read_reg(master, OMAP2_MCSPI_MODULCTRL);
211 MOD_REG_BIT(l, OMAP2_MCSPI_MODULCTRL_STEST, 0);
212 MOD_REG_BIT(l, OMAP2_MCSPI_MODULCTRL_MS, 0);
213 MOD_REG_BIT(l, OMAP2_MCSPI_MODULCTRL_SINGLE, 1);
214 mcspi_write_reg(master, OMAP2_MCSPI_MODULCTRL, l);
215}
216
217static unsigned
218omap2_mcspi_txrx_dma(struct spi_device *spi, struct spi_transfer *xfer)
219{
220 struct omap2_mcspi *mcspi;
221 struct omap2_mcspi_cs *cs = spi->controller_state;
222 struct omap2_mcspi_dma *mcspi_dma;
223 unsigned int count, c;
224 unsigned long base, tx_reg, rx_reg;
225 int word_len, data_type, element_count;
226 u8 * rx;
227 const u8 * tx;
228
229 mcspi = spi_master_get_devdata(spi->master);
230 mcspi_dma = &mcspi->dma_channels[spi->chip_select];
231
232 count = xfer->len;
233 c = count;
234 word_len = cs->word_len;
235
236 base = (unsigned long) io_v2p(cs->base);
237 tx_reg = base + OMAP2_MCSPI_TX0;
238 rx_reg = base + OMAP2_MCSPI_RX0;
239 rx = xfer->rx_buf;
240 tx = xfer->tx_buf;
241
242 if (word_len <= 8) {
243 data_type = OMAP_DMA_DATA_TYPE_S8;
244 element_count = count;
245 } else if (word_len <= 16) {
246 data_type = OMAP_DMA_DATA_TYPE_S16;
247 element_count = count >> 1;
248 } else /* word_len <= 32 */ {
249 data_type = OMAP_DMA_DATA_TYPE_S32;
250 element_count = count >> 2;
251 }
252
253 if (tx != NULL) {
254 omap_set_dma_transfer_params(mcspi_dma->dma_tx_channel,
255 data_type, element_count, 1,
256 OMAP_DMA_SYNC_ELEMENT,
257 mcspi_dma->dma_tx_sync_dev, 0);
258
259 omap_set_dma_dest_params(mcspi_dma->dma_tx_channel, 0,
260 OMAP_DMA_AMODE_CONSTANT,
261 tx_reg, 0, 0);
262
263 omap_set_dma_src_params(mcspi_dma->dma_tx_channel, 0,
264 OMAP_DMA_AMODE_POST_INC,
265 xfer->tx_dma, 0, 0);
266 }
267
268 if (rx != NULL) {
269 omap_set_dma_transfer_params(mcspi_dma->dma_rx_channel,
270 data_type, element_count, 1,
271 OMAP_DMA_SYNC_ELEMENT,
272 mcspi_dma->dma_rx_sync_dev, 1);
273
274 omap_set_dma_src_params(mcspi_dma->dma_rx_channel, 0,
275 OMAP_DMA_AMODE_CONSTANT,
276 rx_reg, 0, 0);
277
278 omap_set_dma_dest_params(mcspi_dma->dma_rx_channel, 0,
279 OMAP_DMA_AMODE_POST_INC,
280 xfer->rx_dma, 0, 0);
281 }
282
283 if (tx != NULL) {
284 omap_start_dma(mcspi_dma->dma_tx_channel);
285 omap2_mcspi_set_dma_req(spi, 0, 1);
286 }
287
288 if (rx != NULL) {
289 omap_start_dma(mcspi_dma->dma_rx_channel);
290 omap2_mcspi_set_dma_req(spi, 1, 1);
291 }
292
293 if (tx != NULL) {
294 wait_for_completion(&mcspi_dma->dma_tx_completion);
295 dma_unmap_single(NULL, xfer->tx_dma, count, DMA_TO_DEVICE);
296 }
297
298 if (rx != NULL) {
299 wait_for_completion(&mcspi_dma->dma_rx_completion);
300 dma_unmap_single(NULL, xfer->rx_dma, count, DMA_FROM_DEVICE);
301 }
302 return count;
303}
304
305static int mcspi_wait_for_reg_bit(void __iomem *reg, unsigned long bit)
306{
307 unsigned long timeout;
308
309 timeout = jiffies + msecs_to_jiffies(1000);
310 while (!(__raw_readl(reg) & bit)) {
311 if (time_after(jiffies, timeout))
312 return -1;
313 cpu_relax();
314 }
315 return 0;
316}
317
318static unsigned
319omap2_mcspi_txrx_pio(struct spi_device *spi, struct spi_transfer *xfer)
320{
321 struct omap2_mcspi *mcspi;
322 struct omap2_mcspi_cs *cs = spi->controller_state;
323 unsigned int count, c;
324 u32 l;
325 void __iomem *base = cs->base;
326 void __iomem *tx_reg;
327 void __iomem *rx_reg;
328 void __iomem *chstat_reg;
329 int word_len;
330
331 mcspi = spi_master_get_devdata(spi->master);
332 count = xfer->len;
333 c = count;
334 word_len = cs->word_len;
335
336 l = mcspi_read_cs_reg(spi, OMAP2_MCSPI_CHCONF0);
337 l &= ~OMAP2_MCSPI_CHCONF_TRM_MASK;
338
339 /* We store the pre-calculated register addresses on stack to speed
340 * up the transfer loop. */
341 tx_reg = base + OMAP2_MCSPI_TX0;
342 rx_reg = base + OMAP2_MCSPI_RX0;
343 chstat_reg = base + OMAP2_MCSPI_CHSTAT0;
344
345 if (word_len <= 8) {
346 u8 *rx;
347 const u8 *tx;
348
349 rx = xfer->rx_buf;
350 tx = xfer->tx_buf;
351
352 do {
353 if (tx != NULL) {
354 if (mcspi_wait_for_reg_bit(chstat_reg,
355 OMAP2_MCSPI_CHSTAT_TXS) < 0) {
356 dev_err(&spi->dev, "TXS timed out\n");
357 goto out;
358 }
359#ifdef VERBOSE
360 dev_dbg(&spi->dev, "write-%d %02x\n",
361 word_len, *tx);
362#endif
363 __raw_writel(*tx++, tx_reg);
364 }
365 if (rx != NULL) {
366 if (mcspi_wait_for_reg_bit(chstat_reg,
367 OMAP2_MCSPI_CHSTAT_RXS) < 0) {
368 dev_err(&spi->dev, "RXS timed out\n");
369 goto out;
370 }
371 /* prevent last RX_ONLY read from triggering
372 * more word i/o: switch to rx+tx
373 */
374 if (c == 0 && tx == NULL)
375 mcspi_write_cs_reg(spi,
376 OMAP2_MCSPI_CHCONF0, l);
377 *rx++ = __raw_readl(rx_reg);
378#ifdef VERBOSE
379 dev_dbg(&spi->dev, "read-%d %02x\n",
380 word_len, *(rx - 1));
381#endif
382 }
383 c -= 1;
384 } while (c);
385 } else if (word_len <= 16) {
386 u16 *rx;
387 const u16 *tx;
388
389 rx = xfer->rx_buf;
390 tx = xfer->tx_buf;
391 do {
392 if (tx != NULL) {
393 if (mcspi_wait_for_reg_bit(chstat_reg,
394 OMAP2_MCSPI_CHSTAT_TXS) < 0) {
395 dev_err(&spi->dev, "TXS timed out\n");
396 goto out;
397 }
398#ifdef VERBOSE
399 dev_dbg(&spi->dev, "write-%d %04x\n",
400 word_len, *tx);
401#endif
402 __raw_writel(*tx++, tx_reg);
403 }
404 if (rx != NULL) {
405 if (mcspi_wait_for_reg_bit(chstat_reg,
406 OMAP2_MCSPI_CHSTAT_RXS) < 0) {
407 dev_err(&spi->dev, "RXS timed out\n");
408 goto out;
409 }
410 /* prevent last RX_ONLY read from triggering
411 * more word i/o: switch to rx+tx
412 */
413 if (c == 0 && tx == NULL)
414 mcspi_write_cs_reg(spi,
415 OMAP2_MCSPI_CHCONF0, l);
416 *rx++ = __raw_readl(rx_reg);
417#ifdef VERBOSE
418 dev_dbg(&spi->dev, "read-%d %04x\n",
419 word_len, *(rx - 1));
420#endif
421 }
422 c -= 2;
423 } while (c);
424 } else if (word_len <= 32) {
425 u32 *rx;
426 const u32 *tx;
427
428 rx = xfer->rx_buf;
429 tx = xfer->tx_buf;
430 do {
431 if (tx != NULL) {
432 if (mcspi_wait_for_reg_bit(chstat_reg,
433 OMAP2_MCSPI_CHSTAT_TXS) < 0) {
434 dev_err(&spi->dev, "TXS timed out\n");
435 goto out;
436 }
437#ifdef VERBOSE
438 dev_dbg(&spi->dev, "write-%d %04x\n",
439 word_len, *tx);
440#endif
441 __raw_writel(*tx++, tx_reg);
442 }
443 if (rx != NULL) {
444 if (mcspi_wait_for_reg_bit(chstat_reg,
445 OMAP2_MCSPI_CHSTAT_RXS) < 0) {
446 dev_err(&spi->dev, "RXS timed out\n");
447 goto out;
448 }
449 /* prevent last RX_ONLY read from triggering
450 * more word i/o: switch to rx+tx
451 */
452 if (c == 0 && tx == NULL)
453 mcspi_write_cs_reg(spi,
454 OMAP2_MCSPI_CHCONF0, l);
455 *rx++ = __raw_readl(rx_reg);
456#ifdef VERBOSE
457 dev_dbg(&spi->dev, "read-%d %04x\n",
458 word_len, *(rx - 1));
459#endif
460 }
461 c -= 4;
462 } while (c);
463 }
464
465 /* for TX_ONLY mode, be sure all words have shifted out */
466 if (xfer->rx_buf == NULL) {
467 if (mcspi_wait_for_reg_bit(chstat_reg,
468 OMAP2_MCSPI_CHSTAT_TXS) < 0) {
469 dev_err(&spi->dev, "TXS timed out\n");
470 } else if (mcspi_wait_for_reg_bit(chstat_reg,
471 OMAP2_MCSPI_CHSTAT_EOT) < 0)
472 dev_err(&spi->dev, "EOT timed out\n");
473 }
474out:
475 return count - c;
476}
477
478/* called only when no transfer is active to this device */
479static int omap2_mcspi_setup_transfer(struct spi_device *spi,
480 struct spi_transfer *t)
481{
482 struct omap2_mcspi_cs *cs = spi->controller_state;
483 struct omap2_mcspi *mcspi;
484 u32 l = 0, div = 0;
485 u8 word_len = spi->bits_per_word;
486
487 mcspi = spi_master_get_devdata(spi->master);
488
489 if (t != NULL && t->bits_per_word)
490 word_len = t->bits_per_word;
491
492 cs->word_len = word_len;
493
494 if (spi->max_speed_hz) {
495 while (div <= 15 && (OMAP2_MCSPI_MAX_FREQ / (1 << div))
496 > spi->max_speed_hz)
497 div++;
498 } else
499 div = 15;
500
501 l = mcspi_read_cs_reg(spi, OMAP2_MCSPI_CHCONF0);
502
503 /* standard 4-wire master mode: SCK, MOSI/out, MISO/in, nCS
504 * REVISIT: this controller could support SPI_3WIRE mode.
505 */
506 l &= ~(OMAP2_MCSPI_CHCONF_IS|OMAP2_MCSPI_CHCONF_DPE1);
507 l |= OMAP2_MCSPI_CHCONF_DPE0;
508
509 /* wordlength */
510 l &= ~OMAP2_MCSPI_CHCONF_WL_MASK;
511 l |= (word_len - 1) << 7;
512
513 /* set chipselect polarity; manage with FORCE */
514 if (!(spi->mode & SPI_CS_HIGH))
515 l |= OMAP2_MCSPI_CHCONF_EPOL; /* active-low; normal */
516 else
517 l &= ~OMAP2_MCSPI_CHCONF_EPOL;
518
519 /* set clock divisor */
520 l &= ~OMAP2_MCSPI_CHCONF_CLKD_MASK;
521 l |= div << 2;
522
523 /* set SPI mode 0..3 */
524 if (spi->mode & SPI_CPOL)
525 l |= OMAP2_MCSPI_CHCONF_POL;
526 else
527 l &= ~OMAP2_MCSPI_CHCONF_POL;
528 if (spi->mode & SPI_CPHA)
529 l |= OMAP2_MCSPI_CHCONF_PHA;
530 else
531 l &= ~OMAP2_MCSPI_CHCONF_PHA;
532
533 mcspi_write_cs_reg(spi, OMAP2_MCSPI_CHCONF0, l);
534
535 dev_dbg(&spi->dev, "setup: speed %d, sample %s edge, clk %s\n",
536 OMAP2_MCSPI_MAX_FREQ / (1 << div),
537 (spi->mode & SPI_CPHA) ? "trailing" : "leading",
538 (spi->mode & SPI_CPOL) ? "inverted" : "normal");
539
540 return 0;
541}
542
543static void omap2_mcspi_dma_rx_callback(int lch, u16 ch_status, void *data)
544{
545 struct spi_device *spi = data;
546 struct omap2_mcspi *mcspi;
547 struct omap2_mcspi_dma *mcspi_dma;
548
549 mcspi = spi_master_get_devdata(spi->master);
550 mcspi_dma = &(mcspi->dma_channels[spi->chip_select]);
551
552 complete(&mcspi_dma->dma_rx_completion);
553
554 /* We must disable the DMA RX request */
555 omap2_mcspi_set_dma_req(spi, 1, 0);
556}
557
558static void omap2_mcspi_dma_tx_callback(int lch, u16 ch_status, void *data)
559{
560 struct spi_device *spi = data;
561 struct omap2_mcspi *mcspi;
562 struct omap2_mcspi_dma *mcspi_dma;
563
564 mcspi = spi_master_get_devdata(spi->master);
565 mcspi_dma = &(mcspi->dma_channels[spi->chip_select]);
566
567 complete(&mcspi_dma->dma_tx_completion);
568
569 /* We must disable the DMA TX request */
570 omap2_mcspi_set_dma_req(spi, 0, 0);
571}
572
573static int omap2_mcspi_request_dma(struct spi_device *spi)
574{
575 struct spi_master *master = spi->master;
576 struct omap2_mcspi *mcspi;
577 struct omap2_mcspi_dma *mcspi_dma;
578
579 mcspi = spi_master_get_devdata(master);
580 mcspi_dma = mcspi->dma_channels + spi->chip_select;
581
582 if (omap_request_dma(mcspi_dma->dma_rx_sync_dev, "McSPI RX",
583 omap2_mcspi_dma_rx_callback, spi,
584 &mcspi_dma->dma_rx_channel)) {
585 dev_err(&spi->dev, "no RX DMA channel for McSPI\n");
586 return -EAGAIN;
587 }
588
589 if (omap_request_dma(mcspi_dma->dma_tx_sync_dev, "McSPI TX",
590 omap2_mcspi_dma_tx_callback, spi,
591 &mcspi_dma->dma_tx_channel)) {
592 omap_free_dma(mcspi_dma->dma_rx_channel);
593 mcspi_dma->dma_rx_channel = -1;
594 dev_err(&spi->dev, "no TX DMA channel for McSPI\n");
595 return -EAGAIN;
596 }
597
598 init_completion(&mcspi_dma->dma_rx_completion);
599 init_completion(&mcspi_dma->dma_tx_completion);
600
601 return 0;
602}
603
604/* the spi->mode bits understood by this driver: */
605#define MODEBITS (SPI_CPOL | SPI_CPHA | SPI_CS_HIGH)
606
607static int omap2_mcspi_setup(struct spi_device *spi)
608{
609 int ret;
610 struct omap2_mcspi *mcspi;
611 struct omap2_mcspi_dma *mcspi_dma;
612 struct omap2_mcspi_cs *cs = spi->controller_state;
613
614 if (spi->mode & ~MODEBITS) {
615 dev_dbg(&spi->dev, "setup: unsupported mode bits %x\n",
616 spi->mode & ~MODEBITS);
617 return -EINVAL;
618 }
619
620 if (spi->bits_per_word == 0)
621 spi->bits_per_word = 8;
622 else if (spi->bits_per_word < 4 || spi->bits_per_word > 32) {
623 dev_dbg(&spi->dev, "setup: unsupported %d bit words\n",
624 spi->bits_per_word);
625 return -EINVAL;
626 }
627
628 mcspi = spi_master_get_devdata(spi->master);
629 mcspi_dma = &mcspi->dma_channels[spi->chip_select];
630
631 if (!cs) {
632 cs = kzalloc(sizeof *cs, GFP_KERNEL);
633 if (!cs)
634 return -ENOMEM;
635 cs->base = mcspi->base + spi->chip_select * 0x14;
636 spi->controller_state = cs;
637 }
638
639 if (mcspi_dma->dma_rx_channel == -1
640 || mcspi_dma->dma_tx_channel == -1) {
641 ret = omap2_mcspi_request_dma(spi);
642 if (ret < 0)
643 return ret;
644 }
645
646 clk_enable(mcspi->ick);
647 clk_enable(mcspi->fck);
648 ret = omap2_mcspi_setup_transfer(spi, NULL);
649 clk_disable(mcspi->fck);
650 clk_disable(mcspi->ick);
651
652 return ret;
653}
654
655static void omap2_mcspi_cleanup(struct spi_device *spi)
656{
657 struct omap2_mcspi *mcspi;
658 struct omap2_mcspi_dma *mcspi_dma;
659
660 mcspi = spi_master_get_devdata(spi->master);
661 mcspi_dma = &mcspi->dma_channels[spi->chip_select];
662
663 kfree(spi->controller_state);
664
665 if (mcspi_dma->dma_rx_channel != -1) {
666 omap_free_dma(mcspi_dma->dma_rx_channel);
667 mcspi_dma->dma_rx_channel = -1;
668 }
669 if (mcspi_dma->dma_tx_channel != -1) {
670 omap_free_dma(mcspi_dma->dma_tx_channel);
671 mcspi_dma->dma_tx_channel = -1;
672 }
673}
674
675static void omap2_mcspi_work(struct work_struct *work)
676{
677 struct omap2_mcspi *mcspi;
678
679 mcspi = container_of(work, struct omap2_mcspi, work);
680 spin_lock_irq(&mcspi->lock);
681
682 clk_enable(mcspi->ick);
683 clk_enable(mcspi->fck);
684
685 /* We only enable one channel at a time -- the one whose message is
686 * at the head of the queue -- although this controller would gladly
687 * arbitrate among multiple channels. This corresponds to "single
688 * channel" master mode. As a side effect, we need to manage the
689 * chipselect with the FORCE bit ... CS != channel enable.
690 */
691 while (!list_empty(&mcspi->msg_queue)) {
692 struct spi_message *m;
693 struct spi_device *spi;
694 struct spi_transfer *t = NULL;
695 int cs_active = 0;
696 struct omap2_mcspi_device_config *conf;
697 struct omap2_mcspi_cs *cs;
698 int par_override = 0;
699 int status = 0;
700 u32 chconf;
701
702 m = container_of(mcspi->msg_queue.next, struct spi_message,
703 queue);
704
705 list_del_init(&m->queue);
706 spin_unlock_irq(&mcspi->lock);
707
708 spi = m->spi;
709 conf = spi->controller_data;
710 cs = spi->controller_state;
711
712 omap2_mcspi_set_enable(spi, 1);
713 list_for_each_entry(t, &m->transfers, transfer_list) {
714 if (t->tx_buf == NULL && t->rx_buf == NULL && t->len) {
715 status = -EINVAL;
716 break;
717 }
718 if (par_override || t->speed_hz || t->bits_per_word) {
719 par_override = 1;
720 status = omap2_mcspi_setup_transfer(spi, t);
721 if (status < 0)
722 break;
723 if (!t->speed_hz && !t->bits_per_word)
724 par_override = 0;
725 }
726
727 if (!cs_active) {
728 omap2_mcspi_force_cs(spi, 1);
729 cs_active = 1;
730 }
731
732 chconf = mcspi_read_cs_reg(spi, OMAP2_MCSPI_CHCONF0);
733 chconf &= ~OMAP2_MCSPI_CHCONF_TRM_MASK;
734 if (t->tx_buf == NULL)
735 chconf |= OMAP2_MCSPI_CHCONF_TRM_RX_ONLY;
736 else if (t->rx_buf == NULL)
737 chconf |= OMAP2_MCSPI_CHCONF_TRM_TX_ONLY;
738 mcspi_write_cs_reg(spi, OMAP2_MCSPI_CHCONF0, chconf);
739
740 if (t->len) {
741 unsigned count;
742
743 /* RX_ONLY mode needs dummy data in TX reg */
744 if (t->tx_buf == NULL)
745 __raw_writel(0, cs->base
746 + OMAP2_MCSPI_TX0);
747
748 if (m->is_dma_mapped || t->len >= DMA_MIN_BYTES)
749 count = omap2_mcspi_txrx_dma(spi, t);
750 else
751 count = omap2_mcspi_txrx_pio(spi, t);
752 m->actual_length += count;
753
754 if (count != t->len) {
755 status = -EIO;
756 break;
757 }
758 }
759
760 if (t->delay_usecs)
761 udelay(t->delay_usecs);
762
763 /* ignore the "leave it on after last xfer" hint */
764 if (t->cs_change) {
765 omap2_mcspi_force_cs(spi, 0);
766 cs_active = 0;
767 }
768 }
769
770 /* Restore defaults if they were overriden */
771 if (par_override) {
772 par_override = 0;
773 status = omap2_mcspi_setup_transfer(spi, NULL);
774 }
775
776 if (cs_active)
777 omap2_mcspi_force_cs(spi, 0);
778
779 omap2_mcspi_set_enable(spi, 0);
780
781 m->status = status;
782 m->complete(m->context);
783
784 spin_lock_irq(&mcspi->lock);
785 }
786
787 clk_disable(mcspi->fck);
788 clk_disable(mcspi->ick);
789
790 spin_unlock_irq(&mcspi->lock);
791}
792
793static int omap2_mcspi_transfer(struct spi_device *spi, struct spi_message *m)
794{
795 struct omap2_mcspi *mcspi;
796 unsigned long flags;
797 struct spi_transfer *t;
798
799 m->actual_length = 0;
800 m->status = 0;
801
802 /* reject invalid messages and transfers */
803 if (list_empty(&m->transfers) || !m->complete)
804 return -EINVAL;
805 list_for_each_entry(t, &m->transfers, transfer_list) {
806 const void *tx_buf = t->tx_buf;
807 void *rx_buf = t->rx_buf;
808 unsigned len = t->len;
809
810 if (t->speed_hz > OMAP2_MCSPI_MAX_FREQ
811 || (len && !(rx_buf || tx_buf))
812 || (t->bits_per_word &&
813 ( t->bits_per_word < 4
814 || t->bits_per_word > 32))) {
815 dev_dbg(&spi->dev, "transfer: %d Hz, %d %s%s, %d bpw\n",
816 t->speed_hz,
817 len,
818 tx_buf ? "tx" : "",
819 rx_buf ? "rx" : "",
820 t->bits_per_word);
821 return -EINVAL;
822 }
823 if (t->speed_hz && t->speed_hz < OMAP2_MCSPI_MAX_FREQ/(1<<16)) {
824 dev_dbg(&spi->dev, "%d Hz max exceeds %d\n",
825 t->speed_hz,
826 OMAP2_MCSPI_MAX_FREQ/(1<<16));
827 return -EINVAL;
828 }
829
830 if (m->is_dma_mapped || len < DMA_MIN_BYTES)
831 continue;
832
833 /* Do DMA mapping "early" for better error reporting and
834 * dcache use. Note that if dma_unmap_single() ever starts
835 * to do real work on ARM, we'd need to clean up mappings
836 * for previous transfers on *ALL* exits of this loop...
837 */
838 if (tx_buf != NULL) {
839 t->tx_dma = dma_map_single(&spi->dev, (void *) tx_buf,
840 len, DMA_TO_DEVICE);
841 if (dma_mapping_error(t->tx_dma)) {
842 dev_dbg(&spi->dev, "dma %cX %d bytes error\n",
843 'T', len);
844 return -EINVAL;
845 }
846 }
847 if (rx_buf != NULL) {
848 t->rx_dma = dma_map_single(&spi->dev, rx_buf, t->len,
849 DMA_FROM_DEVICE);
850 if (dma_mapping_error(t->rx_dma)) {
851 dev_dbg(&spi->dev, "dma %cX %d bytes error\n",
852 'R', len);
853 if (tx_buf != NULL)
854 dma_unmap_single(NULL, t->tx_dma,
855 len, DMA_TO_DEVICE);
856 return -EINVAL;
857 }
858 }
859 }
860
861 mcspi = spi_master_get_devdata(spi->master);
862
863 spin_lock_irqsave(&mcspi->lock, flags);
864 list_add_tail(&m->queue, &mcspi->msg_queue);
865 queue_work(omap2_mcspi_wq, &mcspi->work);
866 spin_unlock_irqrestore(&mcspi->lock, flags);
867
868 return 0;
869}
870
871static int __init omap2_mcspi_reset(struct omap2_mcspi *mcspi)
872{
873 struct spi_master *master = mcspi->master;
874 u32 tmp;
875
876 clk_enable(mcspi->ick);
877 clk_enable(mcspi->fck);
878
879 mcspi_write_reg(master, OMAP2_MCSPI_SYSCONFIG,
880 OMAP2_MCSPI_SYSCONFIG_SOFTRESET);
881 do {
882 tmp = mcspi_read_reg(master, OMAP2_MCSPI_SYSSTATUS);
883 } while (!(tmp & OMAP2_MCSPI_SYSSTATUS_RESETDONE));
884
885 mcspi_write_reg(master, OMAP2_MCSPI_SYSCONFIG,
886 /* (3 << 8) | (2 << 3) | */
887 OMAP2_MCSPI_SYSCONFIG_AUTOIDLE);
888
889 omap2_mcspi_set_master_mode(master);
890
891 clk_disable(mcspi->fck);
892 clk_disable(mcspi->ick);
893 return 0;
894}
895
896static u8 __initdata spi1_rxdma_id [] = {
897 OMAP24XX_DMA_SPI1_RX0,
898 OMAP24XX_DMA_SPI1_RX1,
899 OMAP24XX_DMA_SPI1_RX2,
900 OMAP24XX_DMA_SPI1_RX3,
901};
902
903static u8 __initdata spi1_txdma_id [] = {
904 OMAP24XX_DMA_SPI1_TX0,
905 OMAP24XX_DMA_SPI1_TX1,
906 OMAP24XX_DMA_SPI1_TX2,
907 OMAP24XX_DMA_SPI1_TX3,
908};
909
910static u8 __initdata spi2_rxdma_id[] = {
911 OMAP24XX_DMA_SPI2_RX0,
912 OMAP24XX_DMA_SPI2_RX1,
913};
914
915static u8 __initdata spi2_txdma_id[] = {
916 OMAP24XX_DMA_SPI2_TX0,
917 OMAP24XX_DMA_SPI2_TX1,
918};
919
920static int __init omap2_mcspi_probe(struct platform_device *pdev)
921{
922 struct spi_master *master;
923 struct omap2_mcspi *mcspi;
924 struct resource *r;
925 int status = 0, i;
926 const u8 *rxdma_id, *txdma_id;
927 unsigned num_chipselect;
928
929 switch (pdev->id) {
930 case 1:
931 rxdma_id = spi1_rxdma_id;
932 txdma_id = spi1_txdma_id;
933 num_chipselect = 4;
934 break;
935 case 2:
936 rxdma_id = spi2_rxdma_id;
937 txdma_id = spi2_txdma_id;
938 num_chipselect = 2;
939 break;
940 /* REVISIT omap2430 has a third McSPI ... */
941 default:
942 return -EINVAL;
943 }
944
945 master = spi_alloc_master(&pdev->dev, sizeof *mcspi);
946 if (master == NULL) {
947 dev_dbg(&pdev->dev, "master allocation failed\n");
948 return -ENOMEM;
949 }
950
951 if (pdev->id != -1)
952 master->bus_num = pdev->id;
953
954 master->setup = omap2_mcspi_setup;
955 master->transfer = omap2_mcspi_transfer;
956 master->cleanup = omap2_mcspi_cleanup;
957 master->num_chipselect = num_chipselect;
958
959 dev_set_drvdata(&pdev->dev, master);
960
961 mcspi = spi_master_get_devdata(master);
962 mcspi->master = master;
963
964 r = platform_get_resource(pdev, IORESOURCE_MEM, 0);
965 if (r == NULL) {
966 status = -ENODEV;
967 goto err1;
968 }
969 if (!request_mem_region(r->start, (r->end - r->start) + 1,
970 pdev->dev.bus_id)) {
971 status = -EBUSY;
972 goto err1;
973 }
974
975 mcspi->base = (void __iomem *) io_p2v(r->start);
976
977 INIT_WORK(&mcspi->work, omap2_mcspi_work);
978
979 spin_lock_init(&mcspi->lock);
980 INIT_LIST_HEAD(&mcspi->msg_queue);
981
982 mcspi->ick = clk_get(&pdev->dev, "mcspi_ick");
983 if (IS_ERR(mcspi->ick)) {
984 dev_dbg(&pdev->dev, "can't get mcspi_ick\n");
985 status = PTR_ERR(mcspi->ick);
986 goto err1a;
987 }
988 mcspi->fck = clk_get(&pdev->dev, "mcspi_fck");
989 if (IS_ERR(mcspi->fck)) {
990 dev_dbg(&pdev->dev, "can't get mcspi_fck\n");
991 status = PTR_ERR(mcspi->fck);
992 goto err2;
993 }
994
995 mcspi->dma_channels = kcalloc(master->num_chipselect,
996 sizeof(struct omap2_mcspi_dma),
997 GFP_KERNEL);
998
999 if (mcspi->dma_channels == NULL)
1000 goto err3;
1001
1002 for (i = 0; i < num_chipselect; i++) {
1003 mcspi->dma_channels[i].dma_rx_channel = -1;
1004 mcspi->dma_channels[i].dma_rx_sync_dev = rxdma_id[i];
1005 mcspi->dma_channels[i].dma_tx_channel = -1;
1006 mcspi->dma_channels[i].dma_tx_sync_dev = txdma_id[i];
1007 }
1008
1009 if (omap2_mcspi_reset(mcspi) < 0)
1010 goto err4;
1011
1012 status = spi_register_master(master);
1013 if (status < 0)
1014 goto err4;
1015
1016 return status;
1017
1018err4:
1019 kfree(mcspi->dma_channels);
1020err3:
1021 clk_put(mcspi->fck);
1022err2:
1023 clk_put(mcspi->ick);
1024err1a:
1025 release_mem_region(r->start, (r->end - r->start) + 1);
1026err1:
1027 spi_master_put(master);
1028 return status;
1029}
1030
1031static int __exit omap2_mcspi_remove(struct platform_device *pdev)
1032{
1033 struct spi_master *master;
1034 struct omap2_mcspi *mcspi;
1035 struct omap2_mcspi_dma *dma_channels;
1036 struct resource *r;
1037
1038 master = dev_get_drvdata(&pdev->dev);
1039 mcspi = spi_master_get_devdata(master);
1040 dma_channels = mcspi->dma_channels;
1041
1042 clk_put(mcspi->fck);
1043 clk_put(mcspi->ick);
1044
1045 r = platform_get_resource(pdev, IORESOURCE_MEM, 0);
1046 release_mem_region(r->start, (r->end - r->start) + 1);
1047
1048 spi_unregister_master(master);
1049 kfree(dma_channels);
1050
1051 return 0;
1052}
1053
1054static struct platform_driver omap2_mcspi_driver = {
1055 .driver = {
1056 .name = "omap2_mcspi",
1057 .owner = THIS_MODULE,
1058 },
1059 .remove = __exit_p(omap2_mcspi_remove),
1060};
1061
1062
1063static int __init omap2_mcspi_init(void)
1064{
1065 omap2_mcspi_wq = create_singlethread_workqueue(
1066 omap2_mcspi_driver.driver.name);
1067 if (omap2_mcspi_wq == NULL)
1068 return -1;
1069 return platform_driver_probe(&omap2_mcspi_driver, omap2_mcspi_probe);
1070}
1071subsys_initcall(omap2_mcspi_init);
1072
1073static void __exit omap2_mcspi_exit(void)
1074{
1075 platform_driver_unregister(&omap2_mcspi_driver);
1076
1077 destroy_workqueue(omap2_mcspi_wq);
1078}
1079module_exit(omap2_mcspi_exit);
1080
1081MODULE_LICENSE("GPL");
diff --git a/drivers/spi/omap_uwire.c b/drivers/spi/omap_uwire.c
index 95183e1df525..d275c615a73e 100644
--- a/drivers/spi/omap_uwire.c
+++ b/drivers/spi/omap_uwire.c
@@ -445,10 +445,19 @@ done:
445 return status; 445 return status;
446} 446}
447 447
448/* the spi->mode bits understood by this driver: */
449#define MODEBITS (SPI_CPOL | SPI_CPHA | SPI_CS_HIGH)
450
448static int uwire_setup(struct spi_device *spi) 451static int uwire_setup(struct spi_device *spi)
449{ 452{
450 struct uwire_state *ust = spi->controller_state; 453 struct uwire_state *ust = spi->controller_state;
451 454
455 if (spi->mode & ~MODEBITS) {
456 dev_dbg(&spi->dev, "setup: unsupported mode bits %x\n",
457 spi->mode & ~MODEBITS);
458 return -EINVAL;
459 }
460
452 if (ust == NULL) { 461 if (ust == NULL) {
453 ust = kzalloc(sizeof(*ust), GFP_KERNEL); 462 ust = kzalloc(sizeof(*ust), GFP_KERNEL);
454 if (ust == NULL) 463 if (ust == NULL)
diff --git a/drivers/spi/pxa2xx_spi.c b/drivers/spi/pxa2xx_spi.c
index 9f2c887ffa04..e51311b2da0b 100644
--- a/drivers/spi/pxa2xx_spi.c
+++ b/drivers/spi/pxa2xx_spi.c
@@ -1067,6 +1067,9 @@ static int transfer(struct spi_device *spi, struct spi_message *msg)
1067 return 0; 1067 return 0;
1068} 1068}
1069 1069
1070/* the spi->mode bits understood by this driver: */
1071#define MODEBITS (SPI_CPOL | SPI_CPHA)
1072
1070static int setup(struct spi_device *spi) 1073static int setup(struct spi_device *spi)
1071{ 1074{
1072 struct pxa2xx_spi_chip *chip_info = NULL; 1075 struct pxa2xx_spi_chip *chip_info = NULL;
@@ -1093,6 +1096,12 @@ static int setup(struct spi_device *spi)
1093 return -EINVAL; 1096 return -EINVAL;
1094 } 1097 }
1095 1098
1099 if (spi->mode & ~MODEBITS) {
1100 dev_dbg(&spi->dev, "setup: unsupported mode bits %x\n",
1101 spi->mode & ~MODEBITS);
1102 return -EINVAL;
1103 }
1104
1096 /* Only alloc on first setup */ 1105 /* Only alloc on first setup */
1097 chip = spi_get_ctldata(spi); 1106 chip = spi_get_ctldata(spi);
1098 if (!chip) { 1107 if (!chip) {
diff --git a/drivers/spi/spi.c b/drivers/spi/spi.c
index 4831edbae2d5..018884d7a5fa 100644
--- a/drivers/spi/spi.c
+++ b/drivers/spi/spi.c
@@ -23,6 +23,7 @@
23#include <linux/device.h> 23#include <linux/device.h>
24#include <linux/init.h> 24#include <linux/init.h>
25#include <linux/cache.h> 25#include <linux/cache.h>
26#include <linux/mutex.h>
26#include <linux/spi/spi.h> 27#include <linux/spi/spi.h>
27 28
28 29
@@ -185,7 +186,7 @@ struct boardinfo {
185}; 186};
186 187
187static LIST_HEAD(board_list); 188static LIST_HEAD(board_list);
188static DECLARE_MUTEX(board_lock); 189static DEFINE_MUTEX(board_lock);
189 190
190 191
191/** 192/**
@@ -292,9 +293,9 @@ spi_register_board_info(struct spi_board_info const *info, unsigned n)
292 bi->n_board_info = n; 293 bi->n_board_info = n;
293 memcpy(bi->board_info, info, n * sizeof *info); 294 memcpy(bi->board_info, info, n * sizeof *info);
294 295
295 down(&board_lock); 296 mutex_lock(&board_lock);
296 list_add_tail(&bi->list, &board_list); 297 list_add_tail(&bi->list, &board_list);
297 up(&board_lock); 298 mutex_unlock(&board_lock);
298 return 0; 299 return 0;
299} 300}
300 301
@@ -308,7 +309,7 @@ scan_boardinfo(struct spi_master *master)
308 struct boardinfo *bi; 309 struct boardinfo *bi;
309 struct device *dev = master->cdev.dev; 310 struct device *dev = master->cdev.dev;
310 311
311 down(&board_lock); 312 mutex_lock(&board_lock);
312 list_for_each_entry(bi, &board_list, list) { 313 list_for_each_entry(bi, &board_list, list) {
313 struct spi_board_info *chip = bi->board_info; 314 struct spi_board_info *chip = bi->board_info;
314 unsigned n; 315 unsigned n;
@@ -330,7 +331,7 @@ scan_boardinfo(struct spi_master *master)
330 (void) spi_new_device(master, chip); 331 (void) spi_new_device(master, chip);
331 } 332 }
332 } 333 }
333 up(&board_lock); 334 mutex_unlock(&board_lock);
334} 335}
335 336
336/*-------------------------------------------------------------------------*/ 337/*-------------------------------------------------------------------------*/
diff --git a/drivers/spi/spi_bitbang.c b/drivers/spi/spi_bitbang.c
index 88425e1af4d3..0c85c984ccb4 100644
--- a/drivers/spi/spi_bitbang.c
+++ b/drivers/spi/spi_bitbang.c
@@ -187,12 +187,10 @@ int spi_bitbang_setup(struct spi_device *spi)
187 187
188 bitbang = spi_master_get_devdata(spi->master); 188 bitbang = spi_master_get_devdata(spi->master);
189 189
190 /* REVISIT: some systems will want to support devices using lsb-first 190 /* Bitbangers can support SPI_CS_HIGH, SPI_3WIRE, and so on;
191 * bit encodings on the wire. In pure software that would be trivial, 191 * add those to master->flags, and provide the other support.
192 * just bitbang_txrx_le_cphaX() routines shifting the other way, and
193 * some hardware controllers also have this support.
194 */ 192 */
195 if ((spi->mode & SPI_LSB_FIRST) != 0) 193 if ((spi->mode & ~(SPI_CPOL|SPI_CPHA|bitbang->flags)) != 0)
196 return -EINVAL; 194 return -EINVAL;
197 195
198 if (!cs) { 196 if (!cs) {
diff --git a/drivers/spi/spi_imx.c b/drivers/spi/spi_imx.c
index 656be4a5094a..aee9ad6f633c 100644
--- a/drivers/spi/spi_imx.c
+++ b/drivers/spi/spi_imx.c
@@ -1163,6 +1163,9 @@ msg_rejected:
1163 return -EINVAL; 1163 return -EINVAL;
1164} 1164}
1165 1165
1166/* the spi->mode bits understood by this driver: */
1167#define MODEBITS (SPI_CPOL | SPI_CPHA | SPI_CS_HIGH)
1168
1166/* On first setup bad values must free chip_data memory since will cause 1169/* On first setup bad values must free chip_data memory since will cause
1167 spi_new_device to fail. Bad value setup from protocol driver are simply not 1170 spi_new_device to fail. Bad value setup from protocol driver are simply not
1168 applied and notified to the calling driver. */ 1171 applied and notified to the calling driver. */
@@ -1174,6 +1177,12 @@ static int setup(struct spi_device *spi)
1174 u32 tmp; 1177 u32 tmp;
1175 int status = 0; 1178 int status = 0;
1176 1179
1180 if (spi->mode & ~MODEBITS) {
1181 dev_dbg(&spi->dev, "setup: unsupported mode bits %x\n",
1182 spi->mode & ~MODEBITS);
1183 return -EINVAL;
1184 }
1185
1177 /* Get controller data */ 1186 /* Get controller data */
1178 chip_info = spi->controller_data; 1187 chip_info = spi->controller_data;
1179 1188
@@ -1245,21 +1254,6 @@ static int setup(struct spi_device *spi)
1245 1254
1246 /* SPI mode */ 1255 /* SPI mode */
1247 tmp = spi->mode; 1256 tmp = spi->mode;
1248 if (tmp & SPI_LSB_FIRST) {
1249 status = -EINVAL;
1250 if (first_setup) {
1251 dev_err(&spi->dev,
1252 "setup - "
1253 "HW doesn't support LSB first transfer\n");
1254 goto err_first_setup;
1255 } else {
1256 dev_err(&spi->dev,
1257 "setup - "
1258 "HW doesn't support LSB first transfer, "
1259 "default to MSB first\n");
1260 spi->mode &= ~SPI_LSB_FIRST;
1261 }
1262 }
1263 if (tmp & SPI_CS_HIGH) { 1257 if (tmp & SPI_CS_HIGH) {
1264 u32_EDIT(chip->control, 1258 u32_EDIT(chip->control,
1265 SPI_CONTROL_SSPOL, SPI_CONTROL_SSPOL_ACT_HIGH); 1259 SPI_CONTROL_SSPOL, SPI_CONTROL_SSPOL_ACT_HIGH);
diff --git a/drivers/spi/spi_lm70llp.c b/drivers/spi/spi_lm70llp.c
new file mode 100644
index 000000000000..4ea68ac16115
--- /dev/null
+++ b/drivers/spi/spi_lm70llp.c
@@ -0,0 +1,361 @@
1/*
2 * spi_lm70llp.c - driver for lm70llp eval board for the LM70 sensor
3 *
4 * Copyright (C) 2006 Kaiwan N Billimoria <kaiwan@designergraphix.com>
5 *
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License as published by
8 * the Free Software Foundation; either version 2 of the License, or
9 * (at your option) any later version.
10 *
11 * This program is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14 * GNU General Public License for more details.
15 *
16 * You should have received a copy of the GNU General Public License
17 * along with this program; if not, write to the Free Software
18 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
19 */
20
21#include <linux/init.h>
22#include <linux/module.h>
23#include <linux/kernel.h>
24#include <linux/delay.h>
25#include <linux/device.h>
26#include <linux/parport.h>
27#include <linux/sysfs.h>
28#include <linux/workqueue.h>
29
30
31#include <linux/spi/spi.h>
32#include <linux/spi/spi_bitbang.h>
33
34
35/*
36 * The LM70 communicates with a host processor using a 3-wire variant of
37 * the SPI/Microwire bus interface. This driver specifically supports an
38 * NS LM70 LLP Evaluation Board, interfacing to a PC using its parallel
39 * port to bitbang an SPI-parport bridge. Accordingly, this is an SPI
40 * master controller driver. The hwmon/lm70 driver is a "SPI protocol
41 * driver", layered on top of this one and usable without the lm70llp.
42 *
43 * The LM70 is a temperature sensor chip from National Semiconductor; its
44 * datasheet is available at http://www.national.com/pf/LM/LM70.html
45 *
46 * Also see Documentation/spi/spi-lm70llp. The SPI<->parport code here is
47 * (heavily) based on spi-butterfly by David Brownell.
48 *
49 * The LM70 LLP connects to the PC parallel port in the following manner:
50 *
51 * Parallel LM70 LLP
52 * Port Direction JP2 Header
53 * ----------- --------- ------------
54 * D0 2 - -
55 * D1 3 --> V+ 5
56 * D2 4 --> V+ 5
57 * D3 5 --> V+ 5
58 * D4 6 --> V+ 5
59 * D5 7 --> nCS 8
60 * D6 8 --> SCLK 3
61 * D7 9 --> SI/O 5
62 * GND 25 - GND 7
63 * Select 13 <-- SI/O 1
64 *
65 * Note that parport pin 13 actually gets inverted by the transistor
66 * arrangement which lets either the parport or the LM70 drive the
67 * SI/SO signal.
68 */
69
70#define DRVNAME "spi-lm70llp"
71
72#define lm70_INIT 0xBE
73#define SIO 0x10
74#define nCS 0x20
75#define SCLK 0x40
76
77/*-------------------------------------------------------------------------*/
78
79struct spi_lm70llp {
80 struct spi_bitbang bitbang;
81 struct parport *port;
82 struct pardevice *pd;
83 struct spi_device *spidev_lm70;
84 struct spi_board_info info;
85 struct class_device *cdev;
86};
87
88/* REVISIT : ugly global ; provides "exclusive open" facility */
89static struct spi_lm70llp *lm70llp;
90
91
92/*-------------------------------------------------------------------*/
93
94static inline struct spi_lm70llp *spidev_to_pp(struct spi_device *spi)
95{
96 return spi->controller_data;
97}
98
99/*---------------------- LM70 LLP eval board-specific inlines follow */
100
101/* NOTE: we don't actually need to reread the output values, since they'll
102 * still be what we wrote before. Plus, going through parport builds in
103 * a ~1ms/operation delay; these SPI transfers could easily be faster.
104 */
105
106static inline void deassertCS(struct spi_lm70llp *pp)
107{
108 u8 data = parport_read_data(pp->port);
109 parport_write_data(pp->port, data | nCS);
110}
111
112static inline void assertCS(struct spi_lm70llp *pp)
113{
114 u8 data = parport_read_data(pp->port);
115 parport_write_data(pp->port, data & ~nCS);
116}
117
118static inline void clkHigh(struct spi_lm70llp *pp)
119{
120 u8 data = parport_read_data(pp->port);
121 parport_write_data(pp->port, data | SCLK);
122}
123
124static inline void clkLow(struct spi_lm70llp *pp)
125{
126 u8 data = parport_read_data(pp->port);
127 parport_write_data(pp->port, data & ~SCLK);
128}
129
130/*------------------------- SPI-LM70-specific inlines ----------------------*/
131
132static inline void spidelay(unsigned d)
133{
134 udelay(d);
135}
136
137static inline void setsck(struct spi_device *s, int is_on)
138{
139 struct spi_lm70llp *pp = spidev_to_pp(s);
140
141 if (is_on)
142 clkHigh(pp);
143 else
144 clkLow(pp);
145}
146
147static inline void setmosi(struct spi_device *s, int is_on)
148{
149 /* FIXME update D7 ... this way we can put the chip
150 * into shutdown mode and read the manufacturer ID,
151 * but we can't put it back into operational mode.
152 */
153}
154
155/*
156 * getmiso:
157 * Why do we return 0 when the SIO line is high and vice-versa?
158 * The fact is, the lm70 eval board from NS (which this driver drives),
159 * is wired in just such a way : when the lm70's SIO goes high, a transistor
160 * switches it to low reflecting this on the parport (pin 13), and vice-versa.
161 */
162static inline int getmiso(struct spi_device *s)
163{
164 struct spi_lm70llp *pp = spidev_to_pp(s);
165 return ((SIO == (parport_read_status(pp->port) & SIO)) ? 0 : 1 );
166}
167/*--------------------------------------------------------------------*/
168
169#define EXPAND_BITBANG_TXRX 1
170#include <linux/spi/spi_bitbang.h>
171
172static void lm70_chipselect(struct spi_device *spi, int value)
173{
174 struct spi_lm70llp *pp = spidev_to_pp(spi);
175
176 if (value)
177 assertCS(pp);
178 else
179 deassertCS(pp);
180}
181
182/*
183 * Our actual bitbanger routine.
184 */
185static u32 lm70_txrx(struct spi_device *spi, unsigned nsecs, u32 word, u8 bits)
186{
187 static u32 sio=0;
188 static int first_time=1;
189
190 /* First time: perform SPI bitbang and return the LSB of
191 * the result of the SPI call.
192 */
193 if (first_time) {
194 sio = bitbang_txrx_be_cpha0(spi, nsecs, 0, word, bits);
195 first_time=0;
196 return (sio & 0x00ff);
197 }
198 /* Return the MSB of the result of the SPI call */
199 else {
200 first_time=1;
201 return (sio >> 8);
202 }
203}
204
205static void spi_lm70llp_attach(struct parport *p)
206{
207 struct pardevice *pd;
208 struct spi_lm70llp *pp;
209 struct spi_master *master;
210 int status;
211
212 if (lm70llp) {
213 printk(KERN_WARNING
214 "%s: spi_lm70llp instance already loaded. Aborting.\n",
215 DRVNAME);
216 return;
217 }
218
219 /* TODO: this just _assumes_ a lm70 is there ... no probe;
220 * the lm70 driver could verify it, reading the manf ID.
221 */
222
223 master = spi_alloc_master(p->physport->dev, sizeof *pp);
224 if (!master) {
225 status = -ENOMEM;
226 goto out_fail;
227 }
228 pp = spi_master_get_devdata(master);
229
230 master->bus_num = -1; /* dynamic alloc of a bus number */
231 master->num_chipselect = 1;
232
233 /*
234 * SPI and bitbang hookup.
235 */
236 pp->bitbang.master = spi_master_get(master);
237 pp->bitbang.chipselect = lm70_chipselect;
238 pp->bitbang.txrx_word[SPI_MODE_0] = lm70_txrx;
239 pp->bitbang.flags = SPI_3WIRE;
240
241 /*
242 * Parport hookup
243 */
244 pp->port = p;
245 pd = parport_register_device(p, DRVNAME,
246 NULL, NULL, NULL,
247 PARPORT_FLAG_EXCL, pp);
248 if (!pd) {
249 status = -ENOMEM;
250 goto out_free_master;
251 }
252 pp->pd = pd;
253
254 status = parport_claim(pd);
255 if (status < 0)
256 goto out_parport_unreg;
257
258 /*
259 * Start SPI ...
260 */
261 status = spi_bitbang_start(&pp->bitbang);
262 if (status < 0) {
263 printk(KERN_WARNING
264 "%s: spi_bitbang_start failed with status %d\n",
265 DRVNAME, status);
266 goto out_off_and_release;
267 }
268
269 /*
270 * The modalias name MUST match the device_driver name
271 * for the bus glue code to match and subsequently bind them.
272 * We are binding to the generic drivers/hwmon/lm70.c device
273 * driver.
274 */
275 strcpy(pp->info.modalias, "lm70");
276 pp->info.max_speed_hz = 6 * 1000 * 1000;
277 pp->info.chip_select = 0;
278 pp->info.mode = SPI_3WIRE | SPI_MODE_0;
279
280 /* power up the chip, and let the LM70 control SI/SO */
281 parport_write_data(pp->port, lm70_INIT);
282
283 /* Enable access to our primary data structure via
284 * the board info's (void *)controller_data.
285 */
286 pp->info.controller_data = pp;
287 pp->spidev_lm70 = spi_new_device(pp->bitbang.master, &pp->info);
288 if (pp->spidev_lm70)
289 dev_dbg(&pp->spidev_lm70->dev, "spidev_lm70 at %s\n",
290 pp->spidev_lm70->dev.bus_id);
291 else {
292 printk(KERN_WARNING "%s: spi_new_device failed\n", DRVNAME);
293 status = -ENODEV;
294 goto out_bitbang_stop;
295 }
296 pp->spidev_lm70->bits_per_word = 16;
297
298 lm70llp = pp;
299
300 return;
301
302out_bitbang_stop:
303 spi_bitbang_stop(&pp->bitbang);
304out_off_and_release:
305 /* power down */
306 parport_write_data(pp->port, 0);
307 mdelay(10);
308 parport_release(pp->pd);
309out_parport_unreg:
310 parport_unregister_device(pd);
311out_free_master:
312 (void) spi_master_put(master);
313out_fail:
314 pr_info("%s: spi_lm70llp probe fail, status %d\n", DRVNAME, status);
315}
316
317static void spi_lm70llp_detach(struct parport *p)
318{
319 struct spi_lm70llp *pp;
320
321 if (!lm70llp || lm70llp->port != p)
322 return;
323
324 pp = lm70llp;
325 spi_bitbang_stop(&pp->bitbang);
326
327 /* power down */
328 parport_write_data(pp->port, 0);
329 msleep(10);
330
331 parport_release(pp->pd);
332 parport_unregister_device(pp->pd);
333
334 (void) spi_master_put(pp->bitbang.master);
335
336 lm70llp = NULL;
337}
338
339
340static struct parport_driver spi_lm70llp_drv = {
341 .name = DRVNAME,
342 .attach = spi_lm70llp_attach,
343 .detach = spi_lm70llp_detach,
344};
345
346static int __init init_spi_lm70llp(void)
347{
348 return parport_register_driver(&spi_lm70llp_drv);
349}
350module_init(init_spi_lm70llp);
351
352static void __exit cleanup_spi_lm70llp(void)
353{
354 parport_unregister_driver(&spi_lm70llp_drv);
355}
356module_exit(cleanup_spi_lm70llp);
357
358MODULE_AUTHOR("Kaiwan N Billimoria <kaiwan@designergraphix.com>");
359MODULE_DESCRIPTION(
360 "Parport adapter for the National Semiconductor LM70 LLP eval board");
361MODULE_LICENSE("GPL");
diff --git a/drivers/spi/spi_mpc83xx.c b/drivers/spi/spi_mpc83xx.c
index e9798bf7b8c6..3295cfcc9f20 100644
--- a/drivers/spi/spi_mpc83xx.c
+++ b/drivers/spi/spi_mpc83xx.c
@@ -47,6 +47,7 @@ struct mpc83xx_spi_reg {
47#define SPMODE_ENABLE (1 << 24) 47#define SPMODE_ENABLE (1 << 24)
48#define SPMODE_LEN(x) ((x) << 20) 48#define SPMODE_LEN(x) ((x) << 20)
49#define SPMODE_PM(x) ((x) << 16) 49#define SPMODE_PM(x) ((x) << 16)
50#define SPMODE_OP (1 << 14)
50 51
51/* 52/*
52 * Default for SPI Mode: 53 * Default for SPI Mode:
@@ -85,6 +86,11 @@ struct mpc83xx_spi {
85 unsigned nsecs; /* (clock cycle time)/2 */ 86 unsigned nsecs; /* (clock cycle time)/2 */
86 87
87 u32 sysclk; 88 u32 sysclk;
89 u32 rx_shift; /* RX data reg shift when in qe mode */
90 u32 tx_shift; /* TX data reg shift when in qe mode */
91
92 bool qe_mode;
93
88 void (*activate_cs) (u8 cs, u8 polarity); 94 void (*activate_cs) (u8 cs, u8 polarity);
89 void (*deactivate_cs) (u8 cs, u8 polarity); 95 void (*deactivate_cs) (u8 cs, u8 polarity);
90}; 96};
@@ -103,7 +109,7 @@ static inline u32 mpc83xx_spi_read_reg(__be32 __iomem * reg)
103void mpc83xx_spi_rx_buf_##type(u32 data, struct mpc83xx_spi *mpc83xx_spi) \ 109void mpc83xx_spi_rx_buf_##type(u32 data, struct mpc83xx_spi *mpc83xx_spi) \
104{ \ 110{ \
105 type * rx = mpc83xx_spi->rx; \ 111 type * rx = mpc83xx_spi->rx; \
106 *rx++ = (type)data; \ 112 *rx++ = (type)(data >> mpc83xx_spi->rx_shift); \
107 mpc83xx_spi->rx = rx; \ 113 mpc83xx_spi->rx = rx; \
108} 114}
109 115
@@ -114,7 +120,7 @@ u32 mpc83xx_spi_tx_buf_##type(struct mpc83xx_spi *mpc83xx_spi) \
114 const type * tx = mpc83xx_spi->tx; \ 120 const type * tx = mpc83xx_spi->tx; \
115 if (!tx) \ 121 if (!tx) \
116 return 0; \ 122 return 0; \
117 data = *tx++; \ 123 data = *tx++ << mpc83xx_spi->tx_shift; \
118 mpc83xx_spi->tx = tx; \ 124 mpc83xx_spi->tx = tx; \
119 return data; \ 125 return data; \
120} 126}
@@ -158,6 +164,12 @@ static void mpc83xx_spi_chipselect(struct spi_device *spi, int value)
158 164
159 if ((mpc83xx_spi->sysclk / spi->max_speed_hz) >= 64) { 165 if ((mpc83xx_spi->sysclk / spi->max_speed_hz) >= 64) {
160 u8 pm = mpc83xx_spi->sysclk / (spi->max_speed_hz * 64); 166 u8 pm = mpc83xx_spi->sysclk / (spi->max_speed_hz * 64);
167 if (pm > 0x0f) {
168 printk(KERN_WARNING "MPC83xx SPI: SPICLK can't be less then a SYSCLK/1024!\n"
169 "Requested SPICLK is %d Hz. Will use %d Hz instead.\n",
170 spi->max_speed_hz, mpc83xx_spi->sysclk / 1024);
171 pm = 0x0f;
172 }
161 regval |= SPMODE_PM(pm) | SPMODE_DIV16; 173 regval |= SPMODE_PM(pm) | SPMODE_DIV16;
162 } else { 174 } else {
163 u8 pm = mpc83xx_spi->sysclk / (spi->max_speed_hz * 4); 175 u8 pm = mpc83xx_spi->sysclk / (spi->max_speed_hz * 4);
@@ -197,12 +209,22 @@ int mpc83xx_spi_setup_transfer(struct spi_device *spi, struct spi_transfer *t)
197 || ((bits_per_word > 16) && (bits_per_word != 32))) 209 || ((bits_per_word > 16) && (bits_per_word != 32)))
198 return -EINVAL; 210 return -EINVAL;
199 211
212 mpc83xx_spi->rx_shift = 0;
213 mpc83xx_spi->tx_shift = 0;
200 if (bits_per_word <= 8) { 214 if (bits_per_word <= 8) {
201 mpc83xx_spi->get_rx = mpc83xx_spi_rx_buf_u8; 215 mpc83xx_spi->get_rx = mpc83xx_spi_rx_buf_u8;
202 mpc83xx_spi->get_tx = mpc83xx_spi_tx_buf_u8; 216 mpc83xx_spi->get_tx = mpc83xx_spi_tx_buf_u8;
217 if (mpc83xx_spi->qe_mode) {
218 mpc83xx_spi->rx_shift = 16;
219 mpc83xx_spi->tx_shift = 24;
220 }
203 } else if (bits_per_word <= 16) { 221 } else if (bits_per_word <= 16) {
204 mpc83xx_spi->get_rx = mpc83xx_spi_rx_buf_u16; 222 mpc83xx_spi->get_rx = mpc83xx_spi_rx_buf_u16;
205 mpc83xx_spi->get_tx = mpc83xx_spi_tx_buf_u16; 223 mpc83xx_spi->get_tx = mpc83xx_spi_tx_buf_u16;
224 if (mpc83xx_spi->qe_mode) {
225 mpc83xx_spi->rx_shift = 16;
226 mpc83xx_spi->tx_shift = 16;
227 }
206 } else if (bits_per_word <= 32) { 228 } else if (bits_per_word <= 32) {
207 mpc83xx_spi->get_rx = mpc83xx_spi_rx_buf_u32; 229 mpc83xx_spi->get_rx = mpc83xx_spi_rx_buf_u32;
208 mpc83xx_spi->get_tx = mpc83xx_spi_tx_buf_u32; 230 mpc83xx_spi->get_tx = mpc83xx_spi_tx_buf_u32;
@@ -232,12 +254,21 @@ int mpc83xx_spi_setup_transfer(struct spi_device *spi, struct spi_transfer *t)
232 return 0; 254 return 0;
233} 255}
234 256
257/* the spi->mode bits understood by this driver: */
258#define MODEBITS (SPI_CPOL | SPI_CPHA | SPI_CS_HIGH)
259
235static int mpc83xx_spi_setup(struct spi_device *spi) 260static int mpc83xx_spi_setup(struct spi_device *spi)
236{ 261{
237 struct spi_bitbang *bitbang; 262 struct spi_bitbang *bitbang;
238 struct mpc83xx_spi *mpc83xx_spi; 263 struct mpc83xx_spi *mpc83xx_spi;
239 int retval; 264 int retval;
240 265
266 if (spi->mode & ~MODEBITS) {
267 dev_dbg(&spi->dev, "setup: unsupported mode bits %x\n",
268 spi->mode & ~MODEBITS);
269 return -EINVAL;
270 }
271
241 if (!spi->max_speed_hz) 272 if (!spi->max_speed_hz)
242 return -EINVAL; 273 return -EINVAL;
243 274
@@ -371,7 +402,6 @@ static int __init mpc83xx_spi_probe(struct platform_device *dev)
371 ret = -ENODEV; 402 ret = -ENODEV;
372 goto free_master; 403 goto free_master;
373 } 404 }
374
375 mpc83xx_spi = spi_master_get_devdata(master); 405 mpc83xx_spi = spi_master_get_devdata(master);
376 mpc83xx_spi->bitbang.master = spi_master_get(master); 406 mpc83xx_spi->bitbang.master = spi_master_get(master);
377 mpc83xx_spi->bitbang.chipselect = mpc83xx_spi_chipselect; 407 mpc83xx_spi->bitbang.chipselect = mpc83xx_spi_chipselect;
@@ -380,9 +410,17 @@ static int __init mpc83xx_spi_probe(struct platform_device *dev)
380 mpc83xx_spi->sysclk = pdata->sysclk; 410 mpc83xx_spi->sysclk = pdata->sysclk;
381 mpc83xx_spi->activate_cs = pdata->activate_cs; 411 mpc83xx_spi->activate_cs = pdata->activate_cs;
382 mpc83xx_spi->deactivate_cs = pdata->deactivate_cs; 412 mpc83xx_spi->deactivate_cs = pdata->deactivate_cs;
413 mpc83xx_spi->qe_mode = pdata->qe_mode;
383 mpc83xx_spi->get_rx = mpc83xx_spi_rx_buf_u8; 414 mpc83xx_spi->get_rx = mpc83xx_spi_rx_buf_u8;
384 mpc83xx_spi->get_tx = mpc83xx_spi_tx_buf_u8; 415 mpc83xx_spi->get_tx = mpc83xx_spi_tx_buf_u8;
385 416
417 mpc83xx_spi->rx_shift = 0;
418 mpc83xx_spi->tx_shift = 0;
419 if (mpc83xx_spi->qe_mode) {
420 mpc83xx_spi->rx_shift = 16;
421 mpc83xx_spi->tx_shift = 24;
422 }
423
386 mpc83xx_spi->bitbang.master->setup = mpc83xx_spi_setup; 424 mpc83xx_spi->bitbang.master->setup = mpc83xx_spi_setup;
387 init_completion(&mpc83xx_spi->done); 425 init_completion(&mpc83xx_spi->done);
388 426
@@ -417,6 +455,9 @@ static int __init mpc83xx_spi_probe(struct platform_device *dev)
417 455
418 /* Enable SPI interface */ 456 /* Enable SPI interface */
419 regval = pdata->initial_spmode | SPMODE_INIT_VAL | SPMODE_ENABLE; 457 regval = pdata->initial_spmode | SPMODE_INIT_VAL | SPMODE_ENABLE;
458 if (pdata->qe_mode)
459 regval |= SPMODE_OP;
460
420 mpc83xx_spi_write_reg(&mpc83xx_spi->base->mode, regval); 461 mpc83xx_spi_write_reg(&mpc83xx_spi->base->mode, regval);
421 462
422 ret = spi_bitbang_start(&mpc83xx_spi->bitbang); 463 ret = spi_bitbang_start(&mpc83xx_spi->bitbang);
diff --git a/drivers/spi/spi_s3c24xx.c b/drivers/spi/spi_s3c24xx.c
index d5a710f6e445..7071ff8da63e 100644
--- a/drivers/spi/spi_s3c24xx.c
+++ b/drivers/spi/spi_s3c24xx.c
@@ -146,6 +146,9 @@ static int s3c24xx_spi_setupxfer(struct spi_device *spi,
146 return 0; 146 return 0;
147} 147}
148 148
149/* the spi->mode bits understood by this driver: */
150#define MODEBITS (SPI_CPOL | SPI_CPHA | SPI_CS_HIGH)
151
149static int s3c24xx_spi_setup(struct spi_device *spi) 152static int s3c24xx_spi_setup(struct spi_device *spi)
150{ 153{
151 int ret; 154 int ret;
@@ -153,8 +156,11 @@ static int s3c24xx_spi_setup(struct spi_device *spi)
153 if (!spi->bits_per_word) 156 if (!spi->bits_per_word)
154 spi->bits_per_word = 8; 157 spi->bits_per_word = 8;
155 158
156 if ((spi->mode & SPI_LSB_FIRST) != 0) 159 if (spi->mode & ~MODEBITS) {
160 dev_dbg(&spi->dev, "setup: unsupported mode bits %x\n",
161 spi->mode & ~MODEBITS);
157 return -EINVAL; 162 return -EINVAL;
163 }
158 164
159 ret = s3c24xx_spi_setupxfer(spi, NULL); 165 ret = s3c24xx_spi_setupxfer(spi, NULL);
160 if (ret < 0) { 166 if (ret < 0) {
diff --git a/drivers/spi/spi_txx9.c b/drivers/spi/spi_txx9.c
new file mode 100644
index 000000000000..08e981c40646
--- /dev/null
+++ b/drivers/spi/spi_txx9.c
@@ -0,0 +1,474 @@
1/*
2 * spi_txx9.c - TXx9 SPI controller driver.
3 *
4 * Based on linux/arch/mips/tx4938/toshiba_rbtx4938/spi_txx9.c
5 * Copyright (C) 2000-2001 Toshiba Corporation
6 *
7 * 2003-2005 (c) MontaVista Software, Inc. This file is licensed under the
8 * terms of the GNU General Public License version 2. This program is
9 * licensed "as is" without any warranty of any kind, whether express
10 * or implied.
11 *
12 * Support for TX4938 in 2.6 - Manish Lachwani (mlachwani@mvista.com)
13 *
14 * Convert to generic SPI framework - Atsushi Nemoto (anemo@mba.ocn.ne.jp)
15 */
16#include <linux/init.h>
17#include <linux/delay.h>
18#include <linux/errno.h>
19#include <linux/interrupt.h>
20#include <linux/platform_device.h>
21#include <linux/sched.h>
22#include <linux/spinlock.h>
23#include <linux/workqueue.h>
24#include <linux/spi/spi.h>
25#include <linux/err.h>
26#include <linux/clk.h>
27#include <asm/gpio.h>
28
29
30#define SPI_FIFO_SIZE 4
31
32#define TXx9_SPMCR 0x00
33#define TXx9_SPCR0 0x04
34#define TXx9_SPCR1 0x08
35#define TXx9_SPFS 0x0c
36#define TXx9_SPSR 0x14
37#define TXx9_SPDR 0x18
38
39/* SPMCR : SPI Master Control */
40#define TXx9_SPMCR_OPMODE 0xc0
41#define TXx9_SPMCR_CONFIG 0x40
42#define TXx9_SPMCR_ACTIVE 0x80
43#define TXx9_SPMCR_SPSTP 0x02
44#define TXx9_SPMCR_BCLR 0x01
45
46/* SPCR0 : SPI Control 0 */
47#define TXx9_SPCR0_TXIFL_MASK 0xc000
48#define TXx9_SPCR0_RXIFL_MASK 0x3000
49#define TXx9_SPCR0_SIDIE 0x0800
50#define TXx9_SPCR0_SOEIE 0x0400
51#define TXx9_SPCR0_RBSIE 0x0200
52#define TXx9_SPCR0_TBSIE 0x0100
53#define TXx9_SPCR0_IFSPSE 0x0010
54#define TXx9_SPCR0_SBOS 0x0004
55#define TXx9_SPCR0_SPHA 0x0002
56#define TXx9_SPCR0_SPOL 0x0001
57
58/* SPSR : SPI Status */
59#define TXx9_SPSR_TBSI 0x8000
60#define TXx9_SPSR_RBSI 0x4000
61#define TXx9_SPSR_TBS_MASK 0x3800
62#define TXx9_SPSR_RBS_MASK 0x0700
63#define TXx9_SPSR_SPOE 0x0080
64#define TXx9_SPSR_IFSD 0x0008
65#define TXx9_SPSR_SIDLE 0x0004
66#define TXx9_SPSR_STRDY 0x0002
67#define TXx9_SPSR_SRRDY 0x0001
68
69
70struct txx9spi {
71 struct workqueue_struct *workqueue;
72 struct work_struct work;
73 spinlock_t lock; /* protect 'queue' */
74 struct list_head queue;
75 wait_queue_head_t waitq;
76 void __iomem *membase;
77 int irq;
78 int baseclk;
79 struct clk *clk;
80 u32 max_speed_hz, min_speed_hz;
81 int last_chipselect;
82 int last_chipselect_val;
83};
84
85static u32 txx9spi_rd(struct txx9spi *c, int reg)
86{
87 return __raw_readl(c->membase + reg);
88}
89static void txx9spi_wr(struct txx9spi *c, u32 val, int reg)
90{
91 __raw_writel(val, c->membase + reg);
92}
93
94static void txx9spi_cs_func(struct spi_device *spi, struct txx9spi *c,
95 int on, unsigned int cs_delay)
96{
97 int val = (spi->mode & SPI_CS_HIGH) ? on : !on;
98 if (on) {
99 /* deselect the chip with cs_change hint in last transfer */
100 if (c->last_chipselect >= 0)
101 gpio_set_value(c->last_chipselect,
102 !c->last_chipselect_val);
103 c->last_chipselect = spi->chip_select;
104 c->last_chipselect_val = val;
105 } else {
106 c->last_chipselect = -1;
107 ndelay(cs_delay); /* CS Hold Time */
108 }
109 gpio_set_value(spi->chip_select, val);
110 ndelay(cs_delay); /* CS Setup Time / CS Recovery Time */
111}
112
113/* the spi->mode bits understood by this driver: */
114#define MODEBITS (SPI_CS_HIGH|SPI_CPOL|SPI_CPHA)
115
116static int txx9spi_setup(struct spi_device *spi)
117{
118 struct txx9spi *c = spi_master_get_devdata(spi->master);
119 u8 bits_per_word;
120
121 if (spi->mode & ~MODEBITS)
122 return -EINVAL;
123
124 if (!spi->max_speed_hz
125 || spi->max_speed_hz > c->max_speed_hz
126 || spi->max_speed_hz < c->min_speed_hz)
127 return -EINVAL;
128
129 bits_per_word = spi->bits_per_word ? : 8;
130 if (bits_per_word != 8 && bits_per_word != 16)
131 return -EINVAL;
132
133 if (gpio_direction_output(spi->chip_select,
134 !(spi->mode & SPI_CS_HIGH))) {
135 dev_err(&spi->dev, "Cannot setup GPIO for chipselect.\n");
136 return -EINVAL;
137 }
138
139 /* deselect chip */
140 spin_lock(&c->lock);
141 txx9spi_cs_func(spi, c, 0, (NSEC_PER_SEC / 2) / spi->max_speed_hz);
142 spin_unlock(&c->lock);
143
144 return 0;
145}
146
147static irqreturn_t txx9spi_interrupt(int irq, void *dev_id)
148{
149 struct txx9spi *c = dev_id;
150
151 /* disable rx intr */
152 txx9spi_wr(c, txx9spi_rd(c, TXx9_SPCR0) & ~TXx9_SPCR0_RBSIE,
153 TXx9_SPCR0);
154 wake_up(&c->waitq);
155 return IRQ_HANDLED;
156}
157
158static void txx9spi_work_one(struct txx9spi *c, struct spi_message *m)
159{
160 struct spi_device *spi = m->spi;
161 struct spi_transfer *t;
162 unsigned int cs_delay;
163 unsigned int cs_change = 1;
164 int status = 0;
165 u32 mcr;
166 u32 prev_speed_hz = 0;
167 u8 prev_bits_per_word = 0;
168
169 /* CS setup/hold/recovery time in nsec */
170 cs_delay = 100 + (NSEC_PER_SEC / 2) / spi->max_speed_hz;
171
172 mcr = txx9spi_rd(c, TXx9_SPMCR);
173 if (unlikely((mcr & TXx9_SPMCR_OPMODE) == TXx9_SPMCR_ACTIVE)) {
174 dev_err(&spi->dev, "Bad mode.\n");
175 status = -EIO;
176 goto exit;
177 }
178 mcr &= ~(TXx9_SPMCR_OPMODE | TXx9_SPMCR_SPSTP | TXx9_SPMCR_BCLR);
179
180 /* enter config mode */
181 txx9spi_wr(c, mcr | TXx9_SPMCR_CONFIG | TXx9_SPMCR_BCLR, TXx9_SPMCR);
182 txx9spi_wr(c, TXx9_SPCR0_SBOS
183 | ((spi->mode & SPI_CPOL) ? TXx9_SPCR0_SPOL : 0)
184 | ((spi->mode & SPI_CPHA) ? TXx9_SPCR0_SPHA : 0)
185 | 0x08,
186 TXx9_SPCR0);
187
188 list_for_each_entry (t, &m->transfers, transfer_list) {
189 const void *txbuf = t->tx_buf;
190 void *rxbuf = t->rx_buf;
191 u32 data;
192 unsigned int len = t->len;
193 unsigned int wsize;
194 u32 speed_hz = t->speed_hz ? : spi->max_speed_hz;
195 u8 bits_per_word = t->bits_per_word ? : spi->bits_per_word;
196
197 bits_per_word = bits_per_word ? : 8;
198 wsize = bits_per_word >> 3; /* in bytes */
199
200 if (prev_speed_hz != speed_hz
201 || prev_bits_per_word != bits_per_word) {
202 u32 n = (c->baseclk + speed_hz - 1) / speed_hz;
203 if (n < 1)
204 n = 1;
205 else if (n > 0xff)
206 n = 0xff;
207 /* enter config mode */
208 txx9spi_wr(c, mcr | TXx9_SPMCR_CONFIG | TXx9_SPMCR_BCLR,
209 TXx9_SPMCR);
210 txx9spi_wr(c, (n << 8) | bits_per_word, TXx9_SPCR1);
211 /* enter active mode */
212 txx9spi_wr(c, mcr | TXx9_SPMCR_ACTIVE, TXx9_SPMCR);
213
214 prev_speed_hz = speed_hz;
215 prev_bits_per_word = bits_per_word;
216 }
217
218 if (cs_change)
219 txx9spi_cs_func(spi, c, 1, cs_delay);
220 cs_change = t->cs_change;
221 while (len) {
222 unsigned int count = SPI_FIFO_SIZE;
223 int i;
224 u32 cr0;
225
226 if (len < count * wsize)
227 count = len / wsize;
228 /* now tx must be idle... */
229 while (!(txx9spi_rd(c, TXx9_SPSR) & TXx9_SPSR_SIDLE))
230 cpu_relax();
231 cr0 = txx9spi_rd(c, TXx9_SPCR0);
232 cr0 &= ~TXx9_SPCR0_RXIFL_MASK;
233 cr0 |= (count - 1) << 12;
234 /* enable rx intr */
235 cr0 |= TXx9_SPCR0_RBSIE;
236 txx9spi_wr(c, cr0, TXx9_SPCR0);
237 /* send */
238 for (i = 0; i < count; i++) {
239 if (txbuf) {
240 data = (wsize == 1)
241 ? *(const u8 *)txbuf
242 : *(const u16 *)txbuf;
243 txx9spi_wr(c, data, TXx9_SPDR);
244 txbuf += wsize;
245 } else
246 txx9spi_wr(c, 0, TXx9_SPDR);
247 }
248 /* wait all rx data */
249 wait_event(c->waitq,
250 txx9spi_rd(c, TXx9_SPSR) & TXx9_SPSR_RBSI);
251 /* receive */
252 for (i = 0; i < count; i++) {
253 data = txx9spi_rd(c, TXx9_SPDR);
254 if (rxbuf) {
255 if (wsize == 1)
256 *(u8 *)rxbuf = data;
257 else
258 *(u16 *)rxbuf = data;
259 rxbuf += wsize;
260 }
261 }
262 len -= count * wsize;
263 }
264 m->actual_length += t->len;
265 if (t->delay_usecs)
266 udelay(t->delay_usecs);
267
268 if (!cs_change)
269 continue;
270 if (t->transfer_list.next == &m->transfers)
271 break;
272 /* sometimes a short mid-message deselect of the chip
273 * may be needed to terminate a mode or command
274 */
275 txx9spi_cs_func(spi, c, 0, cs_delay);
276 }
277
278exit:
279 m->status = status;
280 m->complete(m->context);
281
282 /* normally deactivate chipselect ... unless no error and
283 * cs_change has hinted that the next message will probably
284 * be for this chip too.
285 */
286 if (!(status == 0 && cs_change))
287 txx9spi_cs_func(spi, c, 0, cs_delay);
288
289 /* enter config mode */
290 txx9spi_wr(c, mcr | TXx9_SPMCR_CONFIG | TXx9_SPMCR_BCLR, TXx9_SPMCR);
291}
292
293static void txx9spi_work(struct work_struct *work)
294{
295 struct txx9spi *c = container_of(work, struct txx9spi, work);
296 unsigned long flags;
297
298 spin_lock_irqsave(&c->lock, flags);
299 while (!list_empty(&c->queue)) {
300 struct spi_message *m;
301
302 m = container_of(c->queue.next, struct spi_message, queue);
303 list_del_init(&m->queue);
304 spin_unlock_irqrestore(&c->lock, flags);
305
306 txx9spi_work_one(c, m);
307
308 spin_lock_irqsave(&c->lock, flags);
309 }
310 spin_unlock_irqrestore(&c->lock, flags);
311}
312
313static int txx9spi_transfer(struct spi_device *spi, struct spi_message *m)
314{
315 struct spi_master *master = spi->master;
316 struct txx9spi *c = spi_master_get_devdata(master);
317 struct spi_transfer *t;
318 unsigned long flags;
319
320 m->actual_length = 0;
321
322 /* check each transfer's parameters */
323 list_for_each_entry (t, &m->transfers, transfer_list) {
324 u32 speed_hz = t->speed_hz ? : spi->max_speed_hz;
325 u8 bits_per_word = t->bits_per_word ? : spi->bits_per_word;
326
327 bits_per_word = bits_per_word ? : 8;
328 if (!t->tx_buf && !t->rx_buf && t->len)
329 return -EINVAL;
330 if (bits_per_word != 8 && bits_per_word != 16)
331 return -EINVAL;
332 if (t->len & ((bits_per_word >> 3) - 1))
333 return -EINVAL;
334 if (speed_hz < c->min_speed_hz || speed_hz > c->max_speed_hz)
335 return -EINVAL;
336 }
337
338 spin_lock_irqsave(&c->lock, flags);
339 list_add_tail(&m->queue, &c->queue);
340 queue_work(c->workqueue, &c->work);
341 spin_unlock_irqrestore(&c->lock, flags);
342
343 return 0;
344}
345
346static int __init txx9spi_probe(struct platform_device *dev)
347{
348 struct spi_master *master;
349 struct txx9spi *c;
350 struct resource *res;
351 int ret = -ENODEV;
352 u32 mcr;
353
354 master = spi_alloc_master(&dev->dev, sizeof(*c));
355 if (!master)
356 return ret;
357 c = spi_master_get_devdata(master);
358 c->irq = -1;
359 platform_set_drvdata(dev, master);
360
361 INIT_WORK(&c->work, txx9spi_work);
362 spin_lock_init(&c->lock);
363 INIT_LIST_HEAD(&c->queue);
364 init_waitqueue_head(&c->waitq);
365
366 c->clk = clk_get(&dev->dev, "spi-baseclk");
367 if (IS_ERR(c->clk)) {
368 ret = PTR_ERR(c->clk);
369 c->clk = NULL;
370 goto exit;
371 }
372 ret = clk_enable(c->clk);
373 if (ret) {
374 clk_put(c->clk);
375 c->clk = NULL;
376 goto exit;
377 }
378 c->baseclk = clk_get_rate(c->clk);
379 c->min_speed_hz = (c->baseclk + 0xff - 1) / 0xff;
380 c->max_speed_hz = c->baseclk;
381
382 res = platform_get_resource(dev, IORESOURCE_MEM, 0);
383 if (!res)
384 goto exit;
385 c->membase = ioremap(res->start, res->end - res->start + 1);
386 if (!c->membase)
387 goto exit;
388
389 /* enter config mode */
390 mcr = txx9spi_rd(c, TXx9_SPMCR);
391 mcr &= ~(TXx9_SPMCR_OPMODE | TXx9_SPMCR_SPSTP | TXx9_SPMCR_BCLR);
392 txx9spi_wr(c, mcr | TXx9_SPMCR_CONFIG | TXx9_SPMCR_BCLR, TXx9_SPMCR);
393
394 c->irq = platform_get_irq(dev, 0);
395 if (c->irq < 0)
396 goto exit;
397 ret = request_irq(c->irq, txx9spi_interrupt, 0, dev->name, c);
398 if (ret) {
399 c->irq = -1;
400 goto exit;
401 }
402
403 c->workqueue = create_singlethread_workqueue(master->cdev.dev->bus_id);
404 if (!c->workqueue)
405 goto exit;
406 c->last_chipselect = -1;
407
408 dev_info(&dev->dev, "at %#llx, irq %d, %dMHz\n",
409 (unsigned long long)res->start, c->irq,
410 (c->baseclk + 500000) / 1000000);
411
412 master->bus_num = dev->id;
413 master->setup = txx9spi_setup;
414 master->transfer = txx9spi_transfer;
415 master->num_chipselect = (u16)UINT_MAX; /* any GPIO numbers */
416
417 ret = spi_register_master(master);
418 if (ret)
419 goto exit;
420 return 0;
421exit:
422 if (c->workqueue)
423 destroy_workqueue(c->workqueue);
424 if (c->irq >= 0)
425 free_irq(c->irq, c);
426 if (c->membase)
427 iounmap(c->membase);
428 if (c->clk) {
429 clk_disable(c->clk);
430 clk_put(c->clk);
431 }
432 platform_set_drvdata(dev, NULL);
433 spi_master_put(master);
434 return ret;
435}
436
/*
 * Unbind: release everything probe acquired, in reverse order.
 * The master is unregistered first so no new messages can arrive,
 * then the workqueue is destroyed (flushing pending work), and only
 * then are the IRQ, register mapping and clock released.
 */
static int __exit txx9spi_remove(struct platform_device *dev)
{
	/* extra reference keeps the master alive through unregistration */
	struct spi_master *master = spi_master_get(platform_get_drvdata(dev));
	struct txx9spi *c = spi_master_get_devdata(master);

	spi_unregister_master(master);
	platform_set_drvdata(dev, NULL);
	destroy_workqueue(c->workqueue);
	free_irq(c->irq, c);
	iounmap(c->membase);
	clk_disable(c->clk);
	clk_put(c->clk);
	spi_master_put(master);	/* drop the reference taken above */
	return 0;
}
452
/* No .probe member here: the driver is bound via platform_driver_probe()
 * below, which allows txx9spi_probe() to stay in __init memory.
 */
static struct platform_driver txx9spi_driver = {
	.remove = __exit_p(txx9spi_remove),
	.driver = {
		.name = "txx9spi",
		.owner = THIS_MODULE,
	},
};

static int __init txx9spi_init(void)
{
	return platform_driver_probe(&txx9spi_driver, txx9spi_probe);
}
/* subsys_initcall: register the SPI bus before ordinary device drivers */
subsys_initcall(txx9spi_init);

static void __exit txx9spi_exit(void)
{
	platform_driver_unregister(&txx9spi_driver);
}
module_exit(txx9spi_exit);

MODULE_DESCRIPTION("TXx9 SPI Driver");
MODULE_LICENSE("GPL");
diff --git a/drivers/spi/spidev.c b/drivers/spi/spidev.c
index d04242aee40d..38b60ad0eda0 100644
--- a/drivers/spi/spidev.c
+++ b/drivers/spi/spidev.c
@@ -181,7 +181,8 @@ static int spidev_message(struct spidev_data *spidev,
181 } 181 }
182 if (u_tmp->tx_buf) { 182 if (u_tmp->tx_buf) {
183 k_tmp->tx_buf = buf; 183 k_tmp->tx_buf = buf;
184 if (copy_from_user(buf, (const u8 __user *)u_tmp->tx_buf, 184 if (copy_from_user(buf, (const u8 __user *)
185 (ptrdiff_t) u_tmp->tx_buf,
185 u_tmp->len)) 186 u_tmp->len))
186 goto done; 187 goto done;
187 } 188 }
@@ -213,7 +214,8 @@ static int spidev_message(struct spidev_data *spidev,
213 buf = spidev->buffer; 214 buf = spidev->buffer;
214 for (n = n_xfers, u_tmp = u_xfers; n; n--, u_tmp++) { 215 for (n = n_xfers, u_tmp = u_xfers; n; n--, u_tmp++) {
215 if (u_tmp->rx_buf) { 216 if (u_tmp->rx_buf) {
216 if (__copy_to_user((u8 __user *)u_tmp->rx_buf, buf, 217 if (__copy_to_user((u8 __user *)
218 (ptrdiff_t) u_tmp->rx_buf, buf,
217 u_tmp->len)) { 219 u_tmp->len)) {
218 status = -EFAULT; 220 status = -EFAULT;
219 goto done; 221 goto done;
diff --git a/drivers/spi/tle62x0.c b/drivers/spi/tle62x0.c
new file mode 100644
index 000000000000..6da58ca48b33
--- /dev/null
+++ b/drivers/spi/tle62x0.c
@@ -0,0 +1,328 @@
1/*
2 * tle62x0.c -- support Infineon TLE62x0 driver chips
3 *
4 * Copyright (c) 2007 Simtec Electronics
5 * Ben Dooks, <ben@simtec.co.uk>
6 *
7 * This program is free software; you can redistribute it and/or modify
8 * it under the terms of the GNU General Public License version 2 as
9 * published by the Free Software Foundation.
10 */
11
12#include <linux/device.h>
13#include <linux/kernel.h>
14
15#include <linux/spi/spi.h>
16#include <linux/spi/tle62x0.h>
17
18
/* First byte of every SPI transfer selects the chip operation. */
#define CMD_READ	0x00	/* read back per-output diagnosis bits */
#define CMD_SET		0xff	/* write the output state */

/* 2-bit diagnosis code reported for each output
 * (see decode_fault() for the letter each code maps to). */
#define DIAG_NORMAL	0x03
#define DIAG_OVERLOAD	0x02
#define DIAG_OPEN	0x01
#define DIAG_SHORTGND	0x00

/* Per-device driver state, allocated in tle62x0_probe(). */
struct tle62x0_state {
	struct spi_device	*us;		/* the SPI device we drive */
	struct mutex		lock;		/* serialises state and bus access */
	unsigned int		nr_gpio;	/* number of outputs (16 handled specially) */
	unsigned int		gpio_state;	/* cached output state; bit N is attribute gpioN+1 */

	unsigned char		tx_buff[4];	/* SPI transmit scratch buffer */
	unsigned char		rx_buff[4];	/* SPI receive scratch buffer */
};

/* forward declaration: maps a gpioN attribute back to its output index */
static int to_gpio_num(struct device_attribute *attr);
38
39static inline int tle62x0_write(struct tle62x0_state *st)
40{
41 unsigned char *buff = st->tx_buff;
42 unsigned int gpio_state = st->gpio_state;
43
44 buff[0] = CMD_SET;
45
46 if (st->nr_gpio == 16) {
47 buff[1] = gpio_state >> 8;
48 buff[2] = gpio_state;
49 } else {
50 buff[1] = gpio_state;
51 }
52
53 dev_dbg(&st->us->dev, "buff %02x,%02x,%02x\n",
54 buff[0], buff[1], buff[2]);
55
56 return spi_write(st->us, buff, (st->nr_gpio == 16) ? 3 : 2);
57}
58
59static inline int tle62x0_read(struct tle62x0_state *st)
60{
61 unsigned char *txbuff = st->tx_buff;
62 struct spi_transfer xfer = {
63 .tx_buf = txbuff,
64 .rx_buf = st->rx_buff,
65 .len = (st->nr_gpio * 2) / 8,
66 };
67 struct spi_message msg;
68
69 txbuff[0] = CMD_READ;
70 txbuff[1] = 0x00;
71 txbuff[2] = 0x00;
72 txbuff[3] = 0x00;
73
74 spi_message_init(&msg);
75 spi_message_add_tail(&xfer, &msg);
76
77 return spi_sync(st->us, &msg);
78}
79
80static unsigned char *decode_fault(unsigned int fault_code)
81{
82 fault_code &= 3;
83
84 switch (fault_code) {
85 case DIAG_NORMAL:
86 return "N";
87 case DIAG_OVERLOAD:
88 return "V";
89 case DIAG_OPEN:
90 return "O";
91 case DIAG_SHORTGND:
92 return "G";
93 }
94
95 return "?";
96}
97
98static ssize_t tle62x0_status_show(struct device *dev,
99 struct device_attribute *attr, char *buf)
100{
101 struct tle62x0_state *st = dev_get_drvdata(dev);
102 char *bp = buf;
103 unsigned char *buff = st->rx_buff;
104 unsigned long fault = 0;
105 int ptr;
106 int ret;
107
108 mutex_lock(&st->lock);
109 ret = tle62x0_read(st);
110
111 dev_dbg(dev, "tle62x0_read() returned %d\n", ret);
112
113 for (ptr = 0; ptr < (st->nr_gpio * 2)/8; ptr += 1) {
114 fault <<= 8;
115 fault |= ((unsigned long)buff[ptr]);
116
117 dev_dbg(dev, "byte %d is %02x\n", ptr, buff[ptr]);
118 }
119
120 for (ptr = 0; ptr < st->nr_gpio; ptr++) {
121 bp += sprintf(bp, "%s ", decode_fault(fault >> (ptr * 2)));
122 }
123
124 *bp++ = '\n';
125
126 mutex_unlock(&st->lock);
127 return bp - buf;
128}
129
/* read-only chip diagnosis attribute, backed by tle62x0_status_show() */
static DEVICE_ATTR(status_show, S_IRUGO, tle62x0_status_show, NULL);
131
132static ssize_t tle62x0_gpio_show(struct device *dev,
133 struct device_attribute *attr, char *buf)
134{
135 struct tle62x0_state *st = dev_get_drvdata(dev);
136 int gpio_num = to_gpio_num(attr);
137 int value;
138
139 mutex_lock(&st->lock);
140 value = (st->gpio_state >> gpio_num) & 1;
141 mutex_unlock(&st->lock);
142
143 return snprintf(buf, PAGE_SIZE, "%d", value);
144}
145
146static ssize_t tle62x0_gpio_store(struct device *dev,
147 struct device_attribute *attr,
148 const char *buf, size_t len)
149{
150 struct tle62x0_state *st = dev_get_drvdata(dev);
151 int gpio_num = to_gpio_num(attr);
152 unsigned long val;
153 char *endp;
154
155 val = simple_strtoul(buf, &endp, 0);
156 if (buf == endp)
157 return -EINVAL;
158
159 dev_dbg(dev, "setting gpio %d to %ld\n", gpio_num, val);
160
161 mutex_lock(&st->lock);
162
163 if (val)
164 st->gpio_state |= 1 << gpio_num;
165 else
166 st->gpio_state &= ~(1 << gpio_num);
167
168 tle62x0_write(st);
169 mutex_unlock(&st->lock);
170
171 return len;
172}
173
/* One read/write sysfs attribute per output; attribute gpioN controls
 * bit N-1 of gpio_state (the mapping is done by to_gpio_num()). */
static DEVICE_ATTR(gpio1, S_IWUSR|S_IRUGO,
		tle62x0_gpio_show, tle62x0_gpio_store);
static DEVICE_ATTR(gpio2, S_IWUSR|S_IRUGO,
		tle62x0_gpio_show, tle62x0_gpio_store);
static DEVICE_ATTR(gpio3, S_IWUSR|S_IRUGO,
		tle62x0_gpio_show, tle62x0_gpio_store);
static DEVICE_ATTR(gpio4, S_IWUSR|S_IRUGO,
		tle62x0_gpio_show, tle62x0_gpio_store);
static DEVICE_ATTR(gpio5, S_IWUSR|S_IRUGO,
		tle62x0_gpio_show, tle62x0_gpio_store);
static DEVICE_ATTR(gpio6, S_IWUSR|S_IRUGO,
		tle62x0_gpio_show, tle62x0_gpio_store);
static DEVICE_ATTR(gpio7, S_IWUSR|S_IRUGO,
		tle62x0_gpio_show, tle62x0_gpio_store);
static DEVICE_ATTR(gpio8, S_IWUSR|S_IRUGO,
		tle62x0_gpio_show, tle62x0_gpio_store);
static DEVICE_ATTR(gpio9, S_IWUSR|S_IRUGO,
		tle62x0_gpio_show, tle62x0_gpio_store);
static DEVICE_ATTR(gpio10, S_IWUSR|S_IRUGO,
		tle62x0_gpio_show, tle62x0_gpio_store);
static DEVICE_ATTR(gpio11, S_IWUSR|S_IRUGO,
		tle62x0_gpio_show, tle62x0_gpio_store);
static DEVICE_ATTR(gpio12, S_IWUSR|S_IRUGO,
		tle62x0_gpio_show, tle62x0_gpio_store);
static DEVICE_ATTR(gpio13, S_IWUSR|S_IRUGO,
		tle62x0_gpio_show, tle62x0_gpio_store);
static DEVICE_ATTR(gpio14, S_IWUSR|S_IRUGO,
		tle62x0_gpio_show, tle62x0_gpio_store);
static DEVICE_ATTR(gpio15, S_IWUSR|S_IRUGO,
		tle62x0_gpio_show, tle62x0_gpio_store);
static DEVICE_ATTR(gpio16, S_IWUSR|S_IRUGO,
		tle62x0_gpio_show, tle62x0_gpio_store);

/* Lookup table for to_gpio_num(): entry N is the attribute for output
 * index N (zero-based); order must match the gpioN names above. */
static struct device_attribute *gpio_attrs[] = {
	[0] = &dev_attr_gpio1,
	[1] = &dev_attr_gpio2,
	[2] = &dev_attr_gpio3,
	[3] = &dev_attr_gpio4,
	[4] = &dev_attr_gpio5,
	[5] = &dev_attr_gpio6,
	[6] = &dev_attr_gpio7,
	[7] = &dev_attr_gpio8,
	[8] = &dev_attr_gpio9,
	[9] = &dev_attr_gpio10,
	[10] = &dev_attr_gpio11,
	[11] = &dev_attr_gpio12,
	[12] = &dev_attr_gpio13,
	[13] = &dev_attr_gpio14,
	[14] = &dev_attr_gpio15,
	[15] = &dev_attr_gpio16
};
225
226static int to_gpio_num(struct device_attribute *attr)
227{
228 int ptr;
229
230 for (ptr = 0; ptr < ARRAY_SIZE(gpio_attrs); ptr++) {
231 if (gpio_attrs[ptr] == attr)
232 return ptr;
233 }
234
235 return -1;
236}
237
238static int __devinit tle62x0_probe(struct spi_device *spi)
239{
240 struct tle62x0_state *st;
241 struct tle62x0_pdata *pdata;
242 int ptr;
243 int ret;
244
245 pdata = spi->dev.platform_data;
246 if (pdata == NULL) {
247 dev_err(&spi->dev, "no device data specified\n");
248 return -EINVAL;
249 }
250
251 st = kzalloc(sizeof(struct tle62x0_state), GFP_KERNEL);
252 if (st == NULL) {
253 dev_err(&spi->dev, "no memory for device state\n");
254 return -ENOMEM;
255 }
256
257 st->us = spi;
258 st->nr_gpio = pdata->gpio_count;
259 st->gpio_state = pdata->init_state;
260
261 mutex_init(&st->lock);
262
263 ret = device_create_file(&spi->dev, &dev_attr_status_show);
264 if (ret) {
265 dev_err(&spi->dev, "cannot create status attribute\n");
266 goto err_status;
267 }
268
269 for (ptr = 0; ptr < pdata->gpio_count; ptr++) {
270 ret = device_create_file(&spi->dev, gpio_attrs[ptr]);
271 if (ret) {
272 dev_err(&spi->dev, "cannot create gpio attribute\n");
273 goto err_gpios;
274 }
275 }
276
277 /* tle62x0_write(st); */
278 spi_set_drvdata(spi, st);
279 return 0;
280
281 err_gpios:
282 for (; ptr > 0; ptr--)
283 device_remove_file(&spi->dev, gpio_attrs[ptr]);
284
285 device_remove_file(&spi->dev, &dev_attr_status_show);
286
287 err_status:
288 kfree(st);
289 return ret;
290}
291
292static int __devexit tle62x0_remove(struct spi_device *spi)
293{
294 struct tle62x0_state *st = spi_get_drvdata(spi);
295 int ptr;
296
297 for (ptr = 0; ptr < st->nr_gpio; ptr++)
298 device_remove_file(&spi->dev, gpio_attrs[ptr]);
299
300 kfree(st);
301 return 0;
302}
303
/* SPI protocol driver glue: bound by name match against "tle62x0". */
static struct spi_driver tle62x0_driver = {
	.driver = {
		.name = "tle62x0",
		.owner = THIS_MODULE,
	},
	.probe = tle62x0_probe,
	.remove = __devexit_p(tle62x0_remove),
};

static __init int tle62x0_init(void)
{
	return spi_register_driver(&tle62x0_driver);
}

static __exit void tle62x0_exit(void)
{
	spi_unregister_driver(&tle62x0_driver);
}

module_init(tle62x0_init);
module_exit(tle62x0_exit);

MODULE_AUTHOR("Ben Dooks <ben@simtec.co.uk>");
MODULE_DESCRIPTION("TLE62x0 SPI driver");
MODULE_LICENSE("GPL v2");
diff --git a/drivers/spi/xilinx_spi.c b/drivers/spi/xilinx_spi.c
new file mode 100644
index 000000000000..f0bf9a68e96b
--- /dev/null
+++ b/drivers/spi/xilinx_spi.c
@@ -0,0 +1,434 @@
1/*
2 * xilinx_spi.c
3 *
4 * Xilinx SPI controller driver (master mode only)
5 *
6 * Author: MontaVista Software, Inc.
7 * source@mvista.com
8 *
9 * 2002-2007 (c) MontaVista Software, Inc. This file is licensed under the
10 * terms of the GNU General Public License version 2. This program is licensed
11 * "as is" without any warranty of any kind, whether express or implied.
12 */
13
14#include <linux/module.h>
15#include <linux/init.h>
16#include <linux/interrupt.h>
17#include <linux/platform_device.h>
18#include <linux/spi/spi.h>
19#include <linux/spi/spi_bitbang.h>
20#include <linux/io.h>
21
22#include <syslib/virtex_devices.h>
23
24#define XILINX_SPI_NAME "xspi"
25
26/* Register definitions as per "OPB Serial Peripheral Interface (SPI) (v1.00e)
27 * Product Specification", DS464
28 */
29#define XSPI_CR_OFFSET 0x62 /* 16-bit Control Register */
30
31#define XSPI_CR_ENABLE 0x02
32#define XSPI_CR_MASTER_MODE 0x04
33#define XSPI_CR_CPOL 0x08
34#define XSPI_CR_CPHA 0x10
35#define XSPI_CR_MODE_MASK (XSPI_CR_CPHA | XSPI_CR_CPOL)
36#define XSPI_CR_TXFIFO_RESET 0x20
37#define XSPI_CR_RXFIFO_RESET 0x40
38#define XSPI_CR_MANUAL_SSELECT 0x80
39#define XSPI_CR_TRANS_INHIBIT 0x100
40
41#define XSPI_SR_OFFSET 0x67 /* 8-bit Status Register */
42
43#define XSPI_SR_RX_EMPTY_MASK 0x01 /* Receive FIFO is empty */
44#define XSPI_SR_RX_FULL_MASK 0x02 /* Receive FIFO is full */
45#define XSPI_SR_TX_EMPTY_MASK 0x04 /* Transmit FIFO is empty */
46#define XSPI_SR_TX_FULL_MASK 0x08 /* Transmit FIFO is full */
47#define XSPI_SR_MODE_FAULT_MASK 0x10 /* Mode fault error */
48
49#define XSPI_TXD_OFFSET 0x6b /* 8-bit Data Transmit Register */
50#define XSPI_RXD_OFFSET 0x6f /* 8-bit Data Receive Register */
51
52#define XSPI_SSR_OFFSET 0x70 /* 32-bit Slave Select Register */
53
54/* Register definitions as per "OPB IPIF (v3.01c) Product Specification", DS414
55 * IPIF registers are 32 bit
56 */
57#define XIPIF_V123B_DGIER_OFFSET 0x1c /* IPIF global int enable reg */
58#define XIPIF_V123B_GINTR_ENABLE 0x80000000
59
60#define XIPIF_V123B_IISR_OFFSET 0x20 /* IPIF interrupt status reg */
61#define XIPIF_V123B_IIER_OFFSET 0x28 /* IPIF interrupt enable reg */
62
63#define XSPI_INTR_MODE_FAULT 0x01 /* Mode fault error */
64#define XSPI_INTR_SLAVE_MODE_FAULT 0x02 /* Selected as slave while
65 * disabled */
66#define XSPI_INTR_TX_EMPTY 0x04 /* TxFIFO is empty */
67#define XSPI_INTR_TX_UNDERRUN 0x08 /* TxFIFO was underrun */
68#define XSPI_INTR_RX_FULL 0x10 /* RxFIFO is full */
69#define XSPI_INTR_RX_OVERRUN 0x20 /* RxFIFO was overrun */
70
71#define XIPIF_V123B_RESETR_OFFSET 0x40 /* IPIF reset register */
72#define XIPIF_V123B_RESET_MASK 0x0a /* the value to write */
73
/* Per-controller driver state (spi_master drvdata). */
struct xilinx_spi {
	/* bitbang has to be first */
	struct spi_bitbang bitbang;
	struct completion done;	/* signalled by the ISR when a transfer finishes */

	void __iomem	*regs;	/* virt. address of the control registers */

	u32		irq;	/* NOTE(review): platform_get_irq() can return a
				 * negative errno, which an unsigned field cannot
				 * hold -- confirm whether this should be int */

	u32		speed_hz; /* SCK has a fixed frequency of speed_hz Hz */

	u8 *rx_ptr;		/* pointer in the Rx buffer */
	const u8 *tx_ptr;	/* pointer in the Tx buffer */
	int remaining_bytes;	/* the number of bytes left to transfer */
};
89
/* Bring the SPI core into a known idle state: reset, mask all
 * interrupts (keeping the global IPIF gate open), deselect all slaves
 * and leave the controller enabled in master mode with the
 * transmitter inhibited.  Called once at probe time.
 */
static void xspi_init_hw(void __iomem *regs_base)
{
	/* Reset the SPI device */
	out_be32(regs_base + XIPIF_V123B_RESETR_OFFSET,
		 XIPIF_V123B_RESET_MASK);
	/* Disable all the interrupts just in case */
	out_be32(regs_base + XIPIF_V123B_IIER_OFFSET, 0);
	/* Enable the global IPIF interrupt */
	out_be32(regs_base + XIPIF_V123B_DGIER_OFFSET,
		 XIPIF_V123B_GINTR_ENABLE);
	/* Deselect the slave on the SPI bus */
	out_be32(regs_base + XSPI_SSR_OFFSET, 0xffff);
	/* Disable the transmitter, enable Manual Slave Select Assertion,
	 * put SPI controller into master mode, and enable it */
	out_be16(regs_base + XSPI_CR_OFFSET,
		 XSPI_CR_TRANS_INHIBIT | XSPI_CR_MANUAL_SSELECT
		 | XSPI_CR_MASTER_MODE | XSPI_CR_ENABLE);
}
108
109static void xilinx_spi_chipselect(struct spi_device *spi, int is_on)
110{
111 struct xilinx_spi *xspi = spi_master_get_devdata(spi->master);
112
113 if (is_on == BITBANG_CS_INACTIVE) {
114 /* Deselect the slave on the SPI bus */
115 out_be32(xspi->regs + XSPI_SSR_OFFSET, 0xffff);
116 } else if (is_on == BITBANG_CS_ACTIVE) {
117 /* Set the SPI clock phase and polarity */
118 u16 cr = in_be16(xspi->regs + XSPI_CR_OFFSET)
119 & ~XSPI_CR_MODE_MASK;
120 if (spi->mode & SPI_CPHA)
121 cr |= XSPI_CR_CPHA;
122 if (spi->mode & SPI_CPOL)
123 cr |= XSPI_CR_CPOL;
124 out_be16(xspi->regs + XSPI_CR_OFFSET, cr);
125
126 /* We do not check spi->max_speed_hz here as the SPI clock
127 * frequency is not software programmable (the IP block design
128 * parameter)
129 */
130
131 /* Activate the chip select */
132 out_be32(xspi->regs + XSPI_SSR_OFFSET,
133 ~(0x0001 << spi->chip_select));
134 }
135}
136
137/* spi_bitbang requires custom setup_transfer() to be defined if there is a
138 * custom txrx_bufs(). We have nothing to setup here as the SPI IP block
139 * supports just 8 bits per word, and SPI clock can't be changed in software.
140 * Check for 8 bits per word. Chip select delay calculations could be
141 * added here as soon as bitbang_work() can be made aware of the delay value.
142 */
143static int xilinx_spi_setup_transfer(struct spi_device *spi,
144 struct spi_transfer *t)
145{
146 u8 bits_per_word;
147 u32 hz;
148 struct xilinx_spi *xspi = spi_master_get_devdata(spi->master);
149
150 bits_per_word = (t) ? t->bits_per_word : spi->bits_per_word;
151 hz = (t) ? t->speed_hz : spi->max_speed_hz;
152 if (bits_per_word != 8) {
153 dev_err(&spi->dev, "%s, unsupported bits_per_word=%d\n",
154 __FUNCTION__, bits_per_word);
155 return -EINVAL;
156 }
157
158 if (hz && xspi->speed_hz > hz) {
159 dev_err(&spi->dev, "%s, unsupported clock rate %uHz\n",
160 __FUNCTION__, hz);
161 return -EINVAL;
162 }
163
164 return 0;
165}
166
167/* the spi->mode bits understood by this driver: */
168#define MODEBITS (SPI_CPOL | SPI_CPHA)
169
170static int xilinx_spi_setup(struct spi_device *spi)
171{
172 struct spi_bitbang *bitbang;
173 struct xilinx_spi *xspi;
174 int retval;
175
176 xspi = spi_master_get_devdata(spi->master);
177 bitbang = &xspi->bitbang;
178
179 if (!spi->bits_per_word)
180 spi->bits_per_word = 8;
181
182 if (spi->mode & ~MODEBITS) {
183 dev_err(&spi->dev, "%s, unsupported mode bits %x\n",
184 __FUNCTION__, spi->mode & ~MODEBITS);
185 return -EINVAL;
186 }
187
188 retval = xilinx_spi_setup_transfer(spi, NULL);
189 if (retval < 0)
190 return retval;
191
192 dev_dbg(&spi->dev, "%s, mode %d, %u bits/w, %u nsec/bit\n",
193 __FUNCTION__, spi->mode & MODEBITS, spi->bits_per_word, 0);
194
195 return 0;
196}
197
198static void xilinx_spi_fill_tx_fifo(struct xilinx_spi *xspi)
199{
200 u8 sr;
201
202 /* Fill the Tx FIFO with as many bytes as possible */
203 sr = in_8(xspi->regs + XSPI_SR_OFFSET);
204 while ((sr & XSPI_SR_TX_FULL_MASK) == 0 && xspi->remaining_bytes > 0) {
205 if (xspi->tx_ptr) {
206 out_8(xspi->regs + XSPI_TXD_OFFSET, *xspi->tx_ptr++);
207 } else {
208 out_8(xspi->regs + XSPI_TXD_OFFSET, 0);
209 }
210 xspi->remaining_bytes--;
211 sr = in_8(xspi->regs + XSPI_SR_OFFSET);
212 }
213}
214
/*
 * Execute one transfer: prime the Tx FIFO, enable the Tx-empty
 * interrupt, un-inhibit the transmitter, then sleep until the ISR
 * (xilinx_spi_irq) signals completion via xspi->done.
 * Returns the number of bytes actually transferred.
 */
static int xilinx_spi_txrx_bufs(struct spi_device *spi, struct spi_transfer *t)
{
	struct xilinx_spi *xspi = spi_master_get_devdata(spi->master);
	u32 ipif_ier;
	u16 cr;

	/* We get here with transmitter inhibited */

	xspi->tx_ptr = t->tx_buf;
	xspi->rx_ptr = t->rx_buf;
	xspi->remaining_bytes = t->len;
	INIT_COMPLETION(xspi->done);

	xilinx_spi_fill_tx_fifo(xspi);

	/* Enable the transmit empty interrupt, which we use to determine
	 * progress on the transmission.
	 */
	ipif_ier = in_be32(xspi->regs + XIPIF_V123B_IIER_OFFSET);
	out_be32(xspi->regs + XIPIF_V123B_IIER_OFFSET,
		 ipif_ier | XSPI_INTR_TX_EMPTY);

	/* Start the transfer by not inhibiting the transmitter any longer */
	cr = in_be16(xspi->regs + XSPI_CR_OFFSET) & ~XSPI_CR_TRANS_INHIBIT;
	out_be16(xspi->regs + XSPI_CR_OFFSET, cr);

	wait_for_completion(&xspi->done);

	/* Disable the transmit empty interrupt (restore the saved mask) */
	out_be32(xspi->regs + XIPIF_V123B_IIER_OFFSET, ipif_ier);

	return t->len - xspi->remaining_bytes;
}
248
249
250/* This driver supports single master mode only. Hence Tx FIFO Empty
251 * is the only interrupt we care about.
252 * Receive FIFO Overrun, Transmit FIFO Underrun, Mode Fault, and Slave Mode
253 * Fault are not to happen.
254 */
/*
 * Interrupt handler: on Tx-empty, drain the Rx FIFO, then either
 * refill the Tx FIFO (with the transmitter inhibited while refilling)
 * or, when no bytes remain, complete xspi->done to wake the sleeping
 * txrx_bufs() caller.  Always returns IRQ_HANDLED (assumes the line is
 * not shared -- it is requested without IRQF_SHARED in probe).
 */
static irqreturn_t xilinx_spi_irq(int irq, void *dev_id)
{
	struct xilinx_spi *xspi = dev_id;
	u32 ipif_isr;

	/* Get the IPIF interrupts, and clear them immediately */
	ipif_isr = in_be32(xspi->regs + XIPIF_V123B_IISR_OFFSET);
	out_be32(xspi->regs + XIPIF_V123B_IISR_OFFSET, ipif_isr);

	if (ipif_isr & XSPI_INTR_TX_EMPTY) {	/* Transmission completed */
		u16 cr;
		u8 sr;

		/* A transmit has just completed. Process received data and
		 * check for more data to transmit. Always inhibit the
		 * transmitter while the Isr refills the transmit register/FIFO,
		 * or make sure it is stopped if we're done.
		 */
		cr = in_be16(xspi->regs + XSPI_CR_OFFSET);
		out_be16(xspi->regs + XSPI_CR_OFFSET,
			 cr | XSPI_CR_TRANS_INHIBIT);

		/* Read out all the data from the Rx FIFO */
		sr = in_8(xspi->regs + XSPI_SR_OFFSET);
		while ((sr & XSPI_SR_RX_EMPTY_MASK) == 0) {
			u8 data;

			data = in_8(xspi->regs + XSPI_RXD_OFFSET);
			/* rx_ptr is NULL for transmit-only transfers */
			if (xspi->rx_ptr) {
				*xspi->rx_ptr++ = data;
			}
			sr = in_8(xspi->regs + XSPI_SR_OFFSET);
		}

		/* See if there is more data to send */
		if (xspi->remaining_bytes > 0) {
			xilinx_spi_fill_tx_fifo(xspi);
			/* Start the transfer by not inhibiting the
			 * transmitter any longer
			 */
			out_be16(xspi->regs + XSPI_CR_OFFSET, cr);
		} else {
			/* No more data to send.
			 * Indicate the transfer is completed.
			 */
			complete(&xspi->done);
		}
	}

	return IRQ_HANDLED;
}
306
307static int __init xilinx_spi_probe(struct platform_device *dev)
308{
309 int ret = 0;
310 struct spi_master *master;
311 struct xilinx_spi *xspi;
312 struct xspi_platform_data *pdata;
313 struct resource *r;
314
315 /* Get resources(memory, IRQ) associated with the device */
316 master = spi_alloc_master(&dev->dev, sizeof(struct xilinx_spi));
317
318 if (master == NULL) {
319 return -ENOMEM;
320 }
321
322 platform_set_drvdata(dev, master);
323 pdata = dev->dev.platform_data;
324
325 if (pdata == NULL) {
326 ret = -ENODEV;
327 goto put_master;
328 }
329
330 r = platform_get_resource(dev, IORESOURCE_MEM, 0);
331 if (r == NULL) {
332 ret = -ENODEV;
333 goto put_master;
334 }
335
336 xspi = spi_master_get_devdata(master);
337 xspi->bitbang.master = spi_master_get(master);
338 xspi->bitbang.chipselect = xilinx_spi_chipselect;
339 xspi->bitbang.setup_transfer = xilinx_spi_setup_transfer;
340 xspi->bitbang.txrx_bufs = xilinx_spi_txrx_bufs;
341 xspi->bitbang.master->setup = xilinx_spi_setup;
342 init_completion(&xspi->done);
343
344 if (!request_mem_region(r->start,
345 r->end - r->start + 1, XILINX_SPI_NAME)) {
346 ret = -ENXIO;
347 goto put_master;
348 }
349
350 xspi->regs = ioremap(r->start, r->end - r->start + 1);
351 if (xspi->regs == NULL) {
352 ret = -ENOMEM;
353 goto put_master;
354 }
355
356 xspi->irq = platform_get_irq(dev, 0);
357 if (xspi->irq < 0) {
358 ret = -ENXIO;
359 goto unmap_io;
360 }
361
362 master->bus_num = pdata->bus_num;
363 master->num_chipselect = pdata->num_chipselect;
364 xspi->speed_hz = pdata->speed_hz;
365
366 /* SPI controller initializations */
367 xspi_init_hw(xspi->regs);
368
369 /* Register for SPI Interrupt */
370 ret = request_irq(xspi->irq, xilinx_spi_irq, 0, XILINX_SPI_NAME, xspi);
371 if (ret != 0)
372 goto unmap_io;
373
374 ret = spi_bitbang_start(&xspi->bitbang);
375 if (ret != 0) {
376 dev_err(&dev->dev, "spi_bitbang_start FAILED\n");
377 goto free_irq;
378 }
379
380 dev_info(&dev->dev, "at 0x%08X mapped to 0x%08X, irq=%d\n",
381 r->start, (u32)xspi->regs, xspi->irq);
382
383 return ret;
384
385free_irq:
386 free_irq(xspi->irq, xspi);
387unmap_io:
388 iounmap(xspi->regs);
389put_master:
390 spi_master_put(master);
391 return ret;
392}
393
394static int __devexit xilinx_spi_remove(struct platform_device *dev)
395{
396 struct xilinx_spi *xspi;
397 struct spi_master *master;
398
399 master = platform_get_drvdata(dev);
400 xspi = spi_master_get_devdata(master);
401
402 spi_bitbang_stop(&xspi->bitbang);
403 free_irq(xspi->irq, xspi);
404 iounmap(xspi->regs);
405 platform_set_drvdata(dev, 0);
406 spi_master_put(xspi->bitbang.master);
407
408 return 0;
409}
410
/* Platform driver glue: bound by name match against "xspi". */
static struct platform_driver xilinx_spi_driver = {
	.probe = xilinx_spi_probe,
	.remove = __devexit_p(xilinx_spi_remove),
	.driver = {
		.name = XILINX_SPI_NAME,
		.owner = THIS_MODULE,
	},
};

static int __init xilinx_spi_init(void)
{
	return platform_driver_register(&xilinx_spi_driver);
}
module_init(xilinx_spi_init);

static void __exit xilinx_spi_exit(void)
{
	platform_driver_unregister(&xilinx_spi_driver);
}
module_exit(xilinx_spi_exit);

MODULE_AUTHOR("MontaVista Software, Inc. <source@mvista.com>");
MODULE_DESCRIPTION("Xilinx SPI driver");
MODULE_LICENSE("GPL");
diff --git a/drivers/telephony/Kconfig b/drivers/telephony/Kconfig
index 8f530e68263b..5f98f673f1b6 100644
--- a/drivers/telephony/Kconfig
+++ b/drivers/telephony/Kconfig
@@ -19,6 +19,7 @@ if PHONE
19 19
20config PHONE_IXJ 20config PHONE_IXJ
21 tristate "QuickNet Internet LineJack/PhoneJack support" 21 tristate "QuickNet Internet LineJack/PhoneJack support"
22 depends ISA || PCI
22 ---help--- 23 ---help---
23 Say M if you have a telephony card manufactured by Quicknet 24 Say M if you have a telephony card manufactured by Quicknet
24 Technologies, Inc. These include the Internet PhoneJACK and 25 Technologies, Inc. These include the Internet PhoneJACK and
diff --git a/drivers/telephony/ixj.c b/drivers/telephony/ixj.c
index c7b0a357b04a..49cd9793404f 100644
--- a/drivers/telephony/ixj.c
+++ b/drivers/telephony/ixj.c
@@ -3453,7 +3453,6 @@ static void ixj_write_frame(IXJ *j)
3453{ 3453{
3454 int cnt, frame_count, dly; 3454 int cnt, frame_count, dly;
3455 IXJ_WORD dat; 3455 IXJ_WORD dat;
3456 BYTES blankword;
3457 3456
3458 frame_count = 0; 3457 frame_count = 0;
3459 if(j->flags.cidplay) { 3458 if(j->flags.cidplay) {
@@ -3501,6 +3500,8 @@ static void ixj_write_frame(IXJ *j)
3501 } 3500 }
3502 if (frame_count >= 1) { 3501 if (frame_count >= 1) {
3503 if (j->ver.low == 0x12 && j->play_mode && j->flags.play_first_frame) { 3502 if (j->ver.low == 0x12 && j->play_mode && j->flags.play_first_frame) {
3503 BYTES blankword;
3504
3504 switch (j->play_mode) { 3505 switch (j->play_mode) {
3505 case PLAYBACK_MODE_ULAW: 3506 case PLAYBACK_MODE_ULAW:
3506 case PLAYBACK_MODE_ALAW: 3507 case PLAYBACK_MODE_ALAW:
@@ -3508,6 +3509,7 @@ static void ixj_write_frame(IXJ *j)
3508 break; 3509 break;
3509 case PLAYBACK_MODE_8LINEAR: 3510 case PLAYBACK_MODE_8LINEAR:
3510 case PLAYBACK_MODE_16LINEAR: 3511 case PLAYBACK_MODE_16LINEAR:
3512 default:
3511 blankword.low = blankword.high = 0x00; 3513 blankword.low = blankword.high = 0x00;
3512 break; 3514 break;
3513 case PLAYBACK_MODE_8LINEAR_WSS: 3515 case PLAYBACK_MODE_8LINEAR_WSS:
@@ -3531,6 +3533,8 @@ static void ixj_write_frame(IXJ *j)
3531 j->flags.play_first_frame = 0; 3533 j->flags.play_first_frame = 0;
3532 } else if (j->play_codec == G723_63 && j->flags.play_first_frame) { 3534 } else if (j->play_codec == G723_63 && j->flags.play_first_frame) {
3533 for (cnt = 0; cnt < 24; cnt++) { 3535 for (cnt = 0; cnt < 24; cnt++) {
3536 BYTES blankword;
3537
3534 if(cnt == 12) { 3538 if(cnt == 12) {
3535 blankword.low = 0x02; 3539 blankword.low = 0x02;
3536 blankword.high = 0x00; 3540 blankword.high = 0x00;
@@ -4868,6 +4872,7 @@ static char daa_CR_read(IXJ *j, int cr)
4868 bytes.high = 0xB0 + cr; 4872 bytes.high = 0xB0 + cr;
4869 break; 4873 break;
4870 case SOP_PU_PULSEDIALING: 4874 case SOP_PU_PULSEDIALING:
4875 default:
4871 bytes.high = 0xF0 + cr; 4876 bytes.high = 0xF0 + cr;
4872 break; 4877 break;
4873 } 4878 }
diff --git a/drivers/usb/Kconfig b/drivers/usb/Kconfig
index 071b9675a781..7dd73546bf43 100644
--- a/drivers/usb/Kconfig
+++ b/drivers/usb/Kconfig
@@ -16,7 +16,7 @@ config USB_ARCH_HAS_HCD
16 boolean 16 boolean
17 default y if USB_ARCH_HAS_OHCI 17 default y if USB_ARCH_HAS_OHCI
18 default y if USB_ARCH_HAS_EHCI 18 default y if USB_ARCH_HAS_EHCI
19 default y if PCMCIA # sl811_cs 19 default y if PCMCIA && !M32R # sl811_cs
20 default y if ARM # SL-811 20 default y if ARM # SL-811
21 default PCI 21 default PCI
22 22
diff --git a/drivers/usb/atm/ueagle-atm.c b/drivers/usb/atm/ueagle-atm.c
index 4973e147bc79..8f046659b4e9 100644
--- a/drivers/usb/atm/ueagle-atm.c
+++ b/drivers/usb/atm/ueagle-atm.c
@@ -1168,6 +1168,7 @@ static int uea_kthread(void *data)
1168 struct uea_softc *sc = data; 1168 struct uea_softc *sc = data;
1169 int ret = -EAGAIN; 1169 int ret = -EAGAIN;
1170 1170
1171 set_freezable();
1171 uea_enters(INS_TO_USBDEV(sc)); 1172 uea_enters(INS_TO_USBDEV(sc));
1172 while (!kthread_should_stop()) { 1173 while (!kthread_should_stop()) {
1173 if (ret < 0 || sc->reset) 1174 if (ret < 0 || sc->reset)
diff --git a/drivers/usb/core/hub.c b/drivers/usb/core/hub.c
index 50e79010401c..fd74c50b1804 100644
--- a/drivers/usb/core/hub.c
+++ b/drivers/usb/core/hub.c
@@ -2728,6 +2728,7 @@ loop:
2728 2728
2729static int hub_thread(void *__unused) 2729static int hub_thread(void *__unused)
2730{ 2730{
2731 set_freezable();
2731 do { 2732 do {
2732 hub_events(); 2733 hub_events();
2733 wait_event_interruptible(khubd_wait, 2734 wait_event_interruptible(khubd_wait,
diff --git a/drivers/usb/gadget/file_storage.c b/drivers/usb/gadget/file_storage.c
index 8712ef987179..be7a1bd2823b 100644
--- a/drivers/usb/gadget/file_storage.c
+++ b/drivers/usb/gadget/file_storage.c
@@ -3434,6 +3434,9 @@ static int fsg_main_thread(void *fsg_)
3434 allow_signal(SIGKILL); 3434 allow_signal(SIGKILL);
3435 allow_signal(SIGUSR1); 3435 allow_signal(SIGUSR1);
3436 3436
3437 /* Allow the thread to be frozen */
3438 set_freezable();
3439
3437 /* Arrange for userspace references to be interpreted as kernel 3440 /* Arrange for userspace references to be interpreted as kernel
3438 * pointers. That way we can pass a kernel pointer to a routine 3441 * pointers. That way we can pass a kernel pointer to a routine
3439 * that expects a __user pointer and it will work okay. */ 3442 * that expects a __user pointer and it will work okay. */
diff --git a/drivers/usb/misc/auerswald.c b/drivers/usb/misc/auerswald.c
index 1fd5fc220cd7..42d4e6454a77 100644
--- a/drivers/usb/misc/auerswald.c
+++ b/drivers/usb/misc/auerswald.c
@@ -630,7 +630,7 @@ static int auerchain_start_wait_urb (pauerchain_t acp, struct urb *urb, int time
630 } else 630 } else
631 status = urb->status; 631 status = urb->status;
632 632
633 if (actual_length) 633 if (status >= 0)
634 *actual_length = urb->actual_length; 634 *actual_length = urb->actual_length;
635 635
636 return status; 636 return status;
@@ -664,7 +664,7 @@ static int auerchain_control_msg (pauerchain_t acp, struct usb_device *dev, unsi
664 int ret; 664 int ret;
665 struct usb_ctrlrequest *dr; 665 struct usb_ctrlrequest *dr;
666 struct urb *urb; 666 struct urb *urb;
667 int length; 667 int uninitialized_var(length);
668 668
669 dbg ("auerchain_control_msg"); 669 dbg ("auerchain_control_msg");
670 dr = kmalloc (sizeof (struct usb_ctrlrequest), GFP_KERNEL); 670 dr = kmalloc (sizeof (struct usb_ctrlrequest), GFP_KERNEL);
diff --git a/drivers/usb/storage/usb.c b/drivers/usb/storage/usb.c
index bef8bcd9bd98..28842d208bb0 100644
--- a/drivers/usb/storage/usb.c
+++ b/drivers/usb/storage/usb.c
@@ -311,8 +311,6 @@ static int usb_stor_control_thread(void * __us)
311 struct Scsi_Host *host = us_to_host(us); 311 struct Scsi_Host *host = us_to_host(us);
312 int autopm_rc; 312 int autopm_rc;
313 313
314 current->flags |= PF_NOFREEZE;
315
316 for(;;) { 314 for(;;) {
317 US_DEBUGP("*** thread sleeping.\n"); 315 US_DEBUGP("*** thread sleeping.\n");
318 if(down_interruptible(&us->sema)) 316 if(down_interruptible(&us->sema))
@@ -920,6 +918,7 @@ static int usb_stor_scan_thread(void * __us)
920 printk(KERN_DEBUG 918 printk(KERN_DEBUG
921 "usb-storage: device found at %d\n", us->pusb_dev->devnum); 919 "usb-storage: device found at %d\n", us->pusb_dev->devnum);
922 920
921 set_freezable();
923 /* Wait for the timeout to expire or for a disconnect */ 922 /* Wait for the timeout to expire or for a disconnect */
924 if (delay_use > 0) { 923 if (delay_use > 0) {
925 printk(KERN_DEBUG "usb-storage: waiting for device " 924 printk(KERN_DEBUG "usb-storage: waiting for device "
diff --git a/drivers/video/68328fb.c b/drivers/video/68328fb.c
index 0dda73da8628..7f907fb23b8a 100644
--- a/drivers/video/68328fb.c
+++ b/drivers/video/68328fb.c
@@ -60,7 +60,7 @@ static u_long videomemory;
60static u_long videomemorysize; 60static u_long videomemorysize;
61 61
62static struct fb_info fb_info; 62static struct fb_info fb_info;
63static u32 mc68x328fb_pseudo_palette[17]; 63static u32 mc68x328fb_pseudo_palette[16];
64 64
65static struct fb_var_screeninfo mc68x328fb_default __initdata = { 65static struct fb_var_screeninfo mc68x328fb_default __initdata = {
66 .red = { 0, 8, 0 }, 66 .red = { 0, 8, 0 },
diff --git a/drivers/video/Kconfig b/drivers/video/Kconfig
index 9b7a76be36a0..0c5644bb59af 100644
--- a/drivers/video/Kconfig
+++ b/drivers/video/Kconfig
@@ -812,7 +812,7 @@ config FB_PVR2
812 812
813config FB_EPSON1355 813config FB_EPSON1355
814 bool "Epson 1355 framebuffer support" 814 bool "Epson 1355 framebuffer support"
815 depends on (FB = y) && (SUPERH || ARCH_CEIVA) 815 depends on (FB = y) && ARCH_CEIVA
816 select FB_CFB_FILLRECT 816 select FB_CFB_FILLRECT
817 select FB_CFB_COPYAREA 817 select FB_CFB_COPYAREA
818 select FB_CFB_IMAGEBLIT 818 select FB_CFB_IMAGEBLIT
@@ -1820,6 +1820,10 @@ config FB_XILINX
1820 framebuffer. ML300 carries a 640*480 LCD display on the board, 1820 framebuffer. ML300 carries a 640*480 LCD display on the board,
1821 ML403 uses a standard DB15 VGA connector. 1821 ML403 uses a standard DB15 VGA connector.
1822 1822
1823if ARCH_OMAP
1824 source "drivers/video/omap/Kconfig"
1825endif
1826
1823config FB_VIRTUAL 1827config FB_VIRTUAL
1824 tristate "Virtual Frame Buffer support (ONLY FOR TESTING!)" 1828 tristate "Virtual Frame Buffer support (ONLY FOR TESTING!)"
1825 depends on FB 1829 depends on FB
diff --git a/drivers/video/Makefile b/drivers/video/Makefile
index bd8b05229500..a562f9d69d2c 100644
--- a/drivers/video/Makefile
+++ b/drivers/video/Makefile
@@ -113,6 +113,7 @@ obj-$(CONFIG_FB_IBM_GXT4500) += gxt4500.o
113obj-$(CONFIG_FB_PS3) += ps3fb.o 113obj-$(CONFIG_FB_PS3) += ps3fb.o
114obj-$(CONFIG_FB_SM501) += sm501fb.o 114obj-$(CONFIG_FB_SM501) += sm501fb.o
115obj-$(CONFIG_FB_XILINX) += xilinxfb.o 115obj-$(CONFIG_FB_XILINX) += xilinxfb.o
116obj-$(CONFIG_FB_OMAP) += omap/
116 117
117# Platform or fallback drivers go here 118# Platform or fallback drivers go here
118obj-$(CONFIG_FB_VESA) += vesafb.o 119obj-$(CONFIG_FB_VESA) += vesafb.o
diff --git a/drivers/video/aty/ati_ids.h b/drivers/video/aty/ati_ids.h
index 90e7df22f508..685a754991c6 100644
--- a/drivers/video/aty/ati_ids.h
+++ b/drivers/video/aty/ati_ids.h
@@ -204,6 +204,7 @@
204#define PCI_CHIP_RV280_5961 0x5961 204#define PCI_CHIP_RV280_5961 0x5961
205#define PCI_CHIP_RV280_5962 0x5962 205#define PCI_CHIP_RV280_5962 0x5962
206#define PCI_CHIP_RV280_5964 0x5964 206#define PCI_CHIP_RV280_5964 0x5964
207#define PCI_CHIP_RS485_5975 0x5975
207#define PCI_CHIP_RV280_5C61 0x5C61 208#define PCI_CHIP_RV280_5C61 0x5C61
208#define PCI_CHIP_RV280_5C63 0x5C63 209#define PCI_CHIP_RV280_5C63 0x5C63
209#define PCI_CHIP_R423_5D57 0x5D57 210#define PCI_CHIP_R423_5D57 0x5D57
diff --git a/drivers/video/aty/atyfb_base.c b/drivers/video/aty/atyfb_base.c
index 2fbff6317433..ef330e34d031 100644
--- a/drivers/video/aty/atyfb_base.c
+++ b/drivers/video/aty/atyfb_base.c
@@ -541,7 +541,7 @@ static char ram_off[] __devinitdata = "OFF";
541#endif /* CONFIG_FB_ATY_CT */ 541#endif /* CONFIG_FB_ATY_CT */
542 542
543 543
544static u32 pseudo_palette[17]; 544static u32 pseudo_palette[16];
545 545
546#ifdef CONFIG_FB_ATY_GX 546#ifdef CONFIG_FB_ATY_GX
547static char *aty_gx_ram[8] __devinitdata = { 547static char *aty_gx_ram[8] __devinitdata = {
diff --git a/drivers/video/aty/radeon_base.c b/drivers/video/aty/radeon_base.c
index 2349e71b0083..47ca62fe7c3e 100644
--- a/drivers/video/aty/radeon_base.c
+++ b/drivers/video/aty/radeon_base.c
@@ -153,6 +153,8 @@ static struct pci_device_id radeonfb_pci_table[] = {
153 /* Mobility 9200 (M9+) */ 153 /* Mobility 9200 (M9+) */
154 CHIP_DEF(PCI_CHIP_RV280_5C61, RV280, CHIP_HAS_CRTC2 | CHIP_IS_MOBILITY), 154 CHIP_DEF(PCI_CHIP_RV280_5C61, RV280, CHIP_HAS_CRTC2 | CHIP_IS_MOBILITY),
155 CHIP_DEF(PCI_CHIP_RV280_5C63, RV280, CHIP_HAS_CRTC2 | CHIP_IS_MOBILITY), 155 CHIP_DEF(PCI_CHIP_RV280_5C63, RV280, CHIP_HAS_CRTC2 | CHIP_IS_MOBILITY),
156 /*Mobility Xpress 200 */
157 CHIP_DEF(PCI_CHIP_RS485_5975, R300, CHIP_HAS_CRTC2 | CHIP_IS_IGP | CHIP_IS_MOBILITY),
156 /* 9200 */ 158 /* 9200 */
157 CHIP_DEF(PCI_CHIP_RV280_5960, RV280, CHIP_HAS_CRTC2), 159 CHIP_DEF(PCI_CHIP_RV280_5960, RV280, CHIP_HAS_CRTC2),
158 CHIP_DEF(PCI_CHIP_RV280_5961, RV280, CHIP_HAS_CRTC2), 160 CHIP_DEF(PCI_CHIP_RV280_5961, RV280, CHIP_HAS_CRTC2),
diff --git a/drivers/video/aty/radeonfb.h b/drivers/video/aty/radeonfb.h
index 7ebffcdfd1e3..7c922c7b460b 100644
--- a/drivers/video/aty/radeonfb.h
+++ b/drivers/video/aty/radeonfb.h
@@ -301,7 +301,7 @@ struct radeonfb_info {
301 void __iomem *bios_seg; 301 void __iomem *bios_seg;
302 int fp_bios_start; 302 int fp_bios_start;
303 303
304 u32 pseudo_palette[17]; 304 u32 pseudo_palette[16];
305 struct { u8 red, green, blue, pad; } 305 struct { u8 red, green, blue, pad; }
306 palette[256]; 306 palette[256];
307 307
diff --git a/drivers/video/console/Kconfig b/drivers/video/console/Kconfig
index d3b8a6be2916..49643969f9f8 100644
--- a/drivers/video/console/Kconfig
+++ b/drivers/video/console/Kconfig
@@ -118,6 +118,22 @@ config FRAMEBUFFER_CONSOLE
118 help 118 help
119 Low-level framebuffer-based console driver. 119 Low-level framebuffer-based console driver.
120 120
121config FRAMEBUFFER_CONSOLE_DETECT_PRIMARY
122 bool "Map the console to the primary display device"
123 depends on FRAMEBUFFER_CONSOLE
124 default n
125 ---help---
126 If this option is selected, the framebuffer console will
127 automatically select the primary display device (if the architecture
128 supports this feature). Otherwise, the framebuffer console will
129 always select the first framebuffer driver that is loaded. The latter
130 is the default behavior.
131
132 You can always override the automatic selection of the primary device
133 by using the fbcon=map: boot option.
134
135 If unsure, select n.
136
121config FRAMEBUFFER_CONSOLE_ROTATION 137config FRAMEBUFFER_CONSOLE_ROTATION
122 bool "Framebuffer Console Rotation" 138 bool "Framebuffer Console Rotation"
123 depends on FRAMEBUFFER_CONSOLE 139 depends on FRAMEBUFFER_CONSOLE
diff --git a/drivers/video/console/fbcon.c b/drivers/video/console/fbcon.c
index 73813c60d03a..decfdc8eb9cc 100644
--- a/drivers/video/console/fbcon.c
+++ b/drivers/video/console/fbcon.c
@@ -75,6 +75,7 @@
75#include <linux/init.h> 75#include <linux/init.h>
76#include <linux/interrupt.h> 76#include <linux/interrupt.h>
77#include <linux/crc32.h> /* For counting font checksums */ 77#include <linux/crc32.h> /* For counting font checksums */
78#include <asm/fb.h>
78#include <asm/irq.h> 79#include <asm/irq.h>
79#include <asm/system.h> 80#include <asm/system.h>
80#include <asm/uaccess.h> 81#include <asm/uaccess.h>
@@ -125,6 +126,8 @@ static int first_fb_vc;
125static int last_fb_vc = MAX_NR_CONSOLES - 1; 126static int last_fb_vc = MAX_NR_CONSOLES - 1;
126static int fbcon_is_default = 1; 127static int fbcon_is_default = 1;
127static int fbcon_has_exited; 128static int fbcon_has_exited;
129static int primary_device = -1;
130static int map_override;
128 131
129/* font data */ 132/* font data */
130static char fontname[40]; 133static char fontname[40];
@@ -152,6 +155,7 @@ static int fbcon_set_origin(struct vc_data *);
152#define DEFAULT_CURSOR_BLINK_RATE (20) 155#define DEFAULT_CURSOR_BLINK_RATE (20)
153 156
154static int vbl_cursor_cnt; 157static int vbl_cursor_cnt;
158static int fbcon_cursor_noblink;
155 159
156#define divides(a, b) ((!(a) || (b)%(a)) ? 0 : 1) 160#define divides(a, b) ((!(a) || (b)%(a)) ? 0 : 1)
157 161
@@ -188,16 +192,14 @@ static __inline__ void ypan_down(struct vc_data *vc, int count);
188static void fbcon_bmove_rec(struct vc_data *vc, struct display *p, int sy, int sx, 192static void fbcon_bmove_rec(struct vc_data *vc, struct display *p, int sy, int sx,
189 int dy, int dx, int height, int width, u_int y_break); 193 int dy, int dx, int height, int width, u_int y_break);
190static void fbcon_set_disp(struct fb_info *info, struct fb_var_screeninfo *var, 194static void fbcon_set_disp(struct fb_info *info, struct fb_var_screeninfo *var,
191 struct vc_data *vc); 195 int unit);
192static void fbcon_preset_disp(struct fb_info *info, struct fb_var_screeninfo *var,
193 int unit);
194static void fbcon_redraw_move(struct vc_data *vc, struct display *p, 196static void fbcon_redraw_move(struct vc_data *vc, struct display *p,
195 int line, int count, int dy); 197 int line, int count, int dy);
196static void fbcon_modechanged(struct fb_info *info); 198static void fbcon_modechanged(struct fb_info *info);
197static void fbcon_set_all_vcs(struct fb_info *info); 199static void fbcon_set_all_vcs(struct fb_info *info);
198static void fbcon_start(void); 200static void fbcon_start(void);
199static void fbcon_exit(void); 201static void fbcon_exit(void);
200static struct class_device *fbcon_class_device; 202static struct device *fbcon_device;
201 203
202#ifdef CONFIG_MAC 204#ifdef CONFIG_MAC
203/* 205/*
@@ -441,7 +443,8 @@ static void fbcon_add_cursor_timer(struct fb_info *info)
441 struct fbcon_ops *ops = info->fbcon_par; 443 struct fbcon_ops *ops = info->fbcon_par;
442 444
443 if ((!info->queue.func || info->queue.func == fb_flashcursor) && 445 if ((!info->queue.func || info->queue.func == fb_flashcursor) &&
444 !(ops->flags & FBCON_FLAGS_CURSOR_TIMER)) { 446 !(ops->flags & FBCON_FLAGS_CURSOR_TIMER) &&
447 !fbcon_cursor_noblink) {
445 if (!info->queue.func) 448 if (!info->queue.func)
446 INIT_WORK(&info->queue, fb_flashcursor); 449 INIT_WORK(&info->queue, fb_flashcursor);
447 450
@@ -495,13 +498,17 @@ static int __init fb_console_setup(char *this_opt)
495 498
496 if (!strncmp(options, "map:", 4)) { 499 if (!strncmp(options, "map:", 4)) {
497 options += 4; 500 options += 4;
498 if (*options) 501 if (*options) {
499 for (i = 0, j = 0; i < MAX_NR_CONSOLES; i++) { 502 for (i = 0, j = 0; i < MAX_NR_CONSOLES; i++) {
500 if (!options[j]) 503 if (!options[j])
501 j = 0; 504 j = 0;
502 con2fb_map_boot[i] = 505 con2fb_map_boot[i] =
503 (options[j++]-'0') % FB_MAX; 506 (options[j++]-'0') % FB_MAX;
504 } 507 }
508
509 map_override = 1;
510 }
511
505 return 1; 512 return 1;
506 } 513 }
507 514
@@ -736,7 +743,9 @@ static int con2fb_acquire_newinfo(struct vc_data *vc, struct fb_info *info,
736 743
737 if (!err) { 744 if (!err) {
738 info->fbcon_par = ops; 745 info->fbcon_par = ops;
739 set_blitting_type(vc, info); 746
747 if (vc)
748 set_blitting_type(vc, info);
740 } 749 }
741 750
742 if (err) { 751 if (err) {
@@ -798,11 +807,7 @@ static void con2fb_init_display(struct vc_data *vc, struct fb_info *info,
798 807
799 ops->flags |= FBCON_FLAGS_INIT; 808 ops->flags |= FBCON_FLAGS_INIT;
800 ops->graphics = 0; 809 ops->graphics = 0;
801 810 fbcon_set_disp(info, &info->var, unit);
802 if (vc)
803 fbcon_set_disp(info, &info->var, vc);
804 else
805 fbcon_preset_disp(info, &info->var, unit);
806 811
807 if (show_logo) { 812 if (show_logo) {
808 struct vc_data *fg_vc = vc_cons[fg_console].d; 813 struct vc_data *fg_vc = vc_cons[fg_console].d;
@@ -1107,6 +1112,9 @@ static void fbcon_init(struct vc_data *vc, int init)
1107 if (var_to_display(p, &info->var, info)) 1112 if (var_to_display(p, &info->var, info))
1108 return; 1113 return;
1109 1114
1115 if (!info->fbcon_par)
1116 con2fb_acquire_newinfo(vc, info, vc->vc_num, -1);
1117
1110 /* If we are not the first console on this 1118 /* If we are not the first console on this
1111 fb, copy the font from that console */ 1119 fb, copy the font from that console */
1112 t = &fb_display[fg_console]; 1120 t = &fb_display[fg_console];
@@ -1349,6 +1357,11 @@ static void fbcon_cursor(struct vc_data *vc, int mode)
1349 if (fbcon_is_inactive(vc, info) || vc->vc_deccm != 1) 1357 if (fbcon_is_inactive(vc, info) || vc->vc_deccm != 1)
1350 return; 1358 return;
1351 1359
1360 if (vc->vc_cursor_type & 0x10)
1361 fbcon_del_cursor_timer(info);
1362 else
1363 fbcon_add_cursor_timer(info);
1364
1352 ops->cursor_flash = (mode == CM_ERASE) ? 0 : 1; 1365 ops->cursor_flash = (mode == CM_ERASE) ? 0 : 1;
1353 if (mode & CM_SOFTBACK) { 1366 if (mode & CM_SOFTBACK) {
1354 mode &= ~CM_SOFTBACK; 1367 mode &= ~CM_SOFTBACK;
@@ -1368,36 +1381,29 @@ static int scrollback_phys_max = 0;
1368static int scrollback_max = 0; 1381static int scrollback_max = 0;
1369static int scrollback_current = 0; 1382static int scrollback_current = 0;
1370 1383
1371/*
1372 * If no vc is existent yet, just set struct display
1373 */
1374static void fbcon_preset_disp(struct fb_info *info, struct fb_var_screeninfo *var,
1375 int unit)
1376{
1377 struct display *p = &fb_display[unit];
1378 struct display *t = &fb_display[fg_console];
1379
1380 if (var_to_display(p, var, info))
1381 return;
1382
1383 p->fontdata = t->fontdata;
1384 p->userfont = t->userfont;
1385 if (p->userfont)
1386 REFCOUNT(p->fontdata)++;
1387}
1388
1389static void fbcon_set_disp(struct fb_info *info, struct fb_var_screeninfo *var, 1384static void fbcon_set_disp(struct fb_info *info, struct fb_var_screeninfo *var,
1390 struct vc_data *vc) 1385 int unit)
1391{ 1386{
1392 struct display *p = &fb_display[vc->vc_num], *t; 1387 struct display *p, *t;
1393 struct vc_data **default_mode = vc->vc_display_fg; 1388 struct vc_data **default_mode, *vc;
1394 struct vc_data *svc = *default_mode; 1389 struct vc_data *svc;
1395 struct fbcon_ops *ops = info->fbcon_par; 1390 struct fbcon_ops *ops = info->fbcon_par;
1396 int rows, cols, charcnt = 256; 1391 int rows, cols, charcnt = 256;
1397 1392
1393 p = &fb_display[unit];
1394
1398 if (var_to_display(p, var, info)) 1395 if (var_to_display(p, var, info))
1399 return; 1396 return;
1397
1398 vc = vc_cons[unit].d;
1399
1400 if (!vc)
1401 return;
1402
1403 default_mode = vc->vc_display_fg;
1404 svc = *default_mode;
1400 t = &fb_display[svc->vc_num]; 1405 t = &fb_display[svc->vc_num];
1406
1401 if (!vc->vc_font.data) { 1407 if (!vc->vc_font.data) {
1402 vc->vc_font.data = (void *)(p->fontdata = t->fontdata); 1408 vc->vc_font.data = (void *)(p->fontdata = t->fontdata);
1403 vc->vc_font.width = (*default_mode)->vc_font.width; 1409 vc->vc_font.width = (*default_mode)->vc_font.width;
@@ -1704,6 +1710,56 @@ static void fbcon_redraw_move(struct vc_data *vc, struct display *p,
1704 } 1710 }
1705} 1711}
1706 1712
1713static void fbcon_redraw_blit(struct vc_data *vc, struct fb_info *info,
1714 struct display *p, int line, int count, int ycount)
1715{
1716 int offset = ycount * vc->vc_cols;
1717 unsigned short *d = (unsigned short *)
1718 (vc->vc_origin + vc->vc_size_row * line);
1719 unsigned short *s = d + offset;
1720 struct fbcon_ops *ops = info->fbcon_par;
1721
1722 while (count--) {
1723 unsigned short *start = s;
1724 unsigned short *le = advance_row(s, 1);
1725 unsigned short c;
1726 int x = 0;
1727
1728 do {
1729 c = scr_readw(s);
1730
1731 if (c == scr_readw(d)) {
1732 if (s > start) {
1733 ops->bmove(vc, info, line + ycount, x,
1734 line, x, 1, s-start);
1735 x += s - start + 1;
1736 start = s + 1;
1737 } else {
1738 x++;
1739 start++;
1740 }
1741 }
1742
1743 scr_writew(c, d);
1744 console_conditional_schedule();
1745 s++;
1746 d++;
1747 } while (s < le);
1748 if (s > start)
1749 ops->bmove(vc, info, line + ycount, x, line, x, 1,
1750 s-start);
1751 console_conditional_schedule();
1752 if (ycount > 0)
1753 line++;
1754 else {
1755 line--;
1756 /* NOTE: We subtract two lines from these pointers */
1757 s -= vc->vc_size_row;
1758 d -= vc->vc_size_row;
1759 }
1760 }
1761}
1762
1707static void fbcon_redraw(struct vc_data *vc, struct display *p, 1763static void fbcon_redraw(struct vc_data *vc, struct display *p,
1708 int line, int count, int offset) 1764 int line, int count, int offset)
1709{ 1765{
@@ -1789,7 +1845,6 @@ static int fbcon_scroll(struct vc_data *vc, int t, int b, int dir,
1789{ 1845{
1790 struct fb_info *info = registered_fb[con2fb_map[vc->vc_num]]; 1846 struct fb_info *info = registered_fb[con2fb_map[vc->vc_num]];
1791 struct display *p = &fb_display[vc->vc_num]; 1847 struct display *p = &fb_display[vc->vc_num];
1792 struct fbcon_ops *ops = info->fbcon_par;
1793 int scroll_partial = info->flags & FBINFO_PARTIAL_PAN_OK; 1848 int scroll_partial = info->flags & FBINFO_PARTIAL_PAN_OK;
1794 1849
1795 if (fbcon_is_inactive(vc, info)) 1850 if (fbcon_is_inactive(vc, info))
@@ -1813,10 +1868,15 @@ static int fbcon_scroll(struct vc_data *vc, int t, int b, int dir,
1813 goto redraw_up; 1868 goto redraw_up;
1814 switch (p->scrollmode) { 1869 switch (p->scrollmode) {
1815 case SCROLL_MOVE: 1870 case SCROLL_MOVE:
1816 ops->bmove(vc, info, t + count, 0, t, 0, 1871 fbcon_redraw_blit(vc, info, p, t, b - t - count,
1817 b - t - count, vc->vc_cols); 1872 count);
1818 ops->clear(vc, info, b - count, 0, count, 1873 fbcon_clear(vc, b - count, 0, count, vc->vc_cols);
1819 vc->vc_cols); 1874 scr_memsetw((unsigned short *) (vc->vc_origin +
1875 vc->vc_size_row *
1876 (b - count)),
1877 vc->vc_video_erase_char,
1878 vc->vc_size_row * count);
1879 return 1;
1820 break; 1880 break;
1821 1881
1822 case SCROLL_WRAP_MOVE: 1882 case SCROLL_WRAP_MOVE:
@@ -1899,9 +1959,15 @@ static int fbcon_scroll(struct vc_data *vc, int t, int b, int dir,
1899 goto redraw_down; 1959 goto redraw_down;
1900 switch (p->scrollmode) { 1960 switch (p->scrollmode) {
1901 case SCROLL_MOVE: 1961 case SCROLL_MOVE:
1902 ops->bmove(vc, info, t, 0, t + count, 0, 1962 fbcon_redraw_blit(vc, info, p, b - 1, b - t - count,
1903 b - t - count, vc->vc_cols); 1963 -count);
1904 ops->clear(vc, info, t, 0, count, vc->vc_cols); 1964 fbcon_clear(vc, t, 0, count, vc->vc_cols);
1965 scr_memsetw((unsigned short *) (vc->vc_origin +
1966 vc->vc_size_row *
1967 t),
1968 vc->vc_video_erase_char,
1969 vc->vc_size_row * count);
1970 return 1;
1905 break; 1971 break;
1906 1972
1907 case SCROLL_WRAP_MOVE: 1973 case SCROLL_WRAP_MOVE:
@@ -2937,9 +3003,48 @@ static int fbcon_mode_deleted(struct fb_info *info,
2937 return found; 3003 return found;
2938} 3004}
2939 3005
2940static int fbcon_fb_unregistered(int idx) 3006#ifdef CONFIG_VT_HW_CONSOLE_BINDING
3007static int fbcon_unbind(void)
2941{ 3008{
2942 int i; 3009 int ret;
3010
3011 ret = unbind_con_driver(&fb_con, first_fb_vc, last_fb_vc,
3012 fbcon_is_default);
3013 return ret;
3014}
3015#else
3016static inline int fbcon_unbind(void)
3017{
3018 return -EINVAL;
3019}
3020#endif /* CONFIG_VT_HW_CONSOLE_BINDING */
3021
3022static int fbcon_fb_unbind(int idx)
3023{
3024 int i, new_idx = -1, ret = 0;
3025
3026 for (i = first_fb_vc; i <= last_fb_vc; i++) {
3027 if (con2fb_map[i] != idx &&
3028 con2fb_map[i] != -1) {
3029 new_idx = i;
3030 break;
3031 }
3032 }
3033
3034 if (new_idx != -1) {
3035 for (i = first_fb_vc; i <= last_fb_vc; i++) {
3036 if (con2fb_map[i] == idx)
3037 set_con2fb_map(i, new_idx, 0);
3038 }
3039 } else
3040 ret = fbcon_unbind();
3041
3042 return ret;
3043}
3044
3045static int fbcon_fb_unregistered(struct fb_info *info)
3046{
3047 int i, idx = info->node;
2943 3048
2944 for (i = first_fb_vc; i <= last_fb_vc; i++) { 3049 for (i = first_fb_vc; i <= last_fb_vc; i++) {
2945 if (con2fb_map[i] == idx) 3050 if (con2fb_map[i] == idx)
@@ -2967,12 +3072,48 @@ static int fbcon_fb_unregistered(int idx)
2967 if (!num_registered_fb) 3072 if (!num_registered_fb)
2968 unregister_con_driver(&fb_con); 3073 unregister_con_driver(&fb_con);
2969 3074
3075
3076 if (primary_device == idx)
3077 primary_device = -1;
3078
2970 return 0; 3079 return 0;
2971} 3080}
2972 3081
2973static int fbcon_fb_registered(int idx) 3082#ifdef CONFIG_FRAMEBUFFER_CONSOLE_DETECT_PRIMARY
3083static void fbcon_select_primary(struct fb_info *info)
2974{ 3084{
2975 int ret = 0, i; 3085 if (!map_override && primary_device == -1 &&
3086 fb_is_primary_device(info)) {
3087 int i;
3088
3089 printk(KERN_INFO "fbcon: %s (fb%i) is primary device\n",
3090 info->fix.id, info->node);
3091 primary_device = info->node;
3092
3093 for (i = first_fb_vc; i <= last_fb_vc; i++)
3094 con2fb_map_boot[i] = primary_device;
3095
3096 if (con_is_bound(&fb_con)) {
3097 printk(KERN_INFO "fbcon: Remapping primary device, "
3098 "fb%i, to tty %i-%i\n", info->node,
3099 first_fb_vc + 1, last_fb_vc + 1);
3100 info_idx = primary_device;
3101 }
3102 }
3103
3104}
3105#else
3106static inline void fbcon_select_primary(struct fb_info *info)
3107{
3108 return;
3109}
3110#endif /* CONFIG_FRAMEBUFFER_DETECT_PRIMARY */
3111
3112static int fbcon_fb_registered(struct fb_info *info)
3113{
3114 int ret = 0, i, idx = info->node;
3115
3116 fbcon_select_primary(info);
2976 3117
2977 if (info_idx == -1) { 3118 if (info_idx == -1) {
2978 for (i = first_fb_vc; i <= last_fb_vc; i++) { 3119 for (i = first_fb_vc; i <= last_fb_vc; i++) {
@@ -2986,8 +3127,7 @@ static int fbcon_fb_registered(int idx)
2986 ret = fbcon_takeover(1); 3127 ret = fbcon_takeover(1);
2987 } else { 3128 } else {
2988 for (i = first_fb_vc; i <= last_fb_vc; i++) { 3129 for (i = first_fb_vc; i <= last_fb_vc; i++) {
2989 if (con2fb_map_boot[i] == idx && 3130 if (con2fb_map_boot[i] == idx)
2990 con2fb_map[i] == -1)
2991 set_con2fb_map(i, idx, 0); 3131 set_con2fb_map(i, idx, 0);
2992 } 3132 }
2993 } 3133 }
@@ -3034,12 +3174,7 @@ static void fbcon_new_modelist(struct fb_info *info)
3034 mode = fb_find_nearest_mode(fb_display[i].mode, 3174 mode = fb_find_nearest_mode(fb_display[i].mode,
3035 &info->modelist); 3175 &info->modelist);
3036 fb_videomode_to_var(&var, mode); 3176 fb_videomode_to_var(&var, mode);
3037 3177 fbcon_set_disp(info, &var, vc->vc_num);
3038 if (vc)
3039 fbcon_set_disp(info, &var, vc);
3040 else
3041 fbcon_preset_disp(info, &var, i);
3042
3043 } 3178 }
3044} 3179}
3045 3180
@@ -3114,11 +3249,14 @@ static int fbcon_event_notify(struct notifier_block *self,
3114 mode = event->data; 3249 mode = event->data;
3115 ret = fbcon_mode_deleted(info, mode); 3250 ret = fbcon_mode_deleted(info, mode);
3116 break; 3251 break;
3252 case FB_EVENT_FB_UNBIND:
3253 ret = fbcon_fb_unbind(info->node);
3254 break;
3117 case FB_EVENT_FB_REGISTERED: 3255 case FB_EVENT_FB_REGISTERED:
3118 ret = fbcon_fb_registered(info->node); 3256 ret = fbcon_fb_registered(info);
3119 break; 3257 break;
3120 case FB_EVENT_FB_UNREGISTERED: 3258 case FB_EVENT_FB_UNREGISTERED:
3121 ret = fbcon_fb_unregistered(info->node); 3259 ret = fbcon_fb_unregistered(info);
3122 break; 3260 break;
3123 case FB_EVENT_SET_CONSOLE_MAP: 3261 case FB_EVENT_SET_CONSOLE_MAP:
3124 con2fb = event->data; 3262 con2fb = event->data;
@@ -3179,8 +3317,9 @@ static struct notifier_block fbcon_event_notifier = {
3179 .notifier_call = fbcon_event_notify, 3317 .notifier_call = fbcon_event_notify,
3180}; 3318};
3181 3319
3182static ssize_t store_rotate(struct class_device *class_device, 3320static ssize_t store_rotate(struct device *device,
3183 const char *buf, size_t count) 3321 struct device_attribute *attr, const char *buf,
3322 size_t count)
3184{ 3323{
3185 struct fb_info *info; 3324 struct fb_info *info;
3186 int rotate, idx; 3325 int rotate, idx;
@@ -3203,8 +3342,9 @@ err:
3203 return count; 3342 return count;
3204} 3343}
3205 3344
3206static ssize_t store_rotate_all(struct class_device *class_device, 3345static ssize_t store_rotate_all(struct device *device,
3207 const char *buf, size_t count) 3346 struct device_attribute *attr,const char *buf,
3347 size_t count)
3208{ 3348{
3209 struct fb_info *info; 3349 struct fb_info *info;
3210 int rotate, idx; 3350 int rotate, idx;
@@ -3227,7 +3367,8 @@ err:
3227 return count; 3367 return count;
3228} 3368}
3229 3369
3230static ssize_t show_rotate(struct class_device *class_device, char *buf) 3370static ssize_t show_rotate(struct device *device,
3371 struct device_attribute *attr,char *buf)
3231{ 3372{
3232 struct fb_info *info; 3373 struct fb_info *info;
3233 int rotate = 0, idx; 3374 int rotate = 0, idx;
@@ -3248,20 +3389,86 @@ err:
3248 return snprintf(buf, PAGE_SIZE, "%d\n", rotate); 3389 return snprintf(buf, PAGE_SIZE, "%d\n", rotate);
3249} 3390}
3250 3391
3251static struct class_device_attribute class_device_attrs[] = { 3392static ssize_t show_cursor_blink(struct device *device,
3393 struct device_attribute *attr, char *buf)
3394{
3395 struct fb_info *info;
3396 struct fbcon_ops *ops;
3397 int idx, blink = -1;
3398
3399 if (fbcon_has_exited)
3400 return 0;
3401
3402 acquire_console_sem();
3403 idx = con2fb_map[fg_console];
3404
3405 if (idx == -1 || registered_fb[idx] == NULL)
3406 goto err;
3407
3408 info = registered_fb[idx];
3409 ops = info->fbcon_par;
3410
3411 if (!ops)
3412 goto err;
3413
3414 blink = (ops->flags & FBCON_FLAGS_CURSOR_TIMER) ? 1 : 0;
3415err:
3416 release_console_sem();
3417 return snprintf(buf, PAGE_SIZE, "%d\n", blink);
3418}
3419
3420static ssize_t store_cursor_blink(struct device *device,
3421 struct device_attribute *attr,
3422 const char *buf, size_t count)
3423{
3424 struct fb_info *info;
3425 int blink, idx;
3426 char **last = NULL;
3427
3428 if (fbcon_has_exited)
3429 return count;
3430
3431 acquire_console_sem();
3432 idx = con2fb_map[fg_console];
3433
3434 if (idx == -1 || registered_fb[idx] == NULL)
3435 goto err;
3436
3437 info = registered_fb[idx];
3438
3439 if (!info->fbcon_par)
3440 goto err;
3441
3442 blink = simple_strtoul(buf, last, 0);
3443
3444 if (blink) {
3445 fbcon_cursor_noblink = 0;
3446 fbcon_add_cursor_timer(info);
3447 } else {
3448 fbcon_cursor_noblink = 1;
3449 fbcon_del_cursor_timer(info);
3450 }
3451
3452err:
3453 release_console_sem();
3454 return count;
3455}
3456
3457static struct device_attribute device_attrs[] = {
3252 __ATTR(rotate, S_IRUGO|S_IWUSR, show_rotate, store_rotate), 3458 __ATTR(rotate, S_IRUGO|S_IWUSR, show_rotate, store_rotate),
3253 __ATTR(rotate_all, S_IWUSR, NULL, store_rotate_all), 3459 __ATTR(rotate_all, S_IWUSR, NULL, store_rotate_all),
3460 __ATTR(cursor_blink, S_IRUGO|S_IWUSR, show_cursor_blink,
3461 store_cursor_blink),
3254}; 3462};
3255 3463
3256static int fbcon_init_class_device(void) 3464static int fbcon_init_device(void)
3257{ 3465{
3258 int i, error = 0; 3466 int i, error = 0;
3259 3467
3260 fbcon_has_sysfs = 1; 3468 fbcon_has_sysfs = 1;
3261 3469
3262 for (i = 0; i < ARRAY_SIZE(class_device_attrs); i++) { 3470 for (i = 0; i < ARRAY_SIZE(device_attrs); i++) {
3263 error = class_device_create_file(fbcon_class_device, 3471 error = device_create_file(fbcon_device, &device_attrs[i]);
3264 &class_device_attrs[i]);
3265 3472
3266 if (error) 3473 if (error)
3267 break; 3474 break;
@@ -3269,8 +3476,7 @@ static int fbcon_init_class_device(void)
3269 3476
3270 if (error) { 3477 if (error) {
3271 while (--i >= 0) 3478 while (--i >= 0)
3272 class_device_remove_file(fbcon_class_device, 3479 device_remove_file(fbcon_device, &device_attrs[i]);
3273 &class_device_attrs[i]);
3274 3480
3275 fbcon_has_sysfs = 0; 3481 fbcon_has_sysfs = 0;
3276 } 3482 }
@@ -3356,16 +3562,15 @@ static int __init fb_console_init(void)
3356 3562
3357 acquire_console_sem(); 3563 acquire_console_sem();
3358 fb_register_client(&fbcon_event_notifier); 3564 fb_register_client(&fbcon_event_notifier);
3359 fbcon_class_device = 3565 fbcon_device = device_create(fb_class, NULL, MKDEV(0, 0), "fbcon");
3360 class_device_create(fb_class, NULL, MKDEV(0, 0), NULL, "fbcon");
3361 3566
3362 if (IS_ERR(fbcon_class_device)) { 3567 if (IS_ERR(fbcon_device)) {
3363 printk(KERN_WARNING "Unable to create class_device " 3568 printk(KERN_WARNING "Unable to create device "
3364 "for fbcon; errno = %ld\n", 3569 "for fbcon; errno = %ld\n",
3365 PTR_ERR(fbcon_class_device)); 3570 PTR_ERR(fbcon_device));
3366 fbcon_class_device = NULL; 3571 fbcon_device = NULL;
3367 } else 3572 } else
3368 fbcon_init_class_device(); 3573 fbcon_init_device();
3369 3574
3370 for (i = 0; i < MAX_NR_CONSOLES; i++) 3575 for (i = 0; i < MAX_NR_CONSOLES; i++)
3371 con2fb_map[i] = -1; 3576 con2fb_map[i] = -1;
@@ -3379,14 +3584,13 @@ module_init(fb_console_init);
3379 3584
3380#ifdef MODULE 3585#ifdef MODULE
3381 3586
3382static void __exit fbcon_deinit_class_device(void) 3587static void __exit fbcon_deinit_device(void)
3383{ 3588{
3384 int i; 3589 int i;
3385 3590
3386 if (fbcon_has_sysfs) { 3591 if (fbcon_has_sysfs) {
3387 for (i = 0; i < ARRAY_SIZE(class_device_attrs); i++) 3592 for (i = 0; i < ARRAY_SIZE(device_attrs); i++)
3388 class_device_remove_file(fbcon_class_device, 3593 device_remove_file(fbcon_device, &device_attrs[i]);
3389 &class_device_attrs[i]);
3390 3594
3391 fbcon_has_sysfs = 0; 3595 fbcon_has_sysfs = 0;
3392 } 3596 }
@@ -3396,8 +3600,8 @@ static void __exit fb_console_exit(void)
3396{ 3600{
3397 acquire_console_sem(); 3601 acquire_console_sem();
3398 fb_unregister_client(&fbcon_event_notifier); 3602 fb_unregister_client(&fbcon_event_notifier);
3399 fbcon_deinit_class_device(); 3603 fbcon_deinit_device();
3400 class_device_destroy(fb_class, MKDEV(0, 0)); 3604 device_destroy(fb_class, MKDEV(0, 0));
3401 fbcon_exit(); 3605 fbcon_exit();
3402 release_console_sem(); 3606 release_console_sem();
3403 unregister_con_driver(&fb_con); 3607 unregister_con_driver(&fb_con);
diff --git a/drivers/video/controlfb.c b/drivers/video/controlfb.c
index 8b762739b1e0..b0be7eac32d8 100644
--- a/drivers/video/controlfb.c
+++ b/drivers/video/controlfb.c
@@ -94,7 +94,7 @@ static inline int VAR_MATCH(struct fb_var_screeninfo *x, struct fb_var_screeninf
94struct fb_info_control { 94struct fb_info_control {
95 struct fb_info info; 95 struct fb_info info;
96 struct fb_par_control par; 96 struct fb_par_control par;
97 u32 pseudo_palette[17]; 97 u32 pseudo_palette[16];
98 98
99 struct cmap_regs __iomem *cmap_regs; 99 struct cmap_regs __iomem *cmap_regs;
100 unsigned long cmap_regs_phys; 100 unsigned long cmap_regs_phys;
diff --git a/drivers/video/cyblafb.c b/drivers/video/cyblafb.c
index 94a66c2d2cf5..e23324d10be2 100644
--- a/drivers/video/cyblafb.c
+++ b/drivers/video/cyblafb.c
@@ -1068,15 +1068,18 @@ static int cyblafb_setcolreg(unsigned regno, unsigned red, unsigned green,
1068 out8(0x3C9, green >> 10); 1068 out8(0x3C9, green >> 10);
1069 out8(0x3C9, blue >> 10); 1069 out8(0x3C9, blue >> 10);
1070 1070
1071 } else if (bpp == 16) // RGB 565 1071 } else if (regno < 16) {
1072 ((u32 *) info->pseudo_palette)[regno] = 1072 if (bpp == 16) // RGB 565
1073 (red & 0xF800) | 1073 ((u32 *) info->pseudo_palette)[regno] =
1074 ((green & 0xFC00) >> 5) | ((blue & 0xF800) >> 11); 1074 (red & 0xF800) |
1075 else if (bpp == 32) // ARGB 8888 1075 ((green & 0xFC00) >> 5) |
1076 ((u32 *) info->pseudo_palette)[regno] = 1076 ((blue & 0xF800) >> 11);
1077 ((transp & 0xFF00) << 16) | 1077 else if (bpp == 32) // ARGB 8888
1078 ((red & 0xFF00) << 8) | 1078 ((u32 *) info->pseudo_palette)[regno] =
1079 ((green & 0xFF00)) | ((blue & 0xFF00) >> 8); 1079 ((transp & 0xFF00) << 16) |
1080 ((red & 0xFF00) << 8) |
1081 ((green & 0xFF00)) | ((blue & 0xFF00) >> 8);
1082 }
1080 1083
1081 return 0; 1084 return 0;
1082} 1085}
diff --git a/drivers/video/epson1355fb.c b/drivers/video/epson1355fb.c
index ca2c54ce508e..33be46ccb54f 100644
--- a/drivers/video/epson1355fb.c
+++ b/drivers/video/epson1355fb.c
@@ -63,23 +63,12 @@
63 63
64struct epson1355_par { 64struct epson1355_par {
65 unsigned long reg_addr; 65 unsigned long reg_addr;
66 u32 pseudo_palette[16];
66}; 67};
67 68
68/* ------------------------------------------------------------------------- */ 69/* ------------------------------------------------------------------------- */
69 70
70#ifdef CONFIG_SUPERH 71#if defined(CONFIG_ARM)
71
72static inline u8 epson1355_read_reg(int index)
73{
74 return ctrl_inb(par.reg_addr + index);
75}
76
77static inline void epson1355_write_reg(u8 data, int index)
78{
79 ctrl_outb(data, par.reg_addr + index);
80}
81
82#elif defined(CONFIG_ARM)
83 72
84# ifdef CONFIG_ARCH_CEIVA 73# ifdef CONFIG_ARCH_CEIVA
85# include <asm/arch/hardware.h> 74# include <asm/arch/hardware.h>
@@ -289,7 +278,7 @@ static int epson1355fb_blank(int blank_mode, struct fb_info *info)
289 struct epson1355_par *par = info->par; 278 struct epson1355_par *par = info->par;
290 279
291 switch (blank_mode) { 280 switch (blank_mode) {
292 case FB_BLANK_UNBLANKING: 281 case FB_BLANK_UNBLANK:
293 case FB_BLANK_NORMAL: 282 case FB_BLANK_NORMAL:
294 lcd_enable(par, 1); 283 lcd_enable(par, 1);
295 backlight_enable(1); 284 backlight_enable(1);
@@ -635,7 +624,7 @@ int __init epson1355fb_probe(struct platform_device *dev)
635 goto bail; 624 goto bail;
636 } 625 }
637 626
638 info = framebuffer_alloc(sizeof(struct epson1355_par) + sizeof(u32) * 256, &dev->dev); 627 info = framebuffer_alloc(sizeof(struct epson1355_par), &dev->dev);
639 if (!info) { 628 if (!info) {
640 rc = -ENOMEM; 629 rc = -ENOMEM;
641 goto bail; 630 goto bail;
@@ -648,7 +637,7 @@ int __init epson1355fb_probe(struct platform_device *dev)
648 rc = -ENOMEM; 637 rc = -ENOMEM;
649 goto bail; 638 goto bail;
650 } 639 }
651 info->pseudo_palette = (void *)(default_par + 1); 640 info->pseudo_palette = default_par->pseudo_palette;
652 641
653 info->screen_base = ioremap(EPSON1355FB_FB_PHYS, EPSON1355FB_FB_LEN); 642 info->screen_base = ioremap(EPSON1355FB_FB_PHYS, EPSON1355FB_FB_LEN);
654 if (!info->screen_base) { 643 if (!info->screen_base) {
diff --git a/drivers/video/fbmem.c b/drivers/video/fbmem.c
index 38c2e2558f5e..215ac579f901 100644
--- a/drivers/video/fbmem.c
+++ b/drivers/video/fbmem.c
@@ -33,17 +33,10 @@
33#include <linux/err.h> 33#include <linux/err.h>
34#include <linux/device.h> 34#include <linux/device.h>
35#include <linux/efi.h> 35#include <linux/efi.h>
36#include <linux/fb.h>
36 37
37#if defined(__mc68000__) || defined(CONFIG_APUS) 38#include <asm/fb.h>
38#include <asm/setup.h>
39#endif
40 39
41#include <asm/io.h>
42#include <asm/uaccess.h>
43#include <asm/page.h>
44#include <asm/pgtable.h>
45
46#include <linux/fb.h>
47 40
48 /* 41 /*
49 * Frame buffer device initialization and setup routines 42 * Frame buffer device initialization and setup routines
@@ -411,10 +404,146 @@ static void fb_do_show_logo(struct fb_info *info, struct fb_image *image,
411 } 404 }
412} 405}
413 406
407static int fb_show_logo_line(struct fb_info *info, int rotate,
408 const struct linux_logo *logo, int y,
409 unsigned int n)
410{
411 u32 *palette = NULL, *saved_pseudo_palette = NULL;
412 unsigned char *logo_new = NULL, *logo_rotate = NULL;
413 struct fb_image image;
414
415 /* Return if the frame buffer is not mapped or suspended */
416 if (logo == NULL || info->state != FBINFO_STATE_RUNNING ||
417 info->flags & FBINFO_MODULE)
418 return 0;
419
420 image.depth = 8;
421 image.data = logo->data;
422
423 if (fb_logo.needs_cmapreset)
424 fb_set_logocmap(info, logo);
425
426 if (fb_logo.needs_truepalette ||
427 fb_logo.needs_directpalette) {
428 palette = kmalloc(256 * 4, GFP_KERNEL);
429 if (palette == NULL)
430 return 0;
431
432 if (fb_logo.needs_truepalette)
433 fb_set_logo_truepalette(info, logo, palette);
434 else
435 fb_set_logo_directpalette(info, logo, palette);
436
437 saved_pseudo_palette = info->pseudo_palette;
438 info->pseudo_palette = palette;
439 }
440
441 if (fb_logo.depth <= 4) {
442 logo_new = kmalloc(logo->width * logo->height, GFP_KERNEL);
443 if (logo_new == NULL) {
444 kfree(palette);
445 if (saved_pseudo_palette)
446 info->pseudo_palette = saved_pseudo_palette;
447 return 0;
448 }
449 image.data = logo_new;
450 fb_set_logo(info, logo, logo_new, fb_logo.depth);
451 }
452
453 image.dx = 0;
454 image.dy = y;
455 image.width = logo->width;
456 image.height = logo->height;
457
458 if (rotate) {
459 logo_rotate = kmalloc(logo->width *
460 logo->height, GFP_KERNEL);
461 if (logo_rotate)
462 fb_rotate_logo(info, logo_rotate, &image, rotate);
463 }
464
465 fb_do_show_logo(info, &image, rotate, n);
466
467 kfree(palette);
468 if (saved_pseudo_palette != NULL)
469 info->pseudo_palette = saved_pseudo_palette;
470 kfree(logo_new);
471 kfree(logo_rotate);
472 return logo->height;
473}
474
475
476#ifdef CONFIG_FB_LOGO_EXTRA
477
478#define FB_LOGO_EX_NUM_MAX 10
479static struct logo_data_extra {
480 const struct linux_logo *logo;
481 unsigned int n;
482} fb_logo_ex[FB_LOGO_EX_NUM_MAX];
483static unsigned int fb_logo_ex_num;
484
485void fb_append_extra_logo(const struct linux_logo *logo, unsigned int n)
486{
487 if (!n || fb_logo_ex_num == FB_LOGO_EX_NUM_MAX)
488 return;
489
490 fb_logo_ex[fb_logo_ex_num].logo = logo;
491 fb_logo_ex[fb_logo_ex_num].n = n;
492 fb_logo_ex_num++;
493}
494
495static int fb_prepare_extra_logos(struct fb_info *info, unsigned int height,
496 unsigned int yres)
497{
498 unsigned int i;
499
500 /* FIXME: logo_ex supports only truecolor fb. */
501 if (info->fix.visual != FB_VISUAL_TRUECOLOR)
502 fb_logo_ex_num = 0;
503
504 for (i = 0; i < fb_logo_ex_num; i++) {
505 height += fb_logo_ex[i].logo->height;
506 if (height > yres) {
507 height -= fb_logo_ex[i].logo->height;
508 fb_logo_ex_num = i;
509 break;
510 }
511 }
512 return height;
513}
514
515static int fb_show_extra_logos(struct fb_info *info, int y, int rotate)
516{
517 unsigned int i;
518
519 for (i = 0; i < fb_logo_ex_num; i++)
520 y += fb_show_logo_line(info, rotate,
521 fb_logo_ex[i].logo, y, fb_logo_ex[i].n);
522
523 return y;
524}
525
526#else /* !CONFIG_FB_LOGO_EXTRA */
527
528static inline int fb_prepare_extra_logos(struct fb_info *info,
529 unsigned int height,
530 unsigned int yres)
531{
532 return height;
533}
534
535static inline int fb_show_extra_logos(struct fb_info *info, int y, int rotate)
536{
537 return y;
538}
539
540#endif /* CONFIG_FB_LOGO_EXTRA */
541
542
414int fb_prepare_logo(struct fb_info *info, int rotate) 543int fb_prepare_logo(struct fb_info *info, int rotate)
415{ 544{
416 int depth = fb_get_color_depth(&info->var, &info->fix); 545 int depth = fb_get_color_depth(&info->var, &info->fix);
417 int yres; 546 unsigned int yres;
418 547
419 memset(&fb_logo, 0, sizeof(struct logo_data)); 548 memset(&fb_logo, 0, sizeof(struct logo_data));
420 549
@@ -456,7 +585,7 @@ int fb_prepare_logo(struct fb_info *info, int rotate)
456 if (!fb_logo.logo) { 585 if (!fb_logo.logo) {
457 return 0; 586 return 0;
458 } 587 }
459 588
460 if (rotate == FB_ROTATE_UR || rotate == FB_ROTATE_UD) 589 if (rotate == FB_ROTATE_UR || rotate == FB_ROTATE_UD)
461 yres = info->var.yres; 590 yres = info->var.yres;
462 else 591 else
@@ -473,75 +602,20 @@ int fb_prepare_logo(struct fb_info *info, int rotate)
473 else if (fb_logo.logo->type == LINUX_LOGO_VGA16) 602 else if (fb_logo.logo->type == LINUX_LOGO_VGA16)
474 fb_logo.depth = 4; 603 fb_logo.depth = 4;
475 else 604 else
476 fb_logo.depth = 1; 605 fb_logo.depth = 1;
477 return fb_logo.logo->height; 606
607 return fb_prepare_extra_logos(info, fb_logo.logo->height, yres);
478} 608}
479 609
480int fb_show_logo(struct fb_info *info, int rotate) 610int fb_show_logo(struct fb_info *info, int rotate)
481{ 611{
482 u32 *palette = NULL, *saved_pseudo_palette = NULL; 612 int y;
483 unsigned char *logo_new = NULL, *logo_rotate = NULL;
484 struct fb_image image;
485
486 /* Return if the frame buffer is not mapped or suspended */
487 if (fb_logo.logo == NULL || info->state != FBINFO_STATE_RUNNING ||
488 info->flags & FBINFO_MODULE)
489 return 0;
490
491 image.depth = 8;
492 image.data = fb_logo.logo->data;
493
494 if (fb_logo.needs_cmapreset)
495 fb_set_logocmap(info, fb_logo.logo);
496
497 if (fb_logo.needs_truepalette ||
498 fb_logo.needs_directpalette) {
499 palette = kmalloc(256 * 4, GFP_KERNEL);
500 if (palette == NULL)
501 return 0;
502
503 if (fb_logo.needs_truepalette)
504 fb_set_logo_truepalette(info, fb_logo.logo, palette);
505 else
506 fb_set_logo_directpalette(info, fb_logo.logo, palette);
507
508 saved_pseudo_palette = info->pseudo_palette;
509 info->pseudo_palette = palette;
510 }
511
512 if (fb_logo.depth <= 4) {
513 logo_new = kmalloc(fb_logo.logo->width * fb_logo.logo->height,
514 GFP_KERNEL);
515 if (logo_new == NULL) {
516 kfree(palette);
517 if (saved_pseudo_palette)
518 info->pseudo_palette = saved_pseudo_palette;
519 return 0;
520 }
521 image.data = logo_new;
522 fb_set_logo(info, fb_logo.logo, logo_new, fb_logo.depth);
523 }
524 613
525 image.dx = 0; 614 y = fb_show_logo_line(info, rotate, fb_logo.logo, 0,
526 image.dy = 0; 615 num_online_cpus());
527 image.width = fb_logo.logo->width; 616 y = fb_show_extra_logos(info, y, rotate);
528 image.height = fb_logo.logo->height;
529 617
530 if (rotate) { 618 return y;
531 logo_rotate = kmalloc(fb_logo.logo->width *
532 fb_logo.logo->height, GFP_KERNEL);
533 if (logo_rotate)
534 fb_rotate_logo(info, logo_rotate, &image, rotate);
535 }
536
537 fb_do_show_logo(info, &image, rotate, num_online_cpus());
538
539 kfree(palette);
540 if (saved_pseudo_palette != NULL)
541 info->pseudo_palette = saved_pseudo_palette;
542 kfree(logo_new);
543 kfree(logo_rotate);
544 return fb_logo.logo->height;
545} 619}
546#else 620#else
547int fb_prepare_logo(struct fb_info *info, int rotate) { return 0; } 621int fb_prepare_logo(struct fb_info *info, int rotate) { return 0; }
@@ -1155,17 +1229,15 @@ fb_compat_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
1155} 1229}
1156#endif 1230#endif
1157 1231
1158static int 1232static int
1159fb_mmap(struct file *file, struct vm_area_struct * vma) 1233fb_mmap(struct file *file, struct vm_area_struct * vma)
1160{ 1234{
1161 int fbidx = iminor(file->f_path.dentry->d_inode); 1235 int fbidx = iminor(file->f_path.dentry->d_inode);
1162 struct fb_info *info = registered_fb[fbidx]; 1236 struct fb_info *info = registered_fb[fbidx];
1163 struct fb_ops *fb = info->fbops; 1237 struct fb_ops *fb = info->fbops;
1164 unsigned long off; 1238 unsigned long off;
1165#if !defined(__sparc__) || defined(__sparc_v9__)
1166 unsigned long start; 1239 unsigned long start;
1167 u32 len; 1240 u32 len;
1168#endif
1169 1241
1170 if (vma->vm_pgoff > (~0UL >> PAGE_SHIFT)) 1242 if (vma->vm_pgoff > (~0UL >> PAGE_SHIFT))
1171 return -EINVAL; 1243 return -EINVAL;
@@ -1180,12 +1252,6 @@ fb_mmap(struct file *file, struct vm_area_struct * vma)
1180 return res; 1252 return res;
1181 } 1253 }
1182 1254
1183#if defined(__sparc__) && !defined(__sparc_v9__)
1184 /* Should never get here, all fb drivers should have their own
1185 mmap routines */
1186 return -EINVAL;
1187#else
1188 /* !sparc32... */
1189 lock_kernel(); 1255 lock_kernel();
1190 1256
1191 /* frame buffer memory */ 1257 /* frame buffer memory */
@@ -1209,50 +1275,11 @@ fb_mmap(struct file *file, struct vm_area_struct * vma)
1209 vma->vm_pgoff = off >> PAGE_SHIFT; 1275 vma->vm_pgoff = off >> PAGE_SHIFT;
1210 /* This is an IO map - tell maydump to skip this VMA */ 1276 /* This is an IO map - tell maydump to skip this VMA */
1211 vma->vm_flags |= VM_IO | VM_RESERVED; 1277 vma->vm_flags |= VM_IO | VM_RESERVED;
1212#if defined(__mc68000__) 1278 fb_pgprotect(file, vma, off);
1213#if defined(CONFIG_SUN3)
1214 pgprot_val(vma->vm_page_prot) |= SUN3_PAGE_NOCACHE;
1215#elif defined(CONFIG_MMU)
1216 if (CPU_IS_020_OR_030)
1217 pgprot_val(vma->vm_page_prot) |= _PAGE_NOCACHE030;
1218 if (CPU_IS_040_OR_060) {
1219 pgprot_val(vma->vm_page_prot) &= _CACHEMASK040;
1220 /* Use no-cache mode, serialized */
1221 pgprot_val(vma->vm_page_prot) |= _PAGE_NOCACHE_S;
1222 }
1223#endif
1224#elif defined(__powerpc__)
1225 vma->vm_page_prot = phys_mem_access_prot(file, off >> PAGE_SHIFT,
1226 vma->vm_end - vma->vm_start,
1227 vma->vm_page_prot);
1228#elif defined(__alpha__)
1229 /* Caching is off in the I/O space quadrant by design. */
1230#elif defined(__i386__) || defined(__x86_64__)
1231 if (boot_cpu_data.x86 > 3)
1232 pgprot_val(vma->vm_page_prot) |= _PAGE_PCD;
1233#elif defined(__mips__) || defined(__sparc_v9__)
1234 vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
1235#elif defined(__hppa__)
1236 pgprot_val(vma->vm_page_prot) |= _PAGE_NO_CACHE;
1237#elif defined(__arm__) || defined(__sh__) || defined(__m32r__)
1238 vma->vm_page_prot = pgprot_writecombine(vma->vm_page_prot);
1239#elif defined(__avr32__)
1240 vma->vm_page_prot = __pgprot((pgprot_val(vma->vm_page_prot)
1241 & ~_PAGE_CACHABLE)
1242 | (_PAGE_BUFFER | _PAGE_DIRTY));
1243#elif defined(__ia64__)
1244 if (efi_range_is_wc(vma->vm_start, vma->vm_end - vma->vm_start))
1245 vma->vm_page_prot = pgprot_writecombine(vma->vm_page_prot);
1246 else
1247 vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
1248#else
1249#warning What do we have to do here??
1250#endif
1251 if (io_remap_pfn_range(vma, vma->vm_start, off >> PAGE_SHIFT, 1279 if (io_remap_pfn_range(vma, vma->vm_start, off >> PAGE_SHIFT,
1252 vma->vm_end - vma->vm_start, vma->vm_page_prot)) 1280 vma->vm_end - vma->vm_start, vma->vm_page_prot))
1253 return -EAGAIN; 1281 return -EAGAIN;
1254 return 0; 1282 return 0;
1255#endif /* !sparc32 */
1256} 1283}
1257 1284
1258static int 1285static int
@@ -1388,17 +1415,34 @@ register_framebuffer(struct fb_info *fb_info)
1388 * 1415 *
1389 * Returns negative errno on error, or zero for success. 1416 * Returns negative errno on error, or zero for success.
1390 * 1417 *
1418 * This function will also notify the framebuffer console
1419 * to release the driver.
1420 *
1421 * This is meant to be called within a driver's module_exit()
1422 * function. If this is called outside module_exit(), ensure
1423 * that the driver implements fb_open() and fb_release() to
1424 * check that no processes are using the device.
1391 */ 1425 */
1392 1426
1393int 1427int
1394unregister_framebuffer(struct fb_info *fb_info) 1428unregister_framebuffer(struct fb_info *fb_info)
1395{ 1429{
1396 struct fb_event event; 1430 struct fb_event event;
1397 int i; 1431 int i, ret = 0;
1398 1432
1399 i = fb_info->node; 1433 i = fb_info->node;
1400 if (!registered_fb[i]) 1434 if (!registered_fb[i]) {
1401 return -EINVAL; 1435 ret = -EINVAL;
1436 goto done;
1437 }
1438
1439 event.info = fb_info;
1440 ret = fb_notifier_call_chain(FB_EVENT_FB_UNBIND, &event);
1441
1442 if (ret) {
1443 ret = -EINVAL;
1444 goto done;
1445 }
1402 1446
1403 if (fb_info->pixmap.addr && 1447 if (fb_info->pixmap.addr &&
1404 (fb_info->pixmap.flags & FB_PIXMAP_DEFAULT)) 1448 (fb_info->pixmap.flags & FB_PIXMAP_DEFAULT))
@@ -1410,7 +1454,8 @@ unregister_framebuffer(struct fb_info *fb_info)
1410 device_destroy(fb_class, MKDEV(FB_MAJOR, i)); 1454 device_destroy(fb_class, MKDEV(FB_MAJOR, i));
1411 event.info = fb_info; 1455 event.info = fb_info;
1412 fb_notifier_call_chain(FB_EVENT_FB_UNREGISTERED, &event); 1456 fb_notifier_call_chain(FB_EVENT_FB_UNREGISTERED, &event);
1413 return 0; 1457done:
1458 return ret;
1414} 1459}
1415 1460
1416/** 1461/**
diff --git a/drivers/video/fm2fb.c b/drivers/video/fm2fb.c
index 70ff55b14596..6c91c61cdb63 100644
--- a/drivers/video/fm2fb.c
+++ b/drivers/video/fm2fb.c
@@ -195,13 +195,15 @@ static int fm2fb_blank(int blank, struct fb_info *info)
195static int fm2fb_setcolreg(u_int regno, u_int red, u_int green, u_int blue, 195static int fm2fb_setcolreg(u_int regno, u_int red, u_int green, u_int blue,
196 u_int transp, struct fb_info *info) 196 u_int transp, struct fb_info *info)
197{ 197{
198 if (regno > info->cmap.len) 198 if (regno < 16) {
199 return 1; 199 red >>= 8;
200 red >>= 8; 200 green >>= 8;
201 green >>= 8; 201 blue >>= 8;
202 blue >>= 8; 202
203 ((u32*)(info->pseudo_palette))[regno] = (red << 16) |
204 (green << 8) | blue;
205 }
203 206
204 ((u32*)(info->pseudo_palette))[regno] = (red << 16) | (green << 8) | blue;
205 return 0; 207 return 0;
206} 208}
207 209
@@ -237,7 +239,7 @@ static int __devinit fm2fb_probe(struct zorro_dev *z,
237 if (!zorro_request_device(z,"fm2fb")) 239 if (!zorro_request_device(z,"fm2fb"))
238 return -ENXIO; 240 return -ENXIO;
239 241
240 info = framebuffer_alloc(256 * sizeof(u32), &z->dev); 242 info = framebuffer_alloc(16 * sizeof(u32), &z->dev);
241 if (!info) { 243 if (!info) {
242 zorro_release_device(z); 244 zorro_release_device(z);
243 return -ENOMEM; 245 return -ENOMEM;
diff --git a/drivers/video/gbefb.c b/drivers/video/gbefb.c
index bf0e60b5a3b6..b9b572b293d4 100644
--- a/drivers/video/gbefb.c
+++ b/drivers/video/gbefb.c
@@ -86,7 +86,7 @@ static int gbe_revision;
86 86
87static int ypan, ywrap; 87static int ypan, ywrap;
88 88
89static uint32_t pseudo_palette[256]; 89static uint32_t pseudo_palette[16];
90 90
91static char *mode_option __initdata = NULL; 91static char *mode_option __initdata = NULL;
92 92
@@ -854,8 +854,7 @@ static int gbefb_setcolreg(unsigned regno, unsigned red, unsigned green,
854 green >>= 8; 854 green >>= 8;
855 blue >>= 8; 855 blue >>= 8;
856 856
857 switch (info->var.bits_per_pixel) { 857 if (info->var.bits_per_pixel <= 8) {
858 case 8:
859 /* wait for the color map FIFO to have a free entry */ 858 /* wait for the color map FIFO to have a free entry */
860 for (i = 0; i < 1000 && gbe->cm_fifo >= 63; i++) 859 for (i = 0; i < 1000 && gbe->cm_fifo >= 63; i++)
861 udelay(10); 860 udelay(10);
@@ -864,23 +863,25 @@ static int gbefb_setcolreg(unsigned regno, unsigned red, unsigned green,
864 return 1; 863 return 1;
865 } 864 }
866 gbe->cmap[regno] = (red << 24) | (green << 16) | (blue << 8); 865 gbe->cmap[regno] = (red << 24) | (green << 16) | (blue << 8);
867 break; 866 } else if (regno < 16) {
868 case 15: 867 switch (info->var.bits_per_pixel) {
869 case 16: 868 case 15:
870 red >>= 3; 869 case 16:
871 green >>= 3; 870 red >>= 3;
872 blue >>= 3; 871 green >>= 3;
873 pseudo_palette[regno] = 872 blue >>= 3;
874 (red << info->var.red.offset) | 873 pseudo_palette[regno] =
875 (green << info->var.green.offset) | 874 (red << info->var.red.offset) |
876 (blue << info->var.blue.offset); 875 (green << info->var.green.offset) |
877 break; 876 (blue << info->var.blue.offset);
878 case 32: 877 break;
879 pseudo_palette[regno] = 878 case 32:
880 (red << info->var.red.offset) | 879 pseudo_palette[regno] =
881 (green << info->var.green.offset) | 880 (red << info->var.red.offset) |
882 (blue << info->var.blue.offset); 881 (green << info->var.green.offset) |
883 break; 882 (blue << info->var.blue.offset);
883 break;
884 }
884 } 885 }
885 886
886 return 0; 887 return 0;
diff --git a/drivers/video/i810/i810.h b/drivers/video/i810/i810.h
index 889e4ea5edc1..328ae6c673ec 100644
--- a/drivers/video/i810/i810.h
+++ b/drivers/video/i810/i810.h
@@ -266,7 +266,7 @@ struct i810fb_par {
266 struct i810fb_i2c_chan chan[3]; 266 struct i810fb_i2c_chan chan[3];
267 struct mutex open_lock; 267 struct mutex open_lock;
268 unsigned int use_count; 268 unsigned int use_count;
269 u32 pseudo_palette[17]; 269 u32 pseudo_palette[16];
270 unsigned long mmio_start_phys; 270 unsigned long mmio_start_phys;
271 u8 __iomem *mmio_start_virtual; 271 u8 __iomem *mmio_start_virtual;
272 u8 *edid; 272 u8 *edid;
diff --git a/drivers/video/intelfb/intelfb.h b/drivers/video/intelfb/intelfb.h
index 80b94c19a9fa..6148300fadd6 100644
--- a/drivers/video/intelfb/intelfb.h
+++ b/drivers/video/intelfb/intelfb.h
@@ -302,7 +302,7 @@ struct intelfb_info {
302 u32 ring_lockup; 302 u32 ring_lockup;
303 303
304 /* palette */ 304 /* palette */
305 u32 pseudo_palette[17]; 305 u32 pseudo_palette[16];
306 306
307 /* chip info */ 307 /* chip info */
308 int pci_chipset; 308 int pci_chipset;
diff --git a/drivers/video/logo/Kconfig b/drivers/video/logo/Kconfig
index 9397bcef3018..da219c043c99 100644
--- a/drivers/video/logo/Kconfig
+++ b/drivers/video/logo/Kconfig
@@ -10,6 +10,11 @@ menuconfig LOGO
10 10
11if LOGO 11if LOGO
12 12
13config FB_LOGO_EXTRA
14 bool
15 depends on FB
16 default y if SPU_BASE
17
13config LOGO_LINUX_MONO 18config LOGO_LINUX_MONO
14 bool "Standard black and white Linux logo" 19 bool "Standard black and white Linux logo"
15 default y 20 default y
diff --git a/drivers/video/logo/Makefile b/drivers/video/logo/Makefile
index b985dfad6c63..a5fc4edf84e6 100644
--- a/drivers/video/logo/Makefile
+++ b/drivers/video/logo/Makefile
@@ -14,6 +14,8 @@ obj-$(CONFIG_LOGO_SUPERH_VGA16) += logo_superh_vga16.o
14obj-$(CONFIG_LOGO_SUPERH_CLUT224) += logo_superh_clut224.o 14obj-$(CONFIG_LOGO_SUPERH_CLUT224) += logo_superh_clut224.o
15obj-$(CONFIG_LOGO_M32R_CLUT224) += logo_m32r_clut224.o 15obj-$(CONFIG_LOGO_M32R_CLUT224) += logo_m32r_clut224.o
16 16
17obj-$(CONFIG_SPU_BASE) += logo_spe_clut224.o
18
17# How to generate logo's 19# How to generate logo's
18 20
19# Use logo-cfiles to retrieve list of .c files to be built 21# Use logo-cfiles to retrieve list of .c files to be built
diff --git a/drivers/video/logo/logo_spe_clut224.ppm b/drivers/video/logo/logo_spe_clut224.ppm
new file mode 100644
index 000000000000..d36ad624a79c
--- /dev/null
+++ b/drivers/video/logo/logo_spe_clut224.ppm
@@ -0,0 +1,283 @@
1P3
240 40
3255
40 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0
50 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0
60 0 0 0 0 0 0 0 0 0 0 0 2 2 2 6 6 6
715 15 15 21 21 21 19 19 19 14 14 14 6 6 6 2 2 2
80 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0
90 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0
100 0 0 0 0 0 0 0 0 0 0 0
110 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0
120 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0
130 0 0 0 0 0 0 0 0 2 2 2 21 21 21 55 55 55
1456 56 56 54 54 54 53 53 53 60 60 60 56 56 56 25 25 25
156 6 6 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0
160 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0
170 0 0 0 0 0 0 0 0 0 0 0
180 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0
190 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0
200 0 0 0 0 0 2 2 2 27 27 27 62 62 62 17 17 19
212 2 6 2 2 6 2 2 6 2 2 6 16 16 18 57 57 57
2245 45 45 8 8 8 0 0 0 0 0 0 0 0 0 0 0 0
230 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0
240 0 0 0 0 0 0 0 0 0 0 0
250 0 0 0 0 0 0 0 1 0 0 0 0 0 0 0 0 0
260 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0
270 0 0 0 0 0 16 16 16 62 62 62 8 8 10 2 2 6
282 2 6 2 2 6 2 2 6 12 12 14 67 67 67 16 16 17
2945 45 45 41 41 41 4 4 4 0 0 0 0 0 0 0 0 0
300 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0
310 0 0 0 0 0 0 0 0 0 0 0
320 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0
330 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0
340 0 0 2 2 2 35 35 35 40 40 40 2 2 6 2 2 6
352 2 6 2 2 6 2 2 6 15 15 17 70 70 70 27 27 27
363 3 6 62 62 62 20 20 20 0 0 0 0 0 0 0 0 0
370 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0
380 0 0 0 0 0 0 0 0 0 0 0
390 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0
400 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0
410 0 0 4 4 4 58 58 58 12 12 14 2 2 6 2 2 6
422 2 6 2 2 6 2 2 6 4 4 7 4 4 7 2 2 6
432 2 6 34 34 36 40 40 40 3 3 3 0 0 0 0 0 0
440 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0
450 0 0 0 0 0 0 0 0 0 0 0
460 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0
470 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0
480 0 0 7 7 7 64 64 64 2 2 6 5 5 5 17 17 17
493 3 6 2 2 6 2 2 6 15 15 15 21 21 21 7 7 10
502 2 6 8 8 10 62 62 62 6 6 6 0 0 0 0 0 0
510 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0
520 0 0 0 0 0 0 0 0 0 0 0
530 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0
540 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0
550 0 0 7 7 7 66 66 66 5 5 8 122 122 122 122 122 122
569 9 11 3 3 6 104 96 81 179 179 179 122 122 122 13 13 13
572 2 6 2 2 6 67 67 67 10 10 10 0 0 0 0 0 0
580 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0
590 0 0 0 0 0 0 0 0 0 0 0
600 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0
610 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0
620 0 0 7 7 7 65 65 65 41 41 43 152 149 142 192 191 189
6348 48 49 23 23 24 228 210 210 86 86 86 192 191 189 59 59 61
642 2 6 2 2 6 64 64 64 14 14 14 0 0 0 0 0 0
650 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0
660 0 0 0 0 0 0 0 0 0 0 0
670 0 0 0 0 0 0 0 1 0 0 0 0 0 0 0 0 0
680 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0
690 0 0 7 7 7 66 66 66 59 59 59 59 59 61 86 86 86
7099 84 50 78 66 28 152 149 142 5 5 8 122 122 122 104 96 81
712 2 6 2 2 6 67 67 67 14 14 14 0 0 0 0 0 0
720 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0
730 0 0 0 0 0 0 0 0 0 0 0
740 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0
750 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0
760 0 0 5 5 5 63 63 63 24 24 24 152 149 142 175 122 13
77238 184 12 220 170 13 226 181 52 112 86 32 194 165 151 46 46 47
782 2 6 2 2 6 65 65 65 17 17 17 0 0 0 0 0 0
790 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0
800 0 0 0 0 0 0 0 0 0 0 0
810 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0
820 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0
830 0 0 5 5 5 59 59 59 21 21 21 175 122 13 231 174 11
84240 192 13 237 183 61 240 192 13 240 192 13 234 179 16 81 64 9
852 2 6 2 2 6 63 63 63 25 25 25 0 0 0 0 0 0
860 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0
870 0 0 0 0 0 0 0 0 0 0 0
880 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0
890 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0
900 0 0 5 5 5 54 54 54 51 48 39 189 138 9 238 184 12
91240 192 13 240 192 13 240 192 13 215 161 11 207 152 19 81 64 9
9216 16 18 5 5 8 40 40 40 44 44 44 4 4 4 0 0 0
930 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0
940 0 0 0 0 0 0 0 0 0 0 0
950 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0
960 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0
970 0 0 5 5 5 59 59 59 27 27 27 126 107 64 187 136 12
98220 170 13 201 147 20 189 138 9 198 154 46 199 182 125 70 70 70
9927 27 27 104 96 81 12 12 14 70 70 70 16 16 16 0 0 0
1000 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0
1010 0 0 0 0 0 0 0 0 0 0 0
1020 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0
1030 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0
1040 0 0 17 17 17 70 70 70 12 12 12 168 168 168 174 135 135
105175 122 13 175 122 13 178 151 83 192 191 189 233 233 233 179 179 179
1063 3 6 29 29 31 3 3 6 41 41 41 44 44 44 5 5 5
1070 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0
1080 0 0 0 0 0 0 0 0 0 0 0
1090 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0
1100 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0
1118 8 8 53 53 53 44 44 44 59 59 59 238 238 238 192 191 189
112192 191 189 192 191 189 221 205 205 240 240 240 253 253 253 253 253 253
11370 70 70 2 2 6 2 2 6 5 5 8 67 67 67 22 22 22
1142 2 2 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0
1150 0 0 0 0 0 0 0 0 0 0 0
1160 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0
1170 0 0 0 0 0 0 0 0 0 0 0 0 0 0 5 5 5
11838 38 38 56 56 56 7 7 9 221 205 205 253 253 253 233 233 233
119221 205 205 233 233 233 251 251 251 253 253 253 253 253 253 253 253 253
120192 191 189 2 2 6 2 2 6 2 2 6 25 25 25 64 64 64
12115 15 15 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0
1220 0 0 0 0 0 0 0 0 0 0 0
1230 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0
1240 0 0 0 0 0 0 0 0 0 0 0 2 2 2 27 27 27
12566 66 66 7 7 9 86 86 86 252 252 252 253 253 253 253 253 253
126252 252 252 253 253 253 253 253 253 253 253 253 253 253 253 253 253 253
127244 244 244 19 19 21 2 2 6 2 2 6 2 2 6 38 38 38
12854 54 54 10 10 10 0 0 0 0 0 0 0 0 0 0 0 0
1290 0 0 0 0 0 0 0 0 0 0 0
1300 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0
1310 0 0 0 0 0 0 0 0 0 0 0 14 14 14 62 62 62
13210 10 12 3 3 6 122 122 122 235 235 235 251 251 251 248 248 248
133235 235 235 248 248 248 252 252 252 246 246 246 233 233 233 237 228 228
134223 207 207 70 70 70 2 2 6 2 2 6 2 2 6 2 2 6
13546 46 47 38 38 38 4 4 4 0 0 0 0 0 0 0 0 0
1360 0 0 0 0 0 0 0 0 0 0 0
1370 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0
1380 0 0 0 0 0 0 0 0 2 2 2 33 33 33 44 44 44
1394 4 7 9 9 11 168 168 168 240 240 240 252 252 252 252 252 252
140246 246 246 253 253 253 253 253 253 251 251 251 245 241 241 233 233 233
141221 205 205 192 191 189 29 29 31 27 27 27 9 9 12 2 2 6
1423 3 6 65 65 65 15 15 15 0 0 0 0 0 0 0 0 0
1430 0 0 0 0 0 0 0 0 0 0 0
1440 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0
1450 0 0 0 0 0 0 0 0 6 6 6 59 59 59 19 19 21
14624 24 24 86 86 86 249 249 249 253 253 253 253 253 253 253 253 253
147253 253 253 228 210 210 241 230 230 253 253 253 253 253 253 253 253 253
148251 251 251 228 210 210 152 149 142 5 5 8 27 27 27 4 4 7
1492 2 6 46 46 47 34 34 34 2 2 2 0 0 0 0 0 0
1500 0 0 0 0 0 0 0 0 0 0 0
1510 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0
1520 0 0 0 0 0 0 0 0 16 16 16 67 67 67 19 19 21
15312 12 14 223 207 207 254 20 20 254 20 20 253 127 127 242 223 223
154254 20 20 253 127 127 254 48 48 242 223 223 254 86 86 254 20 20
155254 20 20 253 137 137 233 233 233 32 32 32 35 35 35 23 23 24
1562 2 6 15 15 15 60 60 60 6 6 6 0 0 0 0 0 0
1570 0 0 0 0 0 0 0 0 0 0 0
1580 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0
1590 0 0 0 0 0 4 4 4 38 38 38 48 48 49 22 22 22
16086 86 86 253 253 253 254 20 20 241 230 230 227 216 186 253 137 137
161253 137 137 253 253 253 253 137 137 253 137 137 254 48 48 253 253 253
162253 253 253 253 253 253 253 253 253 62 62 62 2 2 6 23 23 24
1632 2 6 2 2 6 62 62 62 17 17 17 0 0 0 0 0 0
1640 0 0 0 0 0 0 0 0 0 0 0
1650 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0
1660 0 0 0 0 0 14 14 14 70 70 70 14 14 14 16 16 18
167179 179 179 253 253 253 227 216 186 254 48 48 240 219 160 253 127 127
168254 20 20 253 137 137 254 86 86 231 203 141 254 20 20 254 20 20
169253 137 137 253 253 253 253 253 253 104 96 81 2 2 6 23 23 24
1702 2 6 2 2 6 46 46 47 27 27 27 0 0 0 0 0 0
1710 0 0 0 0 0 0 0 0 0 0 0
1720 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0
1730 0 0 4 4 4 39 39 39 42 42 43 19 19 21 13 13 13
174228 210 210 242 223 223 253 253 253 242 223 223 253 127 127 253 127 127
175253 127 127 253 127 127 253 137 137 253 253 253 254 48 48 253 253 253
176228 210 210 253 253 253 253 253 253 122 122 122 2 2 6 19 19 19
1772 2 6 2 2 6 39 39 39 38 38 38 3 3 3 0 0 0
1780 0 0 0 0 0 0 0 0 0 0 0
1790 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0
1800 0 0 8 8 8 60 60 60 3 3 6 33 33 33 38 38 38
181253 137 137 254 86 86 253 137 137 254 86 86 253 137 137 209 197 168
182253 127 127 253 253 253 253 253 253 253 253 253 253 127 127 254 86 86
183254 86 86 253 137 137 253 253 253 122 122 122 2 2 6 17 17 17
1842 2 6 2 2 6 34 34 36 42 42 43 3 3 3 0 0 0
1850 0 0 0 0 0 0 0 0 0 0 0
1860 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0
1870 0 0 13 13 13 59 59 59 2 2 6 9 9 12 56 56 56
188252 252 252 240 219 160 253 137 137 240 219 160 253 253 253 237 228 228
189254 86 86 253 253 253 253 253 253 253 253 253 253 253 253 242 223 223
190227 216 186 249 249 249 253 253 253 122 122 122 16 16 17 17 17 17
19112 12 14 3 3 6 39 39 39 38 38 38 3 3 3 0 0 0
1920 0 0 0 0 0 0 0 0 0 0 0
1930 0 0 0 0 0 0 0 0 0 0 0 0 0 0 2 2 2
1945 5 5 22 22 22 104 96 81 187 136 12 207 152 19 51 48 39
195221 205 205 253 253 253 253 253 253 253 253 253 253 253 253 240 240 240
196250 247 243 253 253 253 253 253 253 253 253 253 253 253 253 253 253 253
197253 253 253 250 247 243 240 219 160 99 84 50 5 5 8 2 2 6
1987 7 9 46 46 47 58 58 58 35 35 35 3 3 3 0 0 0
1990 0 0 0 0 0 0 0 0 0 0 0
2000 0 0 0 0 0 0 0 0 0 0 0 8 8 8 33 33 33
20158 58 58 86 86 86 170 136 53 239 182 13 246 190 14 220 170 13
20244 38 29 179 179 179 253 253 253 253 253 253 253 253 253 240 240 240
203253 253 253 253 253 253 253 253 253 253 253 253 253 253 253 253 253 253
204253 253 253 240 219 160 240 192 13 112 86 32 2 2 6 2 2 6
2053 3 6 41 33 20 220 170 13 53 53 53 4 4 4 0 0 0
2060 0 0 0 0 0 0 0 0 0 0 0
2070 0 0 0 0 0 0 0 0 2 2 2 32 32 32 150 116 44
208215 161 11 215 161 11 228 170 11 245 188 14 246 190 14 246 190 14
209187 136 12 9 9 11 122 122 122 251 251 251 253 253 253 253 253 253
210253 253 253 253 253 253 253 253 253 253 253 253 253 253 253 253 253 253
211248 248 248 211 196 135 239 182 13 175 122 13 6 5 6 2 2 6
21216 14 12 187 136 12 238 184 12 84 78 65 10 10 10 0 0 0
2130 0 0 0 0 0 0 0 0 0 0 0
2140 0 0 0 0 0 0 0 0 4 4 4 53 53 53 207 152 19
215242 185 13 245 188 14 246 190 14 246 190 14 246 190 14 246 190 14
216240 192 13 81 64 9 2 2 6 86 86 86 244 244 244 253 253 253
217253 253 253 253 253 253 253 253 253 253 253 253 253 253 253 253 253 253
218233 233 233 199 182 125 231 174 11 207 152 19 175 122 13 175 122 13
219201 147 20 239 182 13 244 187 14 150 116 44 35 35 35 6 6 6
2200 0 0 0 0 0 0 0 0 0 0 0
2210 0 0 0 0 0 0 0 0 5 5 5 53 53 53 201 147 20
222242 185 13 246 190 14 246 190 14 246 190 14 246 190 14 246 190 14
223246 190 14 220 170 13 13 11 10 2 2 6 152 149 142 253 253 253
224253 253 253 253 253 253 253 253 253 253 253 253 253 253 253 253 253 253
225235 235 235 199 182 125 228 170 11 234 177 12 226 168 11 226 168 11
226234 177 12 246 190 14 246 190 14 234 179 16 126 107 64 36 36 36
2276 6 6 0 0 0 0 0 0 0 0 0
2280 0 0 0 0 0 0 0 0 3 3 3 48 48 49 189 142 35
229242 185 13 246 190 14 246 190 14 246 190 14 246 190 14 246 190 14
230246 190 14 246 190 14 140 112 39 36 36 36 192 191 189 253 253 253
231253 253 253 253 253 253 253 253 253 253 253 253 253 253 253 253 253 253
232192 191 189 112 86 32 226 168 11 244 187 14 244 187 14 244 187 14
233245 188 14 246 190 14 246 190 14 246 190 14 242 185 13 150 116 44
23427 27 27 2 2 2 0 0 0 0 0 0
2350 0 0 0 0 0 0 0 0 6 6 6 58 58 58 189 142 35
236239 182 13 246 190 14 246 190 14 246 190 14 246 190 14 246 190 14
237246 190 14 246 190 14 239 188 14 209 197 168 253 253 253 253 253 253
238253 253 253 253 253 253 253 253 253 253 253 253 252 252 252 168 168 168
23916 16 18 97 67 8 228 170 11 245 188 14 246 190 14 246 190 14
240246 190 14 246 190 14 246 190 14 246 190 14 244 187 14 198 154 46
24135 35 35 3 3 3 0 0 0 0 0 0
2420 0 0 0 0 0 0 0 0 13 13 13 84 78 65 215 161 11
243244 187 14 246 190 14 246 190 14 246 190 14 246 190 14 246 190 14
244246 190 14 246 190 14 238 184 12 187 136 12 168 168 168 244 244 244
245253 253 253 252 252 252 240 240 240 179 179 179 67 67 67 2 2 6
2462 2 6 97 67 8 228 170 11 246 190 14 246 190 14 246 190 14
247246 190 14 246 190 14 245 188 14 234 177 12 189 142 35 86 77 61
24816 16 16 0 0 0 0 0 0 0 0 0
2490 0 0 0 0 0 0 0 0 13 13 13 103 92 56 207 152 19
250228 170 11 234 177 12 239 182 13 242 186 14 245 188 14 246 190 14
251246 190 14 246 190 14 239 182 13 189 138 9 41 33 20 10 10 12
25230 30 31 23 23 24 5 5 8 2 2 6 2 2 6 2 2 6
2534 4 6 112 86 32 215 161 11 245 188 14 246 190 14 245 188 14
254239 182 13 228 170 11 189 142 35 104 96 81 48 48 49 17 17 17
2552 2 2 0 0 0 0 0 0 0 0 0
2560 0 0 0 0 0 0 0 0 5 5 5 39 39 39 103 92 56
257141 109 44 175 122 13 187 136 12 189 138 9 207 152 19 228 170 11
258239 182 13 239 182 13 215 161 11 175 122 13 41 33 20 2 2 6
25915 15 17 20 20 22 20 20 22 20 20 22 20 20 22 8 8 10
2604 4 6 97 67 8 189 138 9 231 174 11 239 182 13 226 168 11
261189 138 9 126 107 64 59 59 59 21 21 21 5 5 5 0 0 0
2620 0 0 0 0 0 0 0 0 0 0 0
2630 0 0 0 0 0 0 0 0 0 0 0 5 5 5 17 17 17
26434 34 34 57 57 57 84 78 65 103 92 56 125 101 41 140 112 39
265175 122 13 175 122 13 175 122 13 97 67 8 72 67 58 84 78 65
26660 60 60 56 56 56 56 56 56 56 56 56 57 57 57 65 65 65
26786 86 86 95 73 34 175 122 13 187 136 12 187 136 12 175 122 13
268103 92 56 41 41 41 10 10 10 0 0 0 0 0 0 0 0 0
2690 0 0 0 0 0 0 0 0 0 0 0
2700 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0
2712 2 2 4 4 4 12 12 12 24 24 24 40 40 40 70 70 70
27286 77 61 95 73 34 88 72 41 72 67 58 36 36 36 10 10 10
2735 5 5 5 5 5 5 5 5 4 4 4 5 5 5 6 6 6
27422 22 22 61 61 59 88 72 41 112 86 32 112 86 32 84 78 65
27532 32 32 6 6 6 0 0 0 0 0 0 0 0 0 0 0 0
2760 0 0 0 0 0 0 0 0 0 0 0
2770 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0
2780 0 0 0 0 0 0 0 0 0 0 0 3 3 3 10 10 10
27921 21 21 33 33 33 31 31 31 16 16 16 2 2 2 0 0 0
2800 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0
2812 2 2 12 12 12 30 30 31 40 40 40 32 32 32 16 16 16
2822 2 2 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0
2830 0 0 0 0 0 0 0 0 0 0 0
diff --git a/drivers/video/macfb.c b/drivers/video/macfb.c
index f7d647dda978..aa8c714d6245 100644
--- a/drivers/video/macfb.c
+++ b/drivers/video/macfb.c
@@ -170,7 +170,7 @@ static struct fb_fix_screeninfo macfb_fix = {
170}; 170};
171 171
172static struct fb_info fb_info; 172static struct fb_info fb_info;
173static u32 pseudo_palette[17]; 173static u32 pseudo_palette[16];
174static int inverse = 0; 174static int inverse = 0;
175static int vidtest = 0; 175static int vidtest = 0;
176 176
@@ -529,56 +529,63 @@ static int macfb_setcolreg(unsigned regno, unsigned red, unsigned green,
529 if (regno >= fb_info->cmap.len) 529 if (regno >= fb_info->cmap.len)
530 return 1; 530 return 1;
531 531
532 switch (fb_info->var.bits_per_pixel) { 532 if (fb_info->var.bits_per_pixel <= 8) {
533 case 1: 533 switch (fb_info->var.bits_per_pixel) {
534 /* We shouldn't get here */ 534 case 1:
535 break; 535 /* We shouldn't get here */
536 case 2: 536 break;
537 case 4: 537 case 2:
538 case 8: 538 case 4:
539 if (macfb_setpalette) 539 case 8:
540 macfb_setpalette(regno, red, green, blue, fb_info); 540 if (macfb_setpalette)
541 else 541 macfb_setpalette(regno, red, green, blue,
542 return 1; 542 fb_info);
543 break; 543 else
544 case 16: 544 return 1;
545 if (fb_info->var.red.offset == 10) { 545 break;
546 /* 1:5:5:5 */ 546 }
547 ((u32*) (fb_info->pseudo_palette))[regno] = 547 } else if (regno < 16) {
548 switch (fb_info->var.bits_per_pixel) {
549 case 16:
550 if (fb_info->var.red.offset == 10) {
551 /* 1:5:5:5 */
552 ((u32*) (fb_info->pseudo_palette))[regno] =
548 ((red & 0xf800) >> 1) | 553 ((red & 0xf800) >> 1) |
549 ((green & 0xf800) >> 6) | 554 ((green & 0xf800) >> 6) |
550 ((blue & 0xf800) >> 11) | 555 ((blue & 0xf800) >> 11) |
551 ((transp != 0) << 15); 556 ((transp != 0) << 15);
552 } else { 557 } else {
553 /* 0:5:6:5 */ 558 /* 0:5:6:5 */
554 ((u32*) (fb_info->pseudo_palette))[regno] = 559 ((u32*) (fb_info->pseudo_palette))[regno] =
555 ((red & 0xf800) ) | 560 ((red & 0xf800) ) |
556 ((green & 0xfc00) >> 5) | 561 ((green & 0xfc00) >> 5) |
557 ((blue & 0xf800) >> 11); 562 ((blue & 0xf800) >> 11);
563 }
564 break;
565 /* I'm pretty sure that one or the other of these
566 doesn't exist on 68k Macs */
567 case 24:
568 red >>= 8;
569 green >>= 8;
570 blue >>= 8;
571 ((u32 *)(fb_info->pseudo_palette))[regno] =
572 (red << fb_info->var.red.offset) |
573 (green << fb_info->var.green.offset) |
574 (blue << fb_info->var.blue.offset);
575 break;
576 case 32:
577 red >>= 8;
578 green >>= 8;
579 blue >>= 8;
580 ((u32 *)(fb_info->pseudo_palette))[regno] =
581 (red << fb_info->var.red.offset) |
582 (green << fb_info->var.green.offset) |
583 (blue << fb_info->var.blue.offset);
584 break;
558 } 585 }
559 break; 586 }
560 /* I'm pretty sure that one or the other of these 587
561 doesn't exist on 68k Macs */ 588 return 0;
562 case 24:
563 red >>= 8;
564 green >>= 8;
565 blue >>= 8;
566 ((u32 *)(fb_info->pseudo_palette))[regno] =
567 (red << fb_info->var.red.offset) |
568 (green << fb_info->var.green.offset) |
569 (blue << fb_info->var.blue.offset);
570 break;
571 case 32:
572 red >>= 8;
573 green >>= 8;
574 blue >>= 8;
575 ((u32 *)(fb_info->pseudo_palette))[regno] =
576 (red << fb_info->var.red.offset) |
577 (green << fb_info->var.green.offset) |
578 (blue << fb_info->var.blue.offset);
579 break;
580 }
581 return 0;
582} 589}
583 590
584static struct fb_ops macfb_ops = { 591static struct fb_ops macfb_ops = {
diff --git a/drivers/video/macmodes.c b/drivers/video/macmodes.c
index ab2149531a04..083f60321ed8 100644
--- a/drivers/video/macmodes.c
+++ b/drivers/video/macmodes.c
@@ -369,9 +369,8 @@ EXPORT_SYMBOL(mac_map_monitor_sense);
369 * 369 *
370 */ 370 */
371 371
372int __devinit mac_find_mode(struct fb_var_screeninfo *var, 372int mac_find_mode(struct fb_var_screeninfo *var, struct fb_info *info,
373 struct fb_info *info, const char *mode_option, 373 const char *mode_option, unsigned int default_bpp)
374 unsigned int default_bpp)
375{ 374{
376 const struct fb_videomode *db = NULL; 375 const struct fb_videomode *db = NULL;
377 unsigned int dbsize = 0; 376 unsigned int dbsize = 0;
diff --git a/drivers/video/macmodes.h b/drivers/video/macmodes.h
index babeb81f467d..b86ba08aac9e 100644
--- a/drivers/video/macmodes.h
+++ b/drivers/video/macmodes.h
@@ -55,10 +55,10 @@ extern int mac_vmode_to_var(int vmode, int cmode,
55extern int mac_var_to_vmode(const struct fb_var_screeninfo *var, int *vmode, 55extern int mac_var_to_vmode(const struct fb_var_screeninfo *var, int *vmode,
56 int *cmode); 56 int *cmode);
57extern int mac_map_monitor_sense(int sense); 57extern int mac_map_monitor_sense(int sense);
58extern int __devinit mac_find_mode(struct fb_var_screeninfo *var, 58extern int mac_find_mode(struct fb_var_screeninfo *var,
59 struct fb_info *info, 59 struct fb_info *info,
60 const char *mode_option, 60 const char *mode_option,
61 unsigned int default_bpp); 61 unsigned int default_bpp);
62 62
63 63
64 /* 64 /*
diff --git a/drivers/video/matrox/matroxfb_accel.c b/drivers/video/matrox/matroxfb_accel.c
index c57aaadf410c..3660d2673bdc 100644
--- a/drivers/video/matrox/matroxfb_accel.c
+++ b/drivers/video/matrox/matroxfb_accel.c
@@ -91,7 +91,6 @@ static inline void matrox_cfb4_pal(u_int32_t* pal) {
91 for (i = 0; i < 16; i++) { 91 for (i = 0; i < 16; i++) {
92 pal[i] = i * 0x11111111U; 92 pal[i] = i * 0x11111111U;
93 } 93 }
94 pal[i] = 0xFFFFFFFF;
95} 94}
96 95
97static inline void matrox_cfb8_pal(u_int32_t* pal) { 96static inline void matrox_cfb8_pal(u_int32_t* pal) {
@@ -100,7 +99,6 @@ static inline void matrox_cfb8_pal(u_int32_t* pal) {
100 for (i = 0; i < 16; i++) { 99 for (i = 0; i < 16; i++) {
101 pal[i] = i * 0x01010101U; 100 pal[i] = i * 0x01010101U;
102 } 101 }
103 pal[i] = 0x0F0F0F0F;
104} 102}
105 103
106static void matroxfb_copyarea(struct fb_info* info, const struct fb_copyarea* area); 104static void matroxfb_copyarea(struct fb_info* info, const struct fb_copyarea* area);
@@ -145,13 +143,10 @@ void matrox_cfbX_init(WPMINFO2) {
145 ACCESS_FBINFO(fbops).fb_imageblit = matroxfb_imageblit; 143 ACCESS_FBINFO(fbops).fb_imageblit = matroxfb_imageblit;
146 } 144 }
147 break; 145 break;
148 case 16: if (ACCESS_FBINFO(fbcon).var.green.length == 5) { 146 case 16: if (ACCESS_FBINFO(fbcon).var.green.length == 5)
149 maccess = 0xC0000001; 147 maccess = 0xC0000001;
150 ACCESS_FBINFO(cmap[16]) = 0x7FFF7FFF; 148 else
151 } else {
152 maccess = 0x40000001; 149 maccess = 0x40000001;
153 ACCESS_FBINFO(cmap[16]) = 0xFFFFFFFF;
154 }
155 mopmode = M_OPMODE_16BPP; 150 mopmode = M_OPMODE_16BPP;
156 if (accel) { 151 if (accel) {
157 ACCESS_FBINFO(fbops).fb_copyarea = matroxfb_copyarea; 152 ACCESS_FBINFO(fbops).fb_copyarea = matroxfb_copyarea;
@@ -161,7 +156,6 @@ void matrox_cfbX_init(WPMINFO2) {
161 break; 156 break;
162 case 24: maccess = 0x00000003; 157 case 24: maccess = 0x00000003;
163 mopmode = M_OPMODE_24BPP; 158 mopmode = M_OPMODE_24BPP;
164 ACCESS_FBINFO(cmap[16]) = 0xFFFFFFFF;
165 if (accel) { 159 if (accel) {
166 ACCESS_FBINFO(fbops).fb_copyarea = matroxfb_copyarea; 160 ACCESS_FBINFO(fbops).fb_copyarea = matroxfb_copyarea;
167 ACCESS_FBINFO(fbops).fb_fillrect = matroxfb_fillrect; 161 ACCESS_FBINFO(fbops).fb_fillrect = matroxfb_fillrect;
@@ -170,7 +164,6 @@ void matrox_cfbX_init(WPMINFO2) {
170 break; 164 break;
171 case 32: maccess = 0x00000002; 165 case 32: maccess = 0x00000002;
172 mopmode = M_OPMODE_32BPP; 166 mopmode = M_OPMODE_32BPP;
173 ACCESS_FBINFO(cmap[16]) = 0xFFFFFFFF;
174 if (accel) { 167 if (accel) {
175 ACCESS_FBINFO(fbops).fb_copyarea = matroxfb_copyarea; 168 ACCESS_FBINFO(fbops).fb_copyarea = matroxfb_copyarea;
176 ACCESS_FBINFO(fbops).fb_fillrect = matroxfb_fillrect; 169 ACCESS_FBINFO(fbops).fb_fillrect = matroxfb_fillrect;
diff --git a/drivers/video/matrox/matroxfb_base.c b/drivers/video/matrox/matroxfb_base.c
index 886e475f22f2..86ca7b179000 100644
--- a/drivers/video/matrox/matroxfb_base.c
+++ b/drivers/video/matrox/matroxfb_base.c
@@ -679,6 +679,8 @@ static int matroxfb_setcolreg(unsigned regno, unsigned red, unsigned green,
679 mga_outb(M_DAC_VAL, blue); 679 mga_outb(M_DAC_VAL, blue);
680 break; 680 break;
681 case 16: 681 case 16:
682 if (regno >= 16)
683 break;
682 { 684 {
683 u_int16_t col = 685 u_int16_t col =
684 (red << ACCESS_FBINFO(fbcon).var.red.offset) | 686 (red << ACCESS_FBINFO(fbcon).var.red.offset) |
@@ -690,6 +692,8 @@ static int matroxfb_setcolreg(unsigned regno, unsigned red, unsigned green,
690 break; 692 break;
691 case 24: 693 case 24:
692 case 32: 694 case 32:
695 if (regno >= 16)
696 break;
693 ACCESS_FBINFO(cmap[regno]) = 697 ACCESS_FBINFO(cmap[regno]) =
694 (red << ACCESS_FBINFO(fbcon).var.red.offset) | 698 (red << ACCESS_FBINFO(fbcon).var.red.offset) |
695 (green << ACCESS_FBINFO(fbcon).var.green.offset) | 699 (green << ACCESS_FBINFO(fbcon).var.green.offset) |
diff --git a/drivers/video/matrox/matroxfb_base.h b/drivers/video/matrox/matroxfb_base.h
index 9c25c2f7966b..d59577c8de86 100644
--- a/drivers/video/matrox/matroxfb_base.h
+++ b/drivers/video/matrox/matroxfb_base.h
@@ -518,7 +518,7 @@ struct matrox_fb_info {
518 dll:1; 518 dll:1;
519 } memory; 519 } memory;
520 } values; 520 } values;
521 u_int32_t cmap[17]; 521 u_int32_t cmap[16];
522}; 522};
523 523
524#define info2minfo(info) container_of(info, struct matrox_fb_info, fbcon) 524#define info2minfo(info) container_of(info, struct matrox_fb_info, fbcon)
diff --git a/drivers/video/matrox/matroxfb_crtc2.c b/drivers/video/matrox/matroxfb_crtc2.c
index 03ae55b168ff..4b3344e03695 100644
--- a/drivers/video/matrox/matroxfb_crtc2.c
+++ b/drivers/video/matrox/matroxfb_crtc2.c
@@ -163,11 +163,6 @@ static void matroxfb_dh_disable(struct matroxfb_dh_fb_info* m2info) {
163 ACCESS_FBINFO(hw).crtc2.ctl = 0x00000004; 163 ACCESS_FBINFO(hw).crtc2.ctl = 0x00000004;
164} 164}
165 165
166static void matroxfb_dh_cfbX_init(struct matroxfb_dh_fb_info* m2info) {
167 /* no acceleration for secondary head... */
168 m2info->cmap[16] = 0xFFFFFFFF;
169}
170
171static void matroxfb_dh_pan_var(struct matroxfb_dh_fb_info* m2info, 166static void matroxfb_dh_pan_var(struct matroxfb_dh_fb_info* m2info,
172 struct fb_var_screeninfo* var) { 167 struct fb_var_screeninfo* var) {
173 unsigned int pos; 168 unsigned int pos;
@@ -385,7 +380,6 @@ static int matroxfb_dh_set_par(struct fb_info* info) {
385 } 380 }
386 } 381 }
387 up_read(&ACCESS_FBINFO(altout).lock); 382 up_read(&ACCESS_FBINFO(altout).lock);
388 matroxfb_dh_cfbX_init(m2info);
389 } 383 }
390 m2info->initialized = 1; 384 m2info->initialized = 1;
391 return 0; 385 return 0;
diff --git a/drivers/video/matrox/matroxfb_crtc2.h b/drivers/video/matrox/matroxfb_crtc2.h
index 177177609be7..1005582e843e 100644
--- a/drivers/video/matrox/matroxfb_crtc2.h
+++ b/drivers/video/matrox/matroxfb_crtc2.h
@@ -28,7 +28,7 @@ struct matroxfb_dh_fb_info {
28 28
29 unsigned int interlaced:1; 29 unsigned int interlaced:1;
30 30
31 u_int32_t cmap[17]; 31 u_int32_t cmap[16];
32}; 32};
33 33
34#endif /* __MATROXFB_CRTC2_H__ */ 34#endif /* __MATROXFB_CRTC2_H__ */
diff --git a/drivers/video/matrox/matroxfb_maven.c b/drivers/video/matrox/matroxfb_maven.c
index 5d29a26b8cdf..de0d755f9019 100644
--- a/drivers/video/matrox/matroxfb_maven.c
+++ b/drivers/video/matrox/matroxfb_maven.c
@@ -273,8 +273,11 @@ static int matroxfb_PLL_mavenclock(const struct matrox_pll_features2* pll,
273 } 273 }
274 } 274 }
275 } 275 }
276
277 /* if h2/post/in/feed have not been assigned, return zero (error) */
276 if (besth2 < 2) 278 if (besth2 < 2)
277 return 0; 279 return 0;
280
278 dprintk(KERN_ERR "clk: %02X %02X %02X %d %d\n", *in, *feed, *post, fxtal, fwant); 281 dprintk(KERN_ERR "clk: %02X %02X %02X %d %d\n", *in, *feed, *post, fxtal, fwant);
279 return fxtal * (*feed) / (*in) * ctl->den; 282 return fxtal * (*feed) / (*in) * ctl->den;
280} 283}
@@ -284,7 +287,7 @@ static unsigned int matroxfb_mavenclock(const struct matrox_pll_ctl* ctl,
284 unsigned int* in, unsigned int* feed, unsigned int* post, 287 unsigned int* in, unsigned int* feed, unsigned int* post,
285 unsigned int* htotal2) { 288 unsigned int* htotal2) {
286 unsigned int fvco; 289 unsigned int fvco;
287 unsigned int p; 290 unsigned int uninitialized_var(p);
288 291
289 fvco = matroxfb_PLL_mavenclock(&maven1000_pll, ctl, htotal, vtotal, in, feed, &p, htotal2); 292 fvco = matroxfb_PLL_mavenclock(&maven1000_pll, ctl, htotal, vtotal, in, feed, &p, htotal2);
290 if (!fvco) 293 if (!fvco)
@@ -715,7 +718,9 @@ static int maven_find_exact_clocks(unsigned int ht, unsigned int vt,
715 m->regs[0x82] = 0x81; 718 m->regs[0x82] = 0x81;
716 719
717 for (x = 0; x < 8; x++) { 720 for (x = 0; x < 8; x++) {
718 unsigned int a, b, c, h2; 721 unsigned int c;
722 unsigned int uninitialized_var(a), uninitialized_var(b),
723 uninitialized_var(h2);
719 unsigned int h = ht + 2 + x; 724 unsigned int h = ht + 2 + x;
720 725
721 if (!matroxfb_mavenclock((m->mode == MATROXFB_OUTPUT_MODE_PAL) ? &maven_PAL : &maven_NTSC, h, vt, &a, &b, &c, &h2)) { 726 if (!matroxfb_mavenclock((m->mode == MATROXFB_OUTPUT_MODE_PAL) ? &maven_PAL : &maven_NTSC, h, vt, &a, &b, &c, &h2)) {
diff --git a/drivers/video/nvidia/nv_hw.c b/drivers/video/nvidia/nv_hw.c
index aff11bbf59a7..d1a10549f543 100644
--- a/drivers/video/nvidia/nv_hw.c
+++ b/drivers/video/nvidia/nv_hw.c
@@ -150,8 +150,7 @@ static void nvGetClocks(struct nvidia_par *par, unsigned int *MClk,
150 M = pll & 0xFF; 150 M = pll & 0xFF;
151 N = (pll >> 8) & 0xFF; 151 N = (pll >> 8) & 0xFF;
152 if (((par->Chipset & 0xfff0) == 0x0290) || 152 if (((par->Chipset & 0xfff0) == 0x0290) ||
153 ((par->Chipset & 0xfff0) == 0x0390) || 153 ((par->Chipset & 0xfff0) == 0x0390)) {
154 ((par->Chipset & 0xfff0) == 0x02E0)) {
155 MB = 1; 154 MB = 1;
156 NB = 1; 155 NB = 1;
157 } else { 156 } else {
@@ -161,7 +160,7 @@ static void nvGetClocks(struct nvidia_par *par, unsigned int *MClk,
161 *MClk = ((N * NB * par->CrystalFreqKHz) / (M * MB)) >> P; 160 *MClk = ((N * NB * par->CrystalFreqKHz) / (M * MB)) >> P;
162 161
163 pll = NV_RD32(par->PMC, 0x4000); 162 pll = NV_RD32(par->PMC, 0x4000);
164 P = (pll >> 16) & 0x03; 163 P = (pll >> 16) & 0x07;
165 pll = NV_RD32(par->PMC, 0x4004); 164 pll = NV_RD32(par->PMC, 0x4004);
166 M = pll & 0xFF; 165 M = pll & 0xFF;
167 N = (pll >> 8) & 0xFF; 166 N = (pll >> 8) & 0xFF;
@@ -892,11 +891,17 @@ void NVCalcStateExt(struct nvidia_par *par,
892 state->general = bpp == 16 ? 0x00101100 : 0x00100100; 891 state->general = bpp == 16 ? 0x00101100 : 0x00100100;
893 state->repaint1 = hDisplaySize < 1280 ? 0x04 : 0x00; 892 state->repaint1 = hDisplaySize < 1280 ? 0x04 : 0x00;
894 break; 893 break;
894 case NV_ARCH_40:
895 if (!par->FlatPanel)
896 state->control = NV_RD32(par->PRAMDAC0, 0x0580) &
897 0xeffffeff;
898 /* fallthrough */
895 case NV_ARCH_10: 899 case NV_ARCH_10:
896 case NV_ARCH_20: 900 case NV_ARCH_20:
897 case NV_ARCH_30: 901 case NV_ARCH_30:
898 default: 902 default:
899 if ((par->Chipset & 0xfff0) == 0x0240) { 903 if ((par->Chipset & 0xfff0) == 0x0240 ||
904 (par->Chipset & 0xfff0) == 0x03d0) {
900 state->arbitration0 = 256; 905 state->arbitration0 = 256;
901 state->arbitration1 = 0x0480; 906 state->arbitration1 = 0x0480;
902 } else if (((par->Chipset & 0xffff) == 0x01A0) || 907 } else if (((par->Chipset & 0xffff) == 0x01A0) ||
@@ -939,7 +944,7 @@ void NVCalcStateExt(struct nvidia_par *par,
939 944
940void NVLoadStateExt(struct nvidia_par *par, RIVA_HW_STATE * state) 945void NVLoadStateExt(struct nvidia_par *par, RIVA_HW_STATE * state)
941{ 946{
942 int i; 947 int i, j;
943 948
944 NV_WR32(par->PMC, 0x0140, 0x00000000); 949 NV_WR32(par->PMC, 0x0140, 0x00000000);
945 NV_WR32(par->PMC, 0x0200, 0xFFFF00FF); 950 NV_WR32(par->PMC, 0x0200, 0xFFFF00FF);
@@ -951,7 +956,8 @@ void NVLoadStateExt(struct nvidia_par *par, RIVA_HW_STATE * state)
951 NV_WR32(par->PTIMER, 0x0100 * 4, 0xFFFFFFFF); 956 NV_WR32(par->PTIMER, 0x0100 * 4, 0xFFFFFFFF);
952 957
953 if (par->Architecture == NV_ARCH_04) { 958 if (par->Architecture == NV_ARCH_04) {
954 NV_WR32(par->PFB, 0x0200, state->config); 959 if (state)
960 NV_WR32(par->PFB, 0x0200, state->config);
955 } else if ((par->Architecture < NV_ARCH_40) || 961 } else if ((par->Architecture < NV_ARCH_40) ||
956 (par->Chipset & 0xfff0) == 0x0040) { 962 (par->Chipset & 0xfff0) == 0x0040) {
957 for (i = 0; i < 8; i++) { 963 for (i = 0; i < 8; i++) {
@@ -964,8 +970,9 @@ void NVLoadStateExt(struct nvidia_par *par, RIVA_HW_STATE * state)
964 970
965 if (((par->Chipset & 0xfff0) == 0x0090) || 971 if (((par->Chipset & 0xfff0) == 0x0090) ||
966 ((par->Chipset & 0xfff0) == 0x01D0) || 972 ((par->Chipset & 0xfff0) == 0x01D0) ||
967 ((par->Chipset & 0xfff0) == 0x02E0) || 973 ((par->Chipset & 0xfff0) == 0x0290) ||
968 ((par->Chipset & 0xfff0) == 0x0290)) 974 ((par->Chipset & 0xfff0) == 0x0390) ||
975 ((par->Chipset & 0xfff0) == 0x03D0))
969 regions = 15; 976 regions = 15;
970 for(i = 0; i < regions; i++) { 977 for(i = 0; i < regions; i++) {
971 NV_WR32(par->PFB, 0x0600 + (i * 0x10), 0); 978 NV_WR32(par->PFB, 0x0600 + (i * 0x10), 0);
@@ -1206,16 +1213,20 @@ void NVLoadStateExt(struct nvidia_par *par, RIVA_HW_STATE * state)
1206 NV_WR32(par->PGRAPH, 0x0608, 0xFFFFFFFF); 1213 NV_WR32(par->PGRAPH, 0x0608, 0xFFFFFFFF);
1207 } else { 1214 } else {
1208 if (par->Architecture >= NV_ARCH_40) { 1215 if (par->Architecture >= NV_ARCH_40) {
1209 u32 tmp;
1210
1211 NV_WR32(par->PGRAPH, 0x0084, 0x401287c0); 1216 NV_WR32(par->PGRAPH, 0x0084, 0x401287c0);
1212 NV_WR32(par->PGRAPH, 0x008C, 0x60de8051); 1217 NV_WR32(par->PGRAPH, 0x008C, 0x60de8051);
1213 NV_WR32(par->PGRAPH, 0x0090, 0x00008000); 1218 NV_WR32(par->PGRAPH, 0x0090, 0x00008000);
1214 NV_WR32(par->PGRAPH, 0x0610, 0x00be3c5f); 1219 NV_WR32(par->PGRAPH, 0x0610, 0x00be3c5f);
1220 NV_WR32(par->PGRAPH, 0x0bc4,
1221 NV_RD32(par->PGRAPH, 0x0bc4) |
1222 0x00008000);
1215 1223
1216 tmp = NV_RD32(par->REGS, 0x1540) & 0xff; 1224 j = NV_RD32(par->REGS, 0x1540) & 0xff;
1217 for(i = 0; tmp && !(tmp & 1); tmp >>= 1, i++); 1225
1218 NV_WR32(par->PGRAPH, 0x5000, i); 1226 if (j) {
1227 for (i = 0; !(j & 1); j >>= 1, i++);
1228 NV_WR32(par->PGRAPH, 0x5000, i);
1229 }
1219 1230
1220 if ((par->Chipset & 0xfff0) == 0x0040) { 1231 if ((par->Chipset & 0xfff0) == 0x0040) {
1221 NV_WR32(par->PGRAPH, 0x09b0, 1232 NV_WR32(par->PGRAPH, 0x09b0,
@@ -1250,6 +1261,7 @@ void NVLoadStateExt(struct nvidia_par *par, RIVA_HW_STATE * state)
1250 case 0x0160: 1261 case 0x0160:
1251 case 0x01D0: 1262 case 0x01D0:
1252 case 0x0240: 1263 case 0x0240:
1264 case 0x03D0:
1253 NV_WR32(par->PMC, 0x1700, 1265 NV_WR32(par->PMC, 0x1700,
1254 NV_RD32(par->PFB, 0x020C)); 1266 NV_RD32(par->PFB, 0x020C));
1255 NV_WR32(par->PMC, 0x1704, 0); 1267 NV_WR32(par->PMC, 0x1704, 0);
@@ -1269,7 +1281,6 @@ void NVLoadStateExt(struct nvidia_par *par, RIVA_HW_STATE * state)
1269 0x00000108); 1281 0x00000108);
1270 break; 1282 break;
1271 case 0x0220: 1283 case 0x0220:
1272 case 0x0230:
1273 NV_WR32(par->PGRAPH, 0x0860, 0); 1284 NV_WR32(par->PGRAPH, 0x0860, 0);
1274 NV_WR32(par->PGRAPH, 0x0864, 0); 1285 NV_WR32(par->PGRAPH, 0x0864, 0);
1275 NV_WR32(par->PRAMDAC, 0x0608, 1286 NV_WR32(par->PRAMDAC, 0x0608,
@@ -1277,8 +1288,8 @@ void NVLoadStateExt(struct nvidia_par *par, RIVA_HW_STATE * state)
1277 0x00100000); 1288 0x00100000);
1278 break; 1289 break;
1279 case 0x0090: 1290 case 0x0090:
1280 case 0x02E0:
1281 case 0x0290: 1291 case 0x0290:
1292 case 0x0390:
1282 NV_WR32(par->PRAMDAC, 0x0608, 1293 NV_WR32(par->PRAMDAC, 0x0608,
1283 NV_RD32(par->PRAMDAC, 0x0608) | 1294 NV_RD32(par->PRAMDAC, 0x0608) |
1284 0x00100000); 1295 0x00100000);
@@ -1355,8 +1366,9 @@ void NVLoadStateExt(struct nvidia_par *par, RIVA_HW_STATE * state)
1355 } else { 1366 } else {
1356 if (((par->Chipset & 0xfff0) == 0x0090) || 1367 if (((par->Chipset & 0xfff0) == 0x0090) ||
1357 ((par->Chipset & 0xfff0) == 0x01D0) || 1368 ((par->Chipset & 0xfff0) == 0x01D0) ||
1358 ((par->Chipset & 0xfff0) == 0x02E0) || 1369 ((par->Chipset & 0xfff0) == 0x0290) ||
1359 ((par->Chipset & 0xfff0) == 0x0290)) { 1370 ((par->Chipset & 0xfff0) == 0x0390) ||
1371 ((par->Chipset & 0xfff0) == 0x03D0)) {
1360 for (i = 0; i < 60; i++) { 1372 for (i = 0; i < 60; i++) {
1361 NV_WR32(par->PGRAPH, 1373 NV_WR32(par->PGRAPH,
1362 0x0D00 + i*4, 1374 0x0D00 + i*4,
@@ -1407,8 +1419,8 @@ void NVLoadStateExt(struct nvidia_par *par, RIVA_HW_STATE * state)
1407 } else { 1419 } else {
1408 if ((par->Chipset & 0xfff0) == 0x0090 || 1420 if ((par->Chipset & 0xfff0) == 0x0090 ||
1409 (par->Chipset & 0xfff0) == 0x01D0 || 1421 (par->Chipset & 0xfff0) == 0x01D0 ||
1410 (par->Chipset & 0xfff0) == 0x02E0 || 1422 (par->Chipset & 0xfff0) == 0x0290 ||
1411 (par->Chipset & 0xfff0) == 0x0290) { 1423 (par->Chipset & 0xfff0) == 0x0390) {
1412 NV_WR32(par->PGRAPH, 0x0DF0, 1424 NV_WR32(par->PGRAPH, 0x0DF0,
1413 NV_RD32(par->PFB, 0x0200)); 1425 NV_RD32(par->PFB, 0x0200));
1414 NV_WR32(par->PGRAPH, 0x0DF4, 1426 NV_WR32(par->PGRAPH, 0x0DF4,
@@ -1495,6 +1507,12 @@ void NVLoadStateExt(struct nvidia_par *par, RIVA_HW_STATE * state)
1495 NV_WR32(par->PFIFO, 0x0494 * 4, 0x00000001); 1507 NV_WR32(par->PFIFO, 0x0494 * 4, 0x00000001);
1496 NV_WR32(par->PFIFO, 0x0495 * 4, 0x00000001); 1508 NV_WR32(par->PFIFO, 0x0495 * 4, 0x00000001);
1497 NV_WR32(par->PFIFO, 0x0140 * 4, 0x00000001); 1509 NV_WR32(par->PFIFO, 0x0140 * 4, 0x00000001);
1510
1511 if (!state) {
1512 par->CurrentState = NULL;
1513 return;
1514 }
1515
1498 if (par->Architecture >= NV_ARCH_10) { 1516 if (par->Architecture >= NV_ARCH_10) {
1499 if (par->twoHeads) { 1517 if (par->twoHeads) {
1500 NV_WR32(par->PCRTC0, 0x0860, state->head); 1518 NV_WR32(par->PCRTC0, 0x0860, state->head);
@@ -1566,6 +1584,9 @@ void NVLoadStateExt(struct nvidia_par *par, RIVA_HW_STATE * state)
1566 VGA_WR08(par->PCIO, 0x03D5, state->interlace); 1584 VGA_WR08(par->PCIO, 0x03D5, state->interlace);
1567 1585
1568 if (!par->FlatPanel) { 1586 if (!par->FlatPanel) {
1587 if (par->Architecture >= NV_ARCH_40)
1588 NV_WR32(par->PRAMDAC0, 0x0580, state->control);
1589
1569 NV_WR32(par->PRAMDAC0, 0x050C, state->pllsel); 1590 NV_WR32(par->PRAMDAC0, 0x050C, state->pllsel);
1570 NV_WR32(par->PRAMDAC0, 0x0508, state->vpll); 1591 NV_WR32(par->PRAMDAC0, 0x0508, state->vpll);
1571 if (par->twoHeads) 1592 if (par->twoHeads)
@@ -1631,6 +1652,9 @@ void NVUnloadStateExt(struct nvidia_par *par, RIVA_HW_STATE * state) {
1631 state->scale = NV_RD32(par->PRAMDAC, 0x0848); 1652 state->scale = NV_RD32(par->PRAMDAC, 0x0848);
1632 state->config = NV_RD32(par->PFB, 0x0200); 1653 state->config = NV_RD32(par->PFB, 0x0200);
1633 1654
1655 if (par->Architecture >= NV_ARCH_40 && !par->FlatPanel)
1656 state->control = NV_RD32(par->PRAMDAC0, 0x0580);
1657
1634 if (par->Architecture >= NV_ARCH_10) { 1658 if (par->Architecture >= NV_ARCH_10) {
1635 if (par->twoHeads) { 1659 if (par->twoHeads) {
1636 state->head = NV_RD32(par->PCRTC0, 0x0860); 1660 state->head = NV_RD32(par->PCRTC0, 0x0860);
diff --git a/drivers/video/nvidia/nv_setup.c b/drivers/video/nvidia/nv_setup.c
index 707e2c8a13ed..82579d3a9970 100644
--- a/drivers/video/nvidia/nv_setup.c
+++ b/drivers/video/nvidia/nv_setup.c
@@ -166,11 +166,13 @@ u8 NVReadDacData(struct nvidia_par *par)
166static int NVIsConnected(struct nvidia_par *par, int output) 166static int NVIsConnected(struct nvidia_par *par, int output)
167{ 167{
168 volatile u32 __iomem *PRAMDAC = par->PRAMDAC0; 168 volatile u32 __iomem *PRAMDAC = par->PRAMDAC0;
169 u32 reg52C, reg608; 169 u32 reg52C, reg608, dac0_reg608 = 0;
170 int present; 170 int present;
171 171
172 if (output) 172 if (output) {
173 PRAMDAC += 0x800; 173 dac0_reg608 = NV_RD32(PRAMDAC, 0x0608);
174 PRAMDAC += 0x800;
175 }
174 176
175 reg52C = NV_RD32(PRAMDAC, 0x052C); 177 reg52C = NV_RD32(PRAMDAC, 0x052C);
176 reg608 = NV_RD32(PRAMDAC, 0x0608); 178 reg608 = NV_RD32(PRAMDAC, 0x0608);
@@ -194,8 +196,8 @@ static int NVIsConnected(struct nvidia_par *par, int output)
194 else 196 else
195 printk("nvidiafb: CRTC%i analog not found\n", output); 197 printk("nvidiafb: CRTC%i analog not found\n", output);
196 198
197 NV_WR32(par->PRAMDAC0, 0x0608, NV_RD32(par->PRAMDAC0, 0x0608) & 199 if (output)
198 0x0000EFFF); 200 NV_WR32(par->PRAMDAC0, 0x0608, dac0_reg608);
199 201
200 NV_WR32(PRAMDAC, 0x052C, reg52C); 202 NV_WR32(PRAMDAC, 0x052C, reg52C);
201 NV_WR32(PRAMDAC, 0x0608, reg608); 203 NV_WR32(PRAMDAC, 0x0608, reg608);
diff --git a/drivers/video/nvidia/nv_type.h b/drivers/video/nvidia/nv_type.h
index 38f7cc0a2331..2fdf77ec39fc 100644
--- a/drivers/video/nvidia/nv_type.h
+++ b/drivers/video/nvidia/nv_type.h
@@ -86,6 +86,7 @@ typedef struct _riva_hw_state {
86 u32 timingV; 86 u32 timingV;
87 u32 displayV; 87 u32 displayV;
88 u32 crtcSync; 88 u32 crtcSync;
89 u32 control;
89} RIVA_HW_STATE; 90} RIVA_HW_STATE;
90 91
91struct riva_regs { 92struct riva_regs {
diff --git a/drivers/video/nvidia/nvidia.c b/drivers/video/nvidia/nvidia.c
index 41f63658572f..a7fe214f0f77 100644
--- a/drivers/video/nvidia/nvidia.c
+++ b/drivers/video/nvidia/nvidia.c
@@ -674,6 +674,7 @@ static int nvidiafb_set_par(struct fb_info *info)
674 info->fbops->fb_sync = nvidiafb_sync; 674 info->fbops->fb_sync = nvidiafb_sync;
675 info->pixmap.scan_align = 4; 675 info->pixmap.scan_align = 4;
676 info->flags &= ~FBINFO_HWACCEL_DISABLED; 676 info->flags &= ~FBINFO_HWACCEL_DISABLED;
677 info->flags |= FBINFO_READS_FAST;
677 NVResetGraphics(info); 678 NVResetGraphics(info);
678 } else { 679 } else {
679 info->fbops->fb_imageblit = cfb_imageblit; 680 info->fbops->fb_imageblit = cfb_imageblit;
@@ -682,6 +683,7 @@ static int nvidiafb_set_par(struct fb_info *info)
682 info->fbops->fb_sync = NULL; 683 info->fbops->fb_sync = NULL;
683 info->pixmap.scan_align = 1; 684 info->pixmap.scan_align = 1;
684 info->flags |= FBINFO_HWACCEL_DISABLED; 685 info->flags |= FBINFO_HWACCEL_DISABLED;
686 info->flags &= ~FBINFO_READS_FAST;
685 } 687 }
686 688
687 par->cursor_reset = 1; 689 par->cursor_reset = 1;
@@ -1193,7 +1195,8 @@ static u32 __devinit nvidia_get_chipset(struct fb_info *info)
1193 1195
1194 printk(KERN_INFO PFX "Device ID: %x \n", id); 1196 printk(KERN_INFO PFX "Device ID: %x \n", id);
1195 1197
1196 if ((id & 0xfff0) == 0x00f0) { 1198 if ((id & 0xfff0) == 0x00f0 ||
1199 (id & 0xfff0) == 0x02e0) {
1197 /* pci-e */ 1200 /* pci-e */
1198 id = NV_RD32(par->REGS, 0x1800); 1201 id = NV_RD32(par->REGS, 0x1800);
1199 1202
@@ -1238,18 +1241,16 @@ static u32 __devinit nvidia_get_arch(struct fb_info *info)
1238 case 0x0040: /* GeForce 6800 */ 1241 case 0x0040: /* GeForce 6800 */
1239 case 0x00C0: /* GeForce 6800 */ 1242 case 0x00C0: /* GeForce 6800 */
1240 case 0x0120: /* GeForce 6800 */ 1243 case 0x0120: /* GeForce 6800 */
1241 case 0x0130:
1242 case 0x0140: /* GeForce 6600 */ 1244 case 0x0140: /* GeForce 6600 */
1243 case 0x0160: /* GeForce 6200 */ 1245 case 0x0160: /* GeForce 6200 */
1244 case 0x01D0: /* GeForce 7200, 7300, 7400 */ 1246 case 0x01D0: /* GeForce 7200, 7300, 7400 */
1245 case 0x02E0: /* GeForce 7300 GT */
1246 case 0x0090: /* GeForce 7800 */ 1247 case 0x0090: /* GeForce 7800 */
1247 case 0x0210: /* GeForce 6800 */ 1248 case 0x0210: /* GeForce 6800 */
1248 case 0x0220: /* GeForce 6200 */ 1249 case 0x0220: /* GeForce 6200 */
1249 case 0x0230:
1250 case 0x0240: /* GeForce 6100 */ 1250 case 0x0240: /* GeForce 6100 */
1251 case 0x0290: /* GeForce 7900 */ 1251 case 0x0290: /* GeForce 7900 */
1252 case 0x0390: /* GeForce 7600 */ 1252 case 0x0390: /* GeForce 7600 */
1253 case 0x03D0:
1253 arch = NV_ARCH_40; 1254 arch = NV_ARCH_40;
1254 break; 1255 break;
1255 case 0x0020: /* TNT, TNT2 */ 1256 case 0x0020: /* TNT, TNT2 */
diff --git a/drivers/video/offb.c b/drivers/video/offb.c
index 885b42836cbb..452433d46973 100644
--- a/drivers/video/offb.c
+++ b/drivers/video/offb.c
@@ -271,7 +271,7 @@ static void __init offb_init_fb(const char *name, const char *full_name,
271 return; 271 return;
272 } 272 }
273 273
274 size = sizeof(struct fb_info) + sizeof(u32) * 17; 274 size = sizeof(struct fb_info) + sizeof(u32) * 16;
275 275
276 info = kmalloc(size, GFP_ATOMIC); 276 info = kmalloc(size, GFP_ATOMIC);
277 277
diff --git a/drivers/video/omap/Kconfig b/drivers/video/omap/Kconfig
new file mode 100644
index 000000000000..7f4d25b8a184
--- /dev/null
+++ b/drivers/video/omap/Kconfig
@@ -0,0 +1,58 @@
1config FB_OMAP
2 tristate "OMAP frame buffer support (EXPERIMENTAL)"
3 depends on FB
4 select FB_CFB_FILLRECT
5 select FB_CFB_COPYAREA
6 select FB_CFB_IMAGEBLIT
7 help
8 Frame buffer driver for OMAP based boards.
9
10config FB_OMAP_BOOTLOADER_INIT
11	bool "Check bootloader initialization"
12 depends on FB_OMAP
13 help
14 Say Y here if you want to enable checking if the bootloader has
15 already initialized the display controller. In this case the
16 driver will skip the initialization.
17
18config FB_OMAP_CONSISTENT_DMA_SIZE
19 int "Consistent DMA memory size (MB)"
20 depends on FB_OMAP
21 range 1 14
22 default 2
23 help
24 Increase the DMA consistent memory size according to your video
25 memory needs, for example if you want to use multiple planes.
26 The size must be 2MB aligned.
27 If unsure say 1.
28
29config FB_OMAP_DMA_TUNE
30 bool "Set DMA SDRAM access priority high"
31 depends on FB_OMAP && ARCH_OMAP1
32 help
33 On systems in which video memory is in system memory
34 (SDRAM) this will speed up graphics DMA operations.
35 If you have such a system and want to use rotation
36 answer yes. Answer no if you have a dedicated video
37 memory, or don't use any of the accelerated features.
38
39config FB_OMAP_LCDC_EXTERNAL
40 bool "External LCD controller support"
41 depends on FB_OMAP
42 help
43 Say Y here, if you want to have support for boards with an
44 external LCD controller connected to the SoSSI/RFBI interface.
45
46config FB_OMAP_LCDC_HWA742
47 bool "Epson HWA742 LCD controller support"
48 depends on FB_OMAP && FB_OMAP_LCDC_EXTERNAL
49 help
50 Say Y here if you want to have support for the external
51 Epson HWA742 LCD controller.
52
53config FB_OMAP_LCDC_BLIZZARD
54 bool "Epson Blizzard LCD controller support"
55 depends on FB_OMAP && FB_OMAP_LCDC_EXTERNAL
56 help
57 Say Y here if you want to have support for the external
58 Epson Blizzard LCD controller.
diff --git a/drivers/video/omap/Makefile b/drivers/video/omap/Makefile
new file mode 100644
index 000000000000..99da8b6d2c36
--- /dev/null
+++ b/drivers/video/omap/Makefile
@@ -0,0 +1,29 @@
1#
2# Makefile for the new OMAP framebuffer device driver
3#
4
5obj-$(CONFIG_FB_OMAP) += omapfb.o
6
7objs-yy := omapfb_main.o
8
9objs-y$(CONFIG_ARCH_OMAP1) += lcdc.o
10objs-y$(CONFIG_ARCH_OMAP2) += dispc.o
11
12objs-$(CONFIG_ARCH_OMAP1)$(CONFIG_FB_OMAP_LCDC_EXTERNAL) += sossi.o
13objs-$(CONFIG_ARCH_OMAP2)$(CONFIG_FB_OMAP_LCDC_EXTERNAL) += rfbi.o
14
15objs-y$(CONFIG_FB_OMAP_LCDC_HWA742) += hwa742.o
16objs-y$(CONFIG_FB_OMAP_LCDC_BLIZZARD) += blizzard.o
17
18objs-y$(CONFIG_MACH_OMAP_H4) += lcd_h4.o
19objs-y$(CONFIG_MACH_OMAP_H3) += lcd_h3.o
20objs-y$(CONFIG_MACH_OMAP_PALMTE) += lcd_palmte.o
21objs-y$(CONFIG_MACH_OMAP_PALMTT) += lcd_palmtt.o
22objs-y$(CONFIG_MACH_OMAP_PALMZ71) += lcd_palmz71.o
23objs-$(CONFIG_ARCH_OMAP16XX)$(CONFIG_MACH_OMAP_INNOVATOR) += lcd_inn1610.o
24objs-$(CONFIG_ARCH_OMAP15XX)$(CONFIG_MACH_OMAP_INNOVATOR) += lcd_inn1510.o
25objs-y$(CONFIG_MACH_OMAP_OSK) += lcd_osk.o
26objs-y$(CONFIG_MACH_SX1) += lcd_sx1.o
27
28omapfb-objs := $(objs-yy)
29
diff --git a/drivers/video/omap/blizzard.c b/drivers/video/omap/blizzard.c
new file mode 100644
index 000000000000..e682940a97a4
--- /dev/null
+++ b/drivers/video/omap/blizzard.c
@@ -0,0 +1,1568 @@
1/*
2 * Epson Blizzard LCD controller driver
3 *
4 * Copyright (C) 2004-2005 Nokia Corporation
5 * Authors: Juha Yrjola <juha.yrjola@nokia.com>
6 * Imre Deak <imre.deak@nokia.com>
7 * YUV support: Jussi Laako <jussi.laako@nokia.com>
8 *
9 * This program is free software; you can redistribute it and/or modify it
10 * under the terms of the GNU General Public License as published by the
11 * Free Software Foundation; either version 2 of the License, or (at your
12 * option) any later version.
13 *
14 * This program is distributed in the hope that it will be useful, but
15 * WITHOUT ANY WARRANTY; without even the implied warranty of
16 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
17 * General Public License for more details.
18 *
19 * You should have received a copy of the GNU General Public License along
20 * with this program; if not, write to the Free Software Foundation, Inc.,
21 * 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
22 */
23#include <linux/module.h>
24#include <linux/mm.h>
25#include <linux/fb.h>
26#include <linux/delay.h>
27#include <linux/clk.h>
28
29#include <asm/arch/dma.h>
30#include <asm/arch/omapfb.h>
31#include <asm/arch/blizzard.h>
32
33#include "dispc.h"
34
35#define MODULE_NAME "blizzard"
36
37#define BLIZZARD_REV_CODE 0x00
38#define BLIZZARD_CONFIG 0x02
39#define BLIZZARD_PLL_DIV 0x04
40#define BLIZZARD_PLL_LOCK_RANGE 0x06
41#define BLIZZARD_PLL_CLOCK_SYNTH_0 0x08
42#define BLIZZARD_PLL_CLOCK_SYNTH_1 0x0a
43#define BLIZZARD_PLL_MODE 0x0c
44#define BLIZZARD_CLK_SRC 0x0e
45#define BLIZZARD_MEM_BANK0_ACTIVATE 0x10
46#define BLIZZARD_MEM_BANK0_STATUS 0x14
47#define BLIZZARD_HDISP 0x2a
48#define BLIZZARD_HNDP 0x2c
49#define BLIZZARD_VDISP0 0x2e
50#define BLIZZARD_VDISP1 0x30
51#define BLIZZARD_VNDP 0x32
52#define BLIZZARD_HSW 0x34
53#define BLIZZARD_VSW 0x38
54#define BLIZZARD_DISPLAY_MODE 0x68
55#define BLIZZARD_INPUT_WIN_X_START_0 0x6c
56#define BLIZZARD_DATA_SOURCE_SELECT 0x8e
57#define BLIZZARD_DISP_MEM_DATA_PORT 0x90
58#define BLIZZARD_DISP_MEM_READ_ADDR0 0x92
59#define BLIZZARD_POWER_SAVE 0xE6
60#define BLIZZARD_NDISP_CTRL_STATUS 0xE8
61
62/* Data source select */
63/* For S1D13745 */
64#define BLIZZARD_SRC_WRITE_LCD_BACKGROUND 0x00
65#define BLIZZARD_SRC_WRITE_LCD_DESTRUCTIVE 0x01
66#define BLIZZARD_SRC_WRITE_OVERLAY_ENABLE 0x04
67#define BLIZZARD_SRC_DISABLE_OVERLAY 0x05
68/* For S1D13744 */
69#define BLIZZARD_SRC_WRITE_LCD 0x00
70#define BLIZZARD_SRC_BLT_LCD 0x06
71
72#define BLIZZARD_COLOR_RGB565 0x01
73#define BLIZZARD_COLOR_YUV420 0x09
74
75#define BLIZZARD_VERSION_S1D13745 0x01 /* Hailstorm */
76#define BLIZZARD_VERSION_S1D13744 0x02 /* Blizzard */
77
78#define BLIZZARD_AUTO_UPDATE_TIME (HZ / 20)
79
80/* Reserve 4 request slots for requests in irq context */
81#define REQ_POOL_SIZE 24
82#define IRQ_REQ_POOL_SIZE 4
83
84#define REQ_FROM_IRQ_POOL 0x01
85
86#define REQ_COMPLETE 0
87#define REQ_PENDING 1
88
89struct blizzard_reg_list {
90 int start;
91 int end;
92};
93
94/* These need to be saved / restored separately from the rest. */
95static struct blizzard_reg_list blizzard_pll_regs[] = {
96 {
97 .start = 0x04, /* Don't save PLL ctrl (0x0C) */
98 .end = 0x0a,
99 },
100 {
101 .start = 0x0e, /* Clock configuration */
102 .end = 0x0e,
103 },
104};
105
106static struct blizzard_reg_list blizzard_gen_regs[] = {
107 {
108 .start = 0x18, /* SDRAM control */
109 .end = 0x20,
110 },
111 {
112 .start = 0x28, /* LCD Panel configuration */
113 .end = 0x5a, /* HSSI interface, TV configuration */
114 },
115};
116
117static u8 blizzard_reg_cache[0x5a / 2];
118
119struct update_param {
120 int plane;
121 int x, y, width, height;
122 int out_x, out_y;
123 int out_width, out_height;
124 int color_mode;
125 int bpp;
126 int flags;
127};
128
129struct blizzard_request {
130 struct list_head entry;
131 unsigned int flags;
132
133 int (*handler)(struct blizzard_request *req);
134 void (*complete)(void *data);
135 void *complete_data;
136
137 union {
138 struct update_param update;
139 struct completion *sync;
140 } par;
141};
142
143struct plane_info {
144 unsigned long offset;
145 int pos_x, pos_y;
146 int width, height;
147 int out_width, out_height;
148 int scr_width;
149 int color_mode;
150 int bpp;
151};
152
153struct blizzard_struct {
154 enum omapfb_update_mode update_mode;
155 enum omapfb_update_mode update_mode_before_suspend;
156
157 struct timer_list auto_update_timer;
158 int stop_auto_update;
159 struct omapfb_update_window auto_update_window;
160 int enabled_planes;
161 int vid_nonstd_color;
162 int vid_scaled;
163 int last_color_mode;
164 int zoom_on;
165 int screen_width;
166 int screen_height;
167 unsigned te_connected:1;
168 unsigned vsync_only:1;
169
170 struct plane_info plane[OMAPFB_PLANE_NUM];
171
172 struct blizzard_request req_pool[REQ_POOL_SIZE];
173 struct list_head pending_req_list;
174 struct list_head free_req_list;
175 struct semaphore req_sema;
176 spinlock_t req_lock;
177
178 unsigned long sys_ck_rate;
179 struct extif_timings reg_timings, lut_timings;
180
181 u32 max_transmit_size;
182 u32 extif_clk_period;
183 int extif_clk_div;
184 unsigned long pix_tx_time;
185 unsigned long line_upd_time;
186
187 struct omapfb_device *fbdev;
188 struct lcd_ctrl_extif *extif;
189 struct lcd_ctrl *int_ctrl;
190
191 void (*power_up)(struct device *dev);
192 void (*power_down)(struct device *dev);
193
194 int version;
195} blizzard;
196
197struct lcd_ctrl blizzard_ctrl;
198
199static u8 blizzard_read_reg(u8 reg)
200{
201 u8 data;
202
203 blizzard.extif->set_bits_per_cycle(8);
204 blizzard.extif->write_command(&reg, 1);
205 blizzard.extif->read_data(&data, 1);
206
207 return data;
208}
209
210static void blizzard_write_reg(u8 reg, u8 val)
211{
212 blizzard.extif->set_bits_per_cycle(8);
213 blizzard.extif->write_command(&reg, 1);
214 blizzard.extif->write_data(&val, 1);
215}
216
217static void blizzard_restart_sdram(void)
218{
219 unsigned long tmo;
220
221 blizzard_write_reg(BLIZZARD_MEM_BANK0_ACTIVATE, 0);
222 udelay(50);
223 blizzard_write_reg(BLIZZARD_MEM_BANK0_ACTIVATE, 1);
224 tmo = jiffies + msecs_to_jiffies(200);
225 while (!(blizzard_read_reg(BLIZZARD_MEM_BANK0_STATUS) & 0x01)) {
226 if (time_after(jiffies, tmo)) {
227 dev_err(blizzard.fbdev->dev,
228 "s1d1374x: SDRAM not ready");
229 break;
230 }
231 msleep(1);
232 }
233}
234
235static void blizzard_stop_sdram(void)
236{
237 blizzard_write_reg(BLIZZARD_MEM_BANK0_ACTIVATE, 0);
238}
239
240/* Wait until the last window was completely written into the controller's
241 * SDRAM and we can start transferring the next window.
242 */
243static void blizzard_wait_line_buffer(void)
244{
245 unsigned long tmo = jiffies + msecs_to_jiffies(30);
246
247 while (blizzard_read_reg(BLIZZARD_NDISP_CTRL_STATUS) & (1 << 7)) {
248 if (time_after(jiffies, tmo)) {
249 if (printk_ratelimit())
250 dev_err(blizzard.fbdev->dev,
251 "s1d1374x: line buffer not ready\n");
252 break;
253 }
254 }
255}
256
257/* Wait until the YYC color space converter is idle. */
258static void blizzard_wait_yyc(void)
259{
260 unsigned long tmo = jiffies + msecs_to_jiffies(30);
261
262 while (blizzard_read_reg(BLIZZARD_NDISP_CTRL_STATUS) & (1 << 4)) {
263 if (time_after(jiffies, tmo)) {
264 if (printk_ratelimit())
265 dev_err(blizzard.fbdev->dev,
266 "s1d1374x: YYC not ready\n");
267 break;
268 }
269 }
270}
271
272static void disable_overlay(void)
273{
274 blizzard_write_reg(BLIZZARD_DATA_SOURCE_SELECT,
275 BLIZZARD_SRC_DISABLE_OVERLAY);
276}
277
278static void set_window_regs(int x_start, int y_start, int x_end, int y_end,
279 int x_out_start, int y_out_start,
280 int x_out_end, int y_out_end, int color_mode,
281 int zoom_off, int flags)
282{
283 u8 tmp[18];
284 u8 cmd;
285
286 x_end--;
287 y_end--;
288 tmp[0] = x_start;
289 tmp[1] = x_start >> 8;
290 tmp[2] = y_start;
291 tmp[3] = y_start >> 8;
292 tmp[4] = x_end;
293 tmp[5] = x_end >> 8;
294 tmp[6] = y_end;
295 tmp[7] = y_end >> 8;
296
297 x_out_end--;
298 y_out_end--;
299 tmp[8] = x_out_start;
300 tmp[9] = x_out_start >> 8;
301 tmp[10] = y_out_start;
302 tmp[11] = y_out_start >> 8;
303 tmp[12] = x_out_end;
304 tmp[13] = x_out_end >> 8;
305 tmp[14] = y_out_end;
306 tmp[15] = y_out_end >> 8;
307
308 tmp[16] = color_mode;
309 if (zoom_off && blizzard.version == BLIZZARD_VERSION_S1D13745)
310 tmp[17] = BLIZZARD_SRC_WRITE_LCD_BACKGROUND;
311 else if (flags & OMAPFB_FORMAT_FLAG_ENABLE_OVERLAY)
312 tmp[17] = BLIZZARD_SRC_WRITE_OVERLAY_ENABLE;
313 else
314 tmp[17] = blizzard.version == BLIZZARD_VERSION_S1D13744 ?
315 BLIZZARD_SRC_WRITE_LCD :
316 BLIZZARD_SRC_WRITE_LCD_DESTRUCTIVE;
317
318 blizzard.extif->set_bits_per_cycle(8);
319 cmd = BLIZZARD_INPUT_WIN_X_START_0;
320 blizzard.extif->write_command(&cmd, 1);
321 blizzard.extif->write_data(tmp, 18);
322}
323
324static void enable_tearsync(int y, int width, int height, int screen_height,
325 int out_height, int force_vsync)
326{
327 u8 b;
328
329 b = blizzard_read_reg(BLIZZARD_NDISP_CTRL_STATUS);
330 b |= 1 << 3;
331 blizzard_write_reg(BLIZZARD_NDISP_CTRL_STATUS, b);
332
333 if (likely(blizzard.vsync_only || force_vsync)) {
334 blizzard.extif->enable_tearsync(1, 0);
335 return;
336 }
337
338 if (width * blizzard.pix_tx_time < blizzard.line_upd_time) {
339 blizzard.extif->enable_tearsync(1, 0);
340 return;
341 }
342
343 if ((width * blizzard.pix_tx_time / 1000) * height <
344 (y + out_height) * (blizzard.line_upd_time / 1000)) {
345 blizzard.extif->enable_tearsync(1, 0);
346 return;
347 }
348
349 blizzard.extif->enable_tearsync(1, y + 1);
350}
351
352static void disable_tearsync(void)
353{
354 u8 b;
355
356 blizzard.extif->enable_tearsync(0, 0);
357 b = blizzard_read_reg(BLIZZARD_NDISP_CTRL_STATUS);
358 b &= ~(1 << 3);
359 blizzard_write_reg(BLIZZARD_NDISP_CTRL_STATUS, b);
360 b = blizzard_read_reg(BLIZZARD_NDISP_CTRL_STATUS);
361}
362
363static inline void set_extif_timings(const struct extif_timings *t);
364
365static inline struct blizzard_request *alloc_req(void)
366{
367 unsigned long flags;
368 struct blizzard_request *req;
369 int req_flags = 0;
370
371 if (!in_interrupt())
372 down(&blizzard.req_sema);
373 else
374 req_flags = REQ_FROM_IRQ_POOL;
375
376 spin_lock_irqsave(&blizzard.req_lock, flags);
377 BUG_ON(list_empty(&blizzard.free_req_list));
378 req = list_entry(blizzard.free_req_list.next,
379 struct blizzard_request, entry);
380 list_del(&req->entry);
381 spin_unlock_irqrestore(&blizzard.req_lock, flags);
382
383 INIT_LIST_HEAD(&req->entry);
384 req->flags = req_flags;
385
386 return req;
387}
388
389static inline void free_req(struct blizzard_request *req)
390{
391 unsigned long flags;
392
393 spin_lock_irqsave(&blizzard.req_lock, flags);
394
395 list_del(&req->entry);
396 list_add(&req->entry, &blizzard.free_req_list);
397 if (!(req->flags & REQ_FROM_IRQ_POOL))
398 up(&blizzard.req_sema);
399
400 spin_unlock_irqrestore(&blizzard.req_lock, flags);
401}
402
403static void process_pending_requests(void)
404{
405 unsigned long flags;
406
407 spin_lock_irqsave(&blizzard.req_lock, flags);
408
409 while (!list_empty(&blizzard.pending_req_list)) {
410 struct blizzard_request *req;
411 void (*complete)(void *);
412 void *complete_data;
413
414 req = list_entry(blizzard.pending_req_list.next,
415 struct blizzard_request, entry);
416 spin_unlock_irqrestore(&blizzard.req_lock, flags);
417
418 if (req->handler(req) == REQ_PENDING)
419 return;
420
421 complete = req->complete;
422 complete_data = req->complete_data;
423 free_req(req);
424
425 if (complete)
426 complete(complete_data);
427
428 spin_lock_irqsave(&blizzard.req_lock, flags);
429 }
430
431 spin_unlock_irqrestore(&blizzard.req_lock, flags);
432}
433
434static void submit_req_list(struct list_head *head)
435{
436 unsigned long flags;
437 int process = 1;
438
439 spin_lock_irqsave(&blizzard.req_lock, flags);
440 if (likely(!list_empty(&blizzard.pending_req_list)))
441 process = 0;
442 list_splice_init(head, blizzard.pending_req_list.prev);
443 spin_unlock_irqrestore(&blizzard.req_lock, flags);
444
445 if (process)
446 process_pending_requests();
447}
448
449static void request_complete(void *data)
450{
451 struct blizzard_request *req = (struct blizzard_request *)data;
452 void (*complete)(void *);
453 void *complete_data;
454
455 complete = req->complete;
456 complete_data = req->complete_data;
457
458 free_req(req);
459
460 if (complete)
461 complete(complete_data);
462
463 process_pending_requests();
464}
465
466
467static int do_full_screen_update(struct blizzard_request *req)
468{
469 int i;
470 int flags;
471
472 for (i = 0; i < 3; i++) {
473 struct plane_info *p = &blizzard.plane[i];
474 if (!(blizzard.enabled_planes & (1 << i))) {
475 blizzard.int_ctrl->enable_plane(i, 0);
476 continue;
477 }
478 dev_dbg(blizzard.fbdev->dev, "pw %d ph %d\n",
479 p->width, p->height);
480 blizzard.int_ctrl->setup_plane(i,
481 OMAPFB_CHANNEL_OUT_LCD, p->offset,
482 p->scr_width, p->pos_x, p->pos_y,
483 p->width, p->height,
484 p->color_mode);
485 blizzard.int_ctrl->enable_plane(i, 1);
486 }
487
488 dev_dbg(blizzard.fbdev->dev, "sw %d sh %d\n",
489 blizzard.screen_width, blizzard.screen_height);
490 blizzard_wait_line_buffer();
491 flags = req->par.update.flags;
492 if (flags & OMAPFB_FORMAT_FLAG_TEARSYNC)
493 enable_tearsync(0, blizzard.screen_width,
494 blizzard.screen_height,
495 blizzard.screen_height,
496 blizzard.screen_height,
497 flags & OMAPFB_FORMAT_FLAG_FORCE_VSYNC);
498 else
499 disable_tearsync();
500
501 set_window_regs(0, 0, blizzard.screen_width, blizzard.screen_height,
502 0, 0, blizzard.screen_width, blizzard.screen_height,
503 BLIZZARD_COLOR_RGB565, blizzard.zoom_on, flags);
504 blizzard.zoom_on = 0;
505
506 blizzard.extif->set_bits_per_cycle(16);
507 /* set_window_regs has left the register index at the right
508 * place, so no need to set it here.
509 */
510 blizzard.extif->transfer_area(blizzard.screen_width,
511 blizzard.screen_height,
512 request_complete, req);
513 return REQ_PENDING;
514}
515
516/* Setup all planes with an overlapping area with the update window. */
517static int do_partial_update(struct blizzard_request *req, int plane,
518 int x, int y, int w, int h,
519 int x_out, int y_out, int w_out, int h_out,
520 int wnd_color_mode, int bpp)
521{
522 int i;
523 int gx1, gy1, gx2, gy2;
524 int gx1_out, gy1_out, gx2_out, gy2_out;
525 int color_mode;
526 int flags;
527 int zoom_off;
528
529 /* Global coordinates, relative to pixel 0,0 of the LCD */
530 gx1 = x + blizzard.plane[plane].pos_x;
531 gy1 = y + blizzard.plane[plane].pos_y;
532 gx2 = gx1 + w;
533 gy2 = gy1 + h;
534
535 flags = req->par.update.flags;
536 if (flags & OMAPFB_FORMAT_FLAG_DOUBLE) {
537 gx1_out = gx1;
538 gy1_out = gy1;
539 gx2_out = gx1 + w * 2;
540 gy2_out = gy1 + h * 2;
541 } else {
542 gx1_out = x_out + blizzard.plane[plane].pos_x;
543 gy1_out = y_out + blizzard.plane[plane].pos_y;
544 gx2_out = gx1_out + w_out;
545 gy2_out = gy1_out + h_out;
546 }
547 zoom_off = blizzard.zoom_on && gx1 == 0 && gy1 == 0 &&
548 w == blizzard.screen_width && h == blizzard.screen_height;
549 blizzard.zoom_on = (!zoom_off && blizzard.zoom_on) ||
550 (w < w_out || h < h_out);
551
552 for (i = 0; i < OMAPFB_PLANE_NUM; i++) {
553 struct plane_info *p = &blizzard.plane[i];
554 int px1, py1;
555 int px2, py2;
556 int pw, ph;
557 int pposx, pposy;
558 unsigned long offset;
559
560 if (!(blizzard.enabled_planes & (1 << i)) ||
561 (wnd_color_mode && i != plane)) {
562 blizzard.int_ctrl->enable_plane(i, 0);
563 continue;
564 }
565 /* Plane coordinates */
566 if (i == plane) {
567 /* Plane in which we are doing the update.
568 * Local coordinates are the one in the update
569 * request.
570 */
571 px1 = x;
572 py1 = y;
573 px2 = x + w;
574 py2 = y + h;
575 pposx = 0;
576 pposy = 0;
577 } else {
578 /* Check if this plane has an overlapping part */
579 px1 = gx1 - p->pos_x;
580 py1 = gy1 - p->pos_y;
581 px2 = gx2 - p->pos_x;
582 py2 = gy2 - p->pos_y;
583 if (px1 >= p->width || py1 >= p->height ||
584 px2 <= 0 || py2 <= 0) {
585 blizzard.int_ctrl->enable_plane(i, 0);
586 continue;
587 }
588 /* Calculate the coordinates for the overlapping
589 * part in the plane's local coordinates.
590 */
591 pposx = -px1;
592 pposy = -py1;
593 if (px1 < 0)
594 px1 = 0;
595 if (py1 < 0)
596 py1 = 0;
597 if (px2 > p->width)
598 px2 = p->width;
599 if (py2 > p->height)
600 py2 = p->height;
601 if (pposx < 0)
602 pposx = 0;
603 if (pposy < 0)
604 pposy = 0;
605 }
606 pw = px2 - px1;
607 ph = py2 - py1;
608 offset = p->offset + (p->scr_width * py1 + px1) * p->bpp / 8;
609 if (wnd_color_mode)
610 /* Window embedded in the plane with a differing
611 * color mode / bpp. Calculate the number of DMA
612 * transfer elements in terms of the plane's bpp.
613 */
614 pw = (pw + 1) * bpp / p->bpp;
615#ifdef VERBOSE
616 dev_dbg(blizzard.fbdev->dev,
617 "plane %d offset %#08lx pposx %d pposy %d "
618 "px1 %d py1 %d pw %d ph %d\n",
619 i, offset, pposx, pposy, px1, py1, pw, ph);
620#endif
621 blizzard.int_ctrl->setup_plane(i,
622 OMAPFB_CHANNEL_OUT_LCD, offset,
623 p->scr_width,
624 pposx, pposy, pw, ph,
625 p->color_mode);
626
627 blizzard.int_ctrl->enable_plane(i, 1);
628 }
629
630 switch (wnd_color_mode) {
631 case OMAPFB_COLOR_YUV420:
632 color_mode = BLIZZARD_COLOR_YUV420;
633 /* Currently only the 16 bits/pixel cycle format is
634 * supported on the external interface. Adjust the number
635 * of transfer elements per line for 12bpp format.
636 */
637 w = (w + 1) * 3 / 4;
638 break;
639 default:
640 color_mode = BLIZZARD_COLOR_RGB565;
641 break;
642 }
643
644 blizzard_wait_line_buffer();
645 if (blizzard.last_color_mode == BLIZZARD_COLOR_YUV420)
646 blizzard_wait_yyc();
647 blizzard.last_color_mode = color_mode;
648 if (flags & OMAPFB_FORMAT_FLAG_TEARSYNC)
649 enable_tearsync(gy1, w, h,
650 blizzard.screen_height,
651 h_out,
652 flags & OMAPFB_FORMAT_FLAG_FORCE_VSYNC);
653 else
654 disable_tearsync();
655
656 set_window_regs(gx1, gy1, gx2, gy2, gx1_out, gy1_out, gx2_out, gy2_out,
657 color_mode, zoom_off, flags);
658
659 blizzard.extif->set_bits_per_cycle(16);
660 /* set_window_regs has left the register index at the right
661 * place, so no need to set it here.
662 */
663 blizzard.extif->transfer_area(w, h, request_complete, req);
664
665 return REQ_PENDING;
666}
667
668static int send_frame_handler(struct blizzard_request *req)
669{
670 struct update_param *par = &req->par.update;
671 int plane = par->plane;
672
673#ifdef VERBOSE
674 dev_dbg(blizzard.fbdev->dev,
675 "send_frame: x %d y %d w %d h %d "
676 "x_out %d y_out %d w_out %d h_out %d "
677 "color_mode %04x flags %04x planes %01x\n",
678 par->x, par->y, par->width, par->height,
679 par->out_x, par->out_y, par->out_width, par->out_height,
680 par->color_mode, par->flags, blizzard.enabled_planes);
681#endif
682 if (par->flags & OMAPFB_FORMAT_FLAG_DISABLE_OVERLAY)
683 disable_overlay();
684
685 if ((blizzard.enabled_planes & blizzard.vid_nonstd_color) ||
686 (blizzard.enabled_planes & blizzard.vid_scaled))
687 return do_full_screen_update(req);
688
689 return do_partial_update(req, plane, par->x, par->y,
690 par->width, par->height,
691 par->out_x, par->out_y,
692 par->out_width, par->out_height,
693 par->color_mode, par->bpp);
694}
695
696static void send_frame_complete(void *data)
697{
698}
699
700#define ADD_PREQ(_x, _y, _w, _h, _x_out, _y_out, _w_out, _h_out) do { \
701 req = alloc_req(); \
702 req->handler = send_frame_handler; \
703 req->complete = send_frame_complete; \
704 req->par.update.plane = plane_idx; \
705 req->par.update.x = _x; \
706 req->par.update.y = _y; \
707 req->par.update.width = _w; \
708 req->par.update.height = _h; \
709 req->par.update.out_x = _x_out; \
710 req->par.update.out_y = _y_out; \
711 req->par.update.out_width = _w_out; \
712 req->par.update.out_height = _h_out; \
713 req->par.update.bpp = bpp; \
714 req->par.update.color_mode = color_mode;\
715 req->par.update.flags = flags; \
716 list_add_tail(&req->entry, req_head); \
717} while(0)
718
719static void create_req_list(int plane_idx,
720 struct omapfb_update_window *win,
721 struct list_head *req_head)
722{
723 struct blizzard_request *req;
724 int x = win->x;
725 int y = win->y;
726 int width = win->width;
727 int height = win->height;
728 int x_out = win->out_x;
729 int y_out = win->out_y;
730 int width_out = win->out_width;
731 int height_out = win->out_height;
732 int color_mode;
733 int bpp;
734 int flags;
735 unsigned int ystart = y;
736 unsigned int yspan = height;
737 unsigned int ystart_out = y_out;
738 unsigned int yspan_out = height_out;
739
740 flags = win->format & ~OMAPFB_FORMAT_MASK;
741 color_mode = win->format & OMAPFB_FORMAT_MASK;
742 switch (color_mode) {
743 case OMAPFB_COLOR_YUV420:
744 /* Embedded window with different color mode */
745 bpp = 12;
746 /* X, Y, height must be aligned at 2, width at 4 pixels */
747 x &= ~1;
748 y &= ~1;
749 height = yspan = height & ~1;
750 width = width & ~3;
751 break;
752 default:
753 /* Same as the plane color mode */
754 bpp = blizzard.plane[plane_idx].bpp;
755 break;
756 }
757 if (width * height * bpp / 8 > blizzard.max_transmit_size) {
758 yspan = blizzard.max_transmit_size / (width * bpp / 8);
759 yspan_out = yspan * height_out / height;
760 ADD_PREQ(x, ystart, width, yspan, x_out, ystart_out,
761 width_out, yspan_out);
762 ystart += yspan;
763 ystart_out += yspan_out;
764 yspan = height - yspan;
765 yspan_out = height_out - yspan_out;
766 flags &= ~OMAPFB_FORMAT_FLAG_TEARSYNC;
767 }
768
769 ADD_PREQ(x, ystart, width, yspan, x_out, ystart_out,
770 width_out, yspan_out);
771}
772
773static void auto_update_complete(void *data)
774{
775 if (!blizzard.stop_auto_update)
776 mod_timer(&blizzard.auto_update_timer,
777 jiffies + BLIZZARD_AUTO_UPDATE_TIME);
778}
779
780static void blizzard_update_window_auto(unsigned long arg)
781{
782 LIST_HEAD(req_list);
783 struct blizzard_request *last;
784 struct omapfb_plane_struct *plane;
785
786 plane = blizzard.fbdev->fb_info[0]->par;
787 create_req_list(plane->idx,
788 &blizzard.auto_update_window, &req_list);
789 last = list_entry(req_list.prev, struct blizzard_request, entry);
790
791 last->complete = auto_update_complete;
792 last->complete_data = NULL;
793
794 submit_req_list(&req_list);
795}
796
797int blizzard_update_window_async(struct fb_info *fbi,
798 struct omapfb_update_window *win,
799 void (*complete_callback)(void *arg),
800 void *complete_callback_data)
801{
802 LIST_HEAD(req_list);
803 struct blizzard_request *last;
804 struct omapfb_plane_struct *plane = fbi->par;
805
806 if (unlikely(blizzard.update_mode != OMAPFB_MANUAL_UPDATE))
807 return -EINVAL;
808 if (unlikely(!blizzard.te_connected &&
809 (win->format & OMAPFB_FORMAT_FLAG_TEARSYNC)))
810 return -EINVAL;
811
812 create_req_list(plane->idx, win, &req_list);
813 last = list_entry(req_list.prev, struct blizzard_request, entry);
814
815 last->complete = complete_callback;
816 last->complete_data = (void *)complete_callback_data;
817
818 submit_req_list(&req_list);
819
820 return 0;
821}
822EXPORT_SYMBOL(blizzard_update_window_async);
823
824static int update_full_screen(void)
825{
826 return blizzard_update_window_async(blizzard.fbdev->fb_info[0],
827 &blizzard.auto_update_window, NULL, NULL);
828
829}
830
831static int blizzard_setup_plane(int plane, int channel_out,
832 unsigned long offset, int screen_width,
833 int pos_x, int pos_y, int width, int height,
834 int color_mode)
835{
836 struct plane_info *p;
837
838#ifdef VERBOSE
839 dev_dbg(blizzard.fbdev->dev,
840 "plane %d ch_out %d offset %#08lx scr_width %d "
841 "pos_x %d pos_y %d width %d height %d color_mode %d\n",
842 plane, channel_out, offset, screen_width,
843 pos_x, pos_y, width, height, color_mode);
844#endif
845 if ((unsigned)plane > OMAPFB_PLANE_NUM)
846 return -EINVAL;
847 p = &blizzard.plane[plane];
848
849 switch (color_mode) {
850 case OMAPFB_COLOR_YUV422:
851 case OMAPFB_COLOR_YUY422:
852 p->bpp = 16;
853 blizzard.vid_nonstd_color &= ~(1 << plane);
854 break;
855 case OMAPFB_COLOR_YUV420:
856 p->bpp = 12;
857 blizzard.vid_nonstd_color |= 1 << plane;
858 break;
859 case OMAPFB_COLOR_RGB565:
860 p->bpp = 16;
861 blizzard.vid_nonstd_color &= ~(1 << plane);
862 break;
863 default:
864 return -EINVAL;
865 }
866
867 p->offset = offset;
868 p->pos_x = pos_x;
869 p->pos_y = pos_y;
870 p->width = width;
871 p->height = height;
872 p->scr_width = screen_width;
873 if (!p->out_width)
874 p->out_width = width;
875 if (!p->out_height)
876 p->out_height = height;
877
878 p->color_mode = color_mode;
879
880 return 0;
881}
882
883static int blizzard_set_scale(int plane, int orig_w, int orig_h,
884 int out_w, int out_h)
885{
886 struct plane_info *p = &blizzard.plane[plane];
887 int r;
888
889 dev_dbg(blizzard.fbdev->dev,
890 "plane %d orig_w %d orig_h %d out_w %d out_h %d\n",
891 plane, orig_w, orig_h, out_w, out_h);
892 if ((unsigned)plane > OMAPFB_PLANE_NUM)
893 return -ENODEV;
894
895 r = blizzard.int_ctrl->set_scale(plane, orig_w, orig_h, out_w, out_h);
896 if (r < 0)
897 return r;
898
899 p->width = orig_w;
900 p->height = orig_h;
901 p->out_width = out_w;
902 p->out_height = out_h;
903 if (orig_w == out_w && orig_h == out_h)
904 blizzard.vid_scaled &= ~(1 << plane);
905 else
906 blizzard.vid_scaled |= 1 << plane;
907
908 return 0;
909}
910
911static int blizzard_enable_plane(int plane, int enable)
912{
913 if (enable)
914 blizzard.enabled_planes |= 1 << plane;
915 else
916 blizzard.enabled_planes &= ~(1 << plane);
917
918 return 0;
919}
920
921static int sync_handler(struct blizzard_request *req)
922{
923 complete(req->par.sync);
924 return REQ_COMPLETE;
925}
926
927static void blizzard_sync(void)
928{
929 LIST_HEAD(req_list);
930 struct blizzard_request *req;
931 struct completion comp;
932
933 req = alloc_req();
934
935 req->handler = sync_handler;
936 req->complete = NULL;
937 init_completion(&comp);
938 req->par.sync = &comp;
939
940 list_add(&req->entry, &req_list);
941 submit_req_list(&req_list);
942
943 wait_for_completion(&comp);
944}
945
946
947static void blizzard_bind_client(struct omapfb_notifier_block *nb)
948{
949 if (blizzard.update_mode == OMAPFB_MANUAL_UPDATE) {
950 omapfb_notify_clients(blizzard.fbdev, OMAPFB_EVENT_READY);
951 }
952}
953
954static int blizzard_set_update_mode(enum omapfb_update_mode mode)
955{
956 if (unlikely(mode != OMAPFB_MANUAL_UPDATE &&
957 mode != OMAPFB_AUTO_UPDATE &&
958 mode != OMAPFB_UPDATE_DISABLED))
959 return -EINVAL;
960
961 if (mode == blizzard.update_mode)
962 return 0;
963
964 dev_info(blizzard.fbdev->dev, "s1d1374x: setting update mode to %s\n",
965 mode == OMAPFB_UPDATE_DISABLED ? "disabled" :
966 (mode == OMAPFB_AUTO_UPDATE ? "auto" : "manual"));
967
968 switch (blizzard.update_mode) {
969 case OMAPFB_MANUAL_UPDATE:
970 omapfb_notify_clients(blizzard.fbdev, OMAPFB_EVENT_DISABLED);
971 break;
972 case OMAPFB_AUTO_UPDATE:
973 blizzard.stop_auto_update = 1;
974 del_timer_sync(&blizzard.auto_update_timer);
975 break;
976 case OMAPFB_UPDATE_DISABLED:
977 break;
978 }
979
980 blizzard.update_mode = mode;
981 blizzard_sync();
982 blizzard.stop_auto_update = 0;
983
984 switch (mode) {
985 case OMAPFB_MANUAL_UPDATE:
986 omapfb_notify_clients(blizzard.fbdev, OMAPFB_EVENT_READY);
987 break;
988 case OMAPFB_AUTO_UPDATE:
989 blizzard_update_window_auto(0);
990 break;
991 case OMAPFB_UPDATE_DISABLED:
992 break;
993 }
994
995 return 0;
996}
997
998static enum omapfb_update_mode blizzard_get_update_mode(void)
999{
1000 return blizzard.update_mode;
1001}
1002
1003static inline void set_extif_timings(const struct extif_timings *t)
1004{
1005 blizzard.extif->set_timings(t);
1006}
1007
1008static inline unsigned long round_to_extif_ticks(unsigned long ps, int div)
1009{
1010 int bus_tick = blizzard.extif_clk_period * div;
1011 return (ps + bus_tick - 1) / bus_tick * bus_tick;
1012}
1013
/*
 * Compute the external interface timings for register access with the
 * given clock divider and store them in blizzard.reg_timings.  All
 * values are in picoseconds, rounded up to whole extif bus ticks.
 * Returns the interface's convert_timings() result (0 on success,
 * non-zero if the timings cannot be programmed with this divider).
 */
static int calc_reg_timing(unsigned long sysclk, int div)
{
	struct extif_timings *t;
	unsigned long systim;

	/* CSOnTime 0, WEOnTime 2 ns, REOnTime 2 ns,
	 * AccessTime 2 ns + 12.2 ns (regs),
	 * WEOffTime = WEOnTime + 1 ns,
	 * REOffTime = REOnTime + 12 ns (regs),
	 * CSOffTime = REOffTime + 1 ns
	 * ReadCycle = 2ns + 2*SYSCLK (regs),
	 * WriteCycle = 2*SYSCLK + 2 ns,
	 * CSPulseWidth = 10 ns */

	/* one sysclk period in picoseconds */
	systim = 1000000000 / (sysclk / 1000);
	dev_dbg(blizzard.fbdev->dev,
		"Blizzard systim %lu ps extif_clk_period %u div %d\n",
		systim, blizzard.extif_clk_period, div);

	t = &blizzard.reg_timings;
	memset(t, 0, sizeof(*t));

	t->clk_div = div;

	t->cs_on_time = 0;
	t->we_on_time = round_to_extif_ticks(t->cs_on_time + 2000, div);
	t->re_on_time = round_to_extif_ticks(t->cs_on_time + 2000, div);
	t->access_time = round_to_extif_ticks(t->re_on_time + 12200, div);
	t->we_off_time = round_to_extif_ticks(t->we_on_time + 1000, div);
	t->re_off_time = round_to_extif_ticks(t->re_on_time + 13000, div);
	t->cs_off_time = round_to_extif_ticks(t->re_off_time + 1000, div);
	t->we_cycle_time = round_to_extif_ticks(2 * systim + 2000, div);
	/* the cycle time must cover the whole strobe pulse */
	if (t->we_cycle_time < t->we_off_time)
		t->we_cycle_time = t->we_off_time;
	t->re_cycle_time = round_to_extif_ticks(2 * systim + 2000, div);
	if (t->re_cycle_time < t->re_off_time)
		t->re_cycle_time = t->re_off_time;
	t->cs_pulse_width = 0;

	dev_dbg(blizzard.fbdev->dev, "[reg]cson %d csoff %d reon %d reoff %d\n",
		 t->cs_on_time, t->cs_off_time, t->re_on_time, t->re_off_time);
	dev_dbg(blizzard.fbdev->dev, "[reg]weon %d weoff %d recyc %d wecyc %d\n",
		 t->we_on_time, t->we_off_time, t->re_cycle_time,
		 t->we_cycle_time);
	dev_dbg(blizzard.fbdev->dev, "[reg]rdaccess %d cspulse %d\n",
		 t->access_time, t->cs_pulse_width);

	return blizzard.extif->convert_timings(t);
}
1063
/*
 * Compute the external interface timings for LUT (palette) access,
 * which needs longer read cycles than register access, and store them
 * in blizzard.lut_timings.  All values are in picoseconds, rounded up
 * to whole extif bus ticks.  Returns the interface's convert_timings()
 * result (0 on success).
 */
static int calc_lut_timing(unsigned long sysclk, int div)
{
	struct extif_timings *t;
	unsigned long systim;

	/* CSOnTime 0, WEOnTime 2 ns, REOnTime 2 ns,
	 * AccessTime 2 ns + 4 * SYSCLK + 26 (lut),
	 * WEOffTime = WEOnTime + 1 ns,
	 * REOffTime = REOnTime + 4*SYSCLK + 26 ns (lut),
	 * CSOffTime = REOffTime + 1 ns
	 * ReadCycle = 2ns + 4*SYSCLK + 26 ns (lut),
	 * WriteCycle = 2*SYSCLK + 2 ns,
	 * CSPulseWidth = 10 ns */

	/* one sysclk period in picoseconds */
	systim = 1000000000 / (sysclk / 1000);
	dev_dbg(blizzard.fbdev->dev,
		"Blizzard systim %lu ps extif_clk_period %u div %d\n",
		systim, blizzard.extif_clk_period, div);

	t = &blizzard.lut_timings;
	memset(t, 0, sizeof(*t));

	t->clk_div = div;

	t->cs_on_time = 0;
	t->we_on_time = round_to_extif_ticks(t->cs_on_time + 2000, div);
	t->re_on_time = round_to_extif_ticks(t->cs_on_time + 2000, div);
	t->access_time = round_to_extif_ticks(t->re_on_time + 4 * systim +
					      26000, div);
	t->we_off_time = round_to_extif_ticks(t->we_on_time + 1000, div);
	t->re_off_time = round_to_extif_ticks(t->re_on_time + 4 * systim +
					      26000, div);
	t->cs_off_time = round_to_extif_ticks(t->re_off_time + 1000, div);
	t->we_cycle_time = round_to_extif_ticks(2 * systim + 2000, div);
	/* the cycle time must cover the whole strobe pulse */
	if (t->we_cycle_time < t->we_off_time)
		t->we_cycle_time = t->we_off_time;
	t->re_cycle_time = round_to_extif_ticks(2000 + 4 * systim + 26000, div);
	if (t->re_cycle_time < t->re_off_time)
		t->re_cycle_time = t->re_off_time;
	t->cs_pulse_width = 0;

	dev_dbg(blizzard.fbdev->dev,
		"[lut]cson %d csoff %d reon %d reoff %d\n",
		 t->cs_on_time, t->cs_off_time, t->re_on_time, t->re_off_time);
	dev_dbg(blizzard.fbdev->dev,
		"[lut]weon %d weoff %d recyc %d wecyc %d\n",
		 t->we_on_time, t->we_off_time, t->re_cycle_time,
		 t->we_cycle_time);
	dev_dbg(blizzard.fbdev->dev, "[lut]rdaccess %d cspulse %d\n",
		 t->access_time, t->cs_pulse_width);

	return blizzard.extif->convert_timings(t);
}
1117
1118static int calc_extif_timings(unsigned long sysclk, int *extif_mem_div)
1119{
1120 int max_clk_div;
1121 int div;
1122
1123 blizzard.extif->get_clk_info(&blizzard.extif_clk_period, &max_clk_div);
1124 for (div = 1; div <= max_clk_div; div++) {
1125 if (calc_reg_timing(sysclk, div) == 0)
1126 break;
1127 }
1128 if (div > max_clk_div) {
1129 dev_dbg(blizzard.fbdev->dev, "reg timing failed\n");
1130 goto err;
1131 }
1132 *extif_mem_div = div;
1133
1134 for (div = 1; div <= max_clk_div; div++) {
1135 if (calc_lut_timing(sysclk, div) == 0)
1136 break;
1137 }
1138
1139 if (div > max_clk_div)
1140 goto err;
1141
1142 blizzard.extif_clk_div = div;
1143
1144 return 0;
1145err:
1146 dev_err(blizzard.fbdev->dev, "can't setup timings\n");
1147 return -1;
1148}
1149
/*
 * Derive the controller's system and pixel clock rates (in Hz) from
 * the external clock rate and the chip's clock source / PLL divider
 * registers.  Results are returned through *sys_clk and *pix_clk.
 */
static void calc_blizzard_clk_rates(unsigned long ext_clk,
				unsigned long *sys_clk, unsigned long *pix_clk)
{
	int pix_clk_src;
	int sys_div = 0, sys_mul = 0;
	int pix_div;

	pix_clk_src = blizzard_read_reg(BLIZZARD_CLK_SRC);
	/* pixel clock divider lives in bits 7:3, stored as div - 1 */
	pix_div = ((pix_clk_src >> 3) & 0x1f) + 1;
	if ((pix_clk_src & (0x3 << 1)) == 0) {
		/* Source is the PLL */
		sys_div = (blizzard_read_reg(BLIZZARD_PLL_DIV) & 0x3f) + 1;
		sys_mul = blizzard_read_reg(BLIZZARD_PLL_CLOCK_SYNTH_0);
		sys_mul |= ((blizzard_read_reg(BLIZZARD_PLL_CLOCK_SYNTH_1)
				& 0x0f) << 11);
		*sys_clk = ext_clk * sys_mul / sys_div;
	} else	/* else source is ext clk, or oscillator */
		*sys_clk = ext_clk;

	*pix_clk = *sys_clk / pix_div;			/* HZ */
	dev_dbg(blizzard.fbdev->dev,
		"ext_clk %ld pix_src %d pix_div %d sys_div %d sys_mul %d\n",
		ext_clk, pix_clk_src & (0x3 << 1), pix_div, sys_div, sys_mul);
	dev_dbg(blizzard.fbdev->dev, "sys_clk %ld pix_clk %ld\n",
		*sys_clk, *pix_clk);
}
1176
/*
 * Configure tearing-effect synchronization from the panel timings the
 * controller is already programmed with.  Chooses between using HS+VS
 * or VS alone depending on whether the external interface can transfer
 * a full line faster than the panel scans it out.  Returns -EDOM if
 * the computed VS period is not longer than HS, otherwise the result
 * of the external interface's setup_tearsync() hook.
 */
static int setup_tearsync(unsigned long pix_clk, int extif_div)
{
	int hdisp, vdisp;
	int hndp, vndp;
	int hsw, vsw;
	int hs, vs;
	int hs_pol_inv, vs_pol_inv;
	int use_hsvs, use_ndp;
	u8 b;

	/* sync pulse widths; bit 7 encodes active-low polarity */
	hsw = blizzard_read_reg(BLIZZARD_HSW);
	vsw = blizzard_read_reg(BLIZZARD_VSW);
	hs_pol_inv = !(hsw & 0x80);
	vs_pol_inv = !(vsw & 0x80);
	hsw = hsw & 0x7f;
	vsw = vsw & 0x3f;

	/* HDISP register is in units of 8 pixels */
	hdisp = blizzard_read_reg(BLIZZARD_HDISP) * 8;
	vdisp = blizzard_read_reg(BLIZZARD_VDISP0) +
		((blizzard_read_reg(BLIZZARD_VDISP1) & 0x3) << 8);

	/* horizontal / vertical non-display periods */
	hndp = blizzard_read_reg(BLIZZARD_HNDP) & 0x3f;
	vndp = blizzard_read_reg(BLIZZARD_VNDP);

	/* time to transfer one pixel (16bpp) in ps */
	blizzard.pix_tx_time = blizzard.reg_timings.we_cycle_time;
	if (blizzard.extif->get_max_tx_rate != NULL) {
		/* The external interface might have a rate limitation,
		 * if so, we have to maximize our transfer rate.
		 */
		unsigned long min_tx_time;
		unsigned long max_tx_rate = blizzard.extif->get_max_tx_rate();

		dev_dbg(blizzard.fbdev->dev, "max_tx_rate %ld HZ\n",
			max_tx_rate);
		min_tx_time = 1000000000 / (max_tx_rate / 1000); /* ps */
		if (blizzard.pix_tx_time < min_tx_time)
			blizzard.pix_tx_time = min_tx_time;
	}

	/* time to update one line in ps */
	blizzard.line_upd_time = (hdisp + hndp) * 1000000 / (pix_clk / 1000);
	blizzard.line_upd_time *= 1000;
	if (hdisp * blizzard.pix_tx_time > blizzard.line_upd_time)
		/* transfer speed too low, we might have to use both
		 * HS and VS */
		use_hsvs = 1;
	else
		/* decent transfer speed, we'll always use only VS */
		use_hsvs = 0;

	if (use_hsvs && (hs_pol_inv || vs_pol_inv)) {
		/* HS or'ed with VS doesn't work, use the active high
		 * TE signal based on HNDP / VNDP */
		use_ndp = 1;
		hs_pol_inv = 0;
		vs_pol_inv = 0;
		hs = hndp;
		vs = vndp;
	} else {
		/* Use HS or'ed with VS as a TE signal if both are needed
		 * or VNDP if only vsync is needed. */
		use_ndp = 0;
		hs = hsw;
		vs = vsw;
		if (!use_hsvs) {
			hs_pol_inv = 0;
			vs_pol_inv = 0;
		}
	}

	/* convert pulse widths from pixel clocks to picoseconds */
	hs = hs * 1000000 / (pix_clk / 1000);		/* ps */
	hs *= 1000;

	vs = vs * (hdisp + hndp) * 1000000 / (pix_clk / 1000);	/* ps */
	vs *= 1000;

	if (vs <= hs)
		return -EDOM;
	/* set VS to 120% of HS to minimize VS detection time */
	vs = hs * 12 / 10;
	/* minimize HS too */
	if (hs > 10000)
		hs = 10000;

	b = blizzard_read_reg(BLIZZARD_NDISP_CTRL_STATUS);
	b &= ~0x3;
	b |= use_hsvs ? 1 : 0;
	b |= (use_ndp && use_hsvs) ? 0 : 2;
	blizzard_write_reg(BLIZZARD_NDISP_CTRL_STATUS, b);

	blizzard.vsync_only = !use_hsvs;

	dev_dbg(blizzard.fbdev->dev,
		"pix_clk %ld HZ pix_tx_time %ld ps line_upd_time %ld ps\n",
		pix_clk, blizzard.pix_tx_time, blizzard.line_upd_time);
	dev_dbg(blizzard.fbdev->dev,
		"hs %d ps vs %d ps mode %d vsync_only %d\n",
		hs, vs, b & 0x3, !use_hsvs);

	return blizzard.extif->setup_tearsync(1, hs, vs,
					      hs_pol_inv, vs_pol_inv,
					      extif_div);
}
1281
1282static void blizzard_get_caps(int plane, struct omapfb_caps *caps)
1283{
1284 blizzard.int_ctrl->get_caps(plane, caps);
1285 caps->ctrl |= OMAPFB_CAPS_MANUAL_UPDATE |
1286 OMAPFB_CAPS_WINDOW_PIXEL_DOUBLE |
1287 OMAPFB_CAPS_WINDOW_SCALE |
1288 OMAPFB_CAPS_WINDOW_OVERLAY;
1289 if (blizzard.te_connected)
1290 caps->ctrl |= OMAPFB_CAPS_TEARSYNC;
1291 caps->wnd_color |= (1 << OMAPFB_COLOR_RGB565) |
1292 (1 << OMAPFB_COLOR_YUV420);
1293}
1294
1295static void _save_regs(struct blizzard_reg_list *list, int cnt)
1296{
1297 int i;
1298
1299 for (i = 0; i < cnt; i++, list++) {
1300 int reg;
1301 for (reg = list->start; reg <= list->end; reg += 2)
1302 blizzard_reg_cache[reg / 2] = blizzard_read_reg(reg);
1303 }
1304}
1305
1306static void _restore_regs(struct blizzard_reg_list *list, int cnt)
1307{
1308 int i;
1309
1310 for (i = 0; i < cnt; i++, list++) {
1311 int reg;
1312 for (reg = list->start; reg <= list->end; reg += 2)
1313 blizzard_write_reg(reg, blizzard_reg_cache[reg / 2]);
1314 }
1315}
1316
1317static void blizzard_save_all_regs(void)
1318{
1319 _save_regs(blizzard_pll_regs, ARRAY_SIZE(blizzard_pll_regs));
1320 _save_regs(blizzard_gen_regs, ARRAY_SIZE(blizzard_gen_regs));
1321}
1322
1323static void blizzard_restore_pll_regs(void)
1324{
1325 _restore_regs(blizzard_pll_regs, ARRAY_SIZE(blizzard_pll_regs));
1326}
1327
1328static void blizzard_restore_gen_regs(void)
1329{
1330 _restore_regs(blizzard_gen_regs, ARRAY_SIZE(blizzard_gen_regs));
1331}
1332
/*
 * Suspend the controller: flush any pending update, disable updates,
 * save all registers, stop SDRAM, put the chip into standby/sleep and
 * finally cut external power if a platform hook is provided.
 */
static void blizzard_suspend(void)
{
	u32 l;
	unsigned long tmo;

	if (blizzard.last_color_mode) {
		update_full_screen();
		blizzard_sync();
	}
	blizzard.update_mode_before_suspend = blizzard.update_mode;
	/* the following will disable clocks as well */
	blizzard_set_update_mode(OMAPFB_UPDATE_DISABLED);

	blizzard_save_all_regs();

	blizzard_stop_sdram();

	l = blizzard_read_reg(BLIZZARD_POWER_SAVE);
	/* Standby, Sleep. We assume we use an external clock. */
	l |= 0x03;
	blizzard_write_reg(BLIZZARD_POWER_SAVE, l);

	/* wait up to 100 ms for the PLL to report sleep state */
	tmo = jiffies + msecs_to_jiffies(100);
	while (!(blizzard_read_reg(BLIZZARD_PLL_MODE) & (1 << 1))) {
		if (time_after(jiffies, tmo)) {
			dev_err(blizzard.fbdev->dev,
				"s1d1374x: sleep timeout, stopping PLL manually\n");
			l = blizzard_read_reg(BLIZZARD_PLL_MODE);
			l &= ~0x03;
			/* Disable PLL, counter function */
			l |= 0x2;
			blizzard_write_reg(BLIZZARD_PLL_MODE, l);
			break;
		}
		msleep(1);
	}

	if (blizzard.power_down != NULL)
		blizzard.power_down(blizzard.fbdev->dev);
}
1373
/*
 * Resume the controller: restore power, wake the chip from sleep,
 * restore the PLL registers and wait for lock, restart SDRAM, restore
 * the remaining registers and re-enable the previous update mode.
 * NOTE(review): the PLL-lock wait loop below has no timeout, unlike
 * the one in blizzard_suspend() — verify this cannot hang on a dead
 * chip.
 */
static void blizzard_resume(void)
{
	u32 l;

	if (blizzard.power_up != NULL)
		blizzard.power_up(blizzard.fbdev->dev);

	l = blizzard_read_reg(BLIZZARD_POWER_SAVE);
	/* Standby, Sleep */
	l &= ~0x03;
	blizzard_write_reg(BLIZZARD_POWER_SAVE, l);

	blizzard_restore_pll_regs();
	l = blizzard_read_reg(BLIZZARD_PLL_MODE);
	l &= ~0x03;
	/* Enable PLL, counter function */
	l |= 0x1;
	blizzard_write_reg(BLIZZARD_PLL_MODE, l);

	/* wait for PLL lock (bit 7 of PLL_DIV) */
	while (!(blizzard_read_reg(BLIZZARD_PLL_DIV) & (1 << 7)))
		msleep(1);

	blizzard_restart_sdram();

	blizzard_restore_gen_regs();

	/* Enable display */
	blizzard_write_reg(BLIZZARD_DISPLAY_MODE, 0x01);

	/* the following will enable clocks as necessary */
	blizzard_set_update_mode(blizzard.update_mode_before_suspend);

	/* Force a background update */
	blizzard.zoom_on = 1;
	update_full_screen();
	blizzard_sync();
}
1411
1412static int blizzard_init(struct omapfb_device *fbdev, int ext_mode,
1413 struct omapfb_mem_desc *req_vram)
1414{
1415 int r = 0, i;
1416 u8 rev, conf;
1417 unsigned long ext_clk;
1418 int extif_div;
1419 unsigned long sys_clk, pix_clk;
1420 struct omapfb_platform_data *omapfb_conf;
1421 struct blizzard_platform_data *ctrl_conf;
1422
1423 blizzard.fbdev = fbdev;
1424
1425 BUG_ON(!fbdev->ext_if || !fbdev->int_ctrl);
1426
1427 blizzard.fbdev = fbdev;
1428 blizzard.extif = fbdev->ext_if;
1429 blizzard.int_ctrl = fbdev->int_ctrl;
1430
1431 omapfb_conf = fbdev->dev->platform_data;
1432 ctrl_conf = omapfb_conf->ctrl_platform_data;
1433 if (ctrl_conf == NULL || ctrl_conf->get_clock_rate == NULL) {
1434 dev_err(fbdev->dev, "s1d1374x: missing platform data\n");
1435 r = -ENOENT;
1436 goto err1;
1437 }
1438
1439 blizzard.power_down = ctrl_conf->power_down;
1440 blizzard.power_up = ctrl_conf->power_up;
1441
1442 spin_lock_init(&blizzard.req_lock);
1443
1444 if ((r = blizzard.int_ctrl->init(fbdev, 1, req_vram)) < 0)
1445 goto err1;
1446
1447 if ((r = blizzard.extif->init(fbdev)) < 0)
1448 goto err2;
1449
1450 blizzard_ctrl.set_color_key = blizzard.int_ctrl->set_color_key;
1451 blizzard_ctrl.get_color_key = blizzard.int_ctrl->get_color_key;
1452 blizzard_ctrl.setup_mem = blizzard.int_ctrl->setup_mem;
1453 blizzard_ctrl.mmap = blizzard.int_ctrl->mmap;
1454
1455 ext_clk = ctrl_conf->get_clock_rate(fbdev->dev);
1456 if ((r = calc_extif_timings(ext_clk, &extif_div)) < 0)
1457 goto err3;
1458
1459 set_extif_timings(&blizzard.reg_timings);
1460
1461 if (blizzard.power_up != NULL)
1462 blizzard.power_up(fbdev->dev);
1463
1464 calc_blizzard_clk_rates(ext_clk, &sys_clk, &pix_clk);
1465
1466 if ((r = calc_extif_timings(sys_clk, &extif_div)) < 0)
1467 goto err3;
1468 set_extif_timings(&blizzard.reg_timings);
1469
1470 if (!(blizzard_read_reg(BLIZZARD_PLL_DIV) & 0x80)) {
1471 dev_err(fbdev->dev,
1472 "controller not initialized by the bootloader\n");
1473 r = -ENODEV;
1474 goto err3;
1475 }
1476
1477 if (ctrl_conf->te_connected) {
1478 if ((r = setup_tearsync(pix_clk, extif_div)) < 0)
1479 goto err3;
1480 blizzard.te_connected = 1;
1481 }
1482
1483 rev = blizzard_read_reg(BLIZZARD_REV_CODE);
1484 conf = blizzard_read_reg(BLIZZARD_CONFIG);
1485
1486 switch (rev & 0xfc) {
1487 case 0x9c:
1488 blizzard.version = BLIZZARD_VERSION_S1D13744;
1489 pr_info("omapfb: s1d13744 LCD controller rev %d "
1490 "initialized (CNF pins %x)\n", rev & 0x03, conf & 0x07);
1491 break;
1492 case 0xa4:
1493 blizzard.version = BLIZZARD_VERSION_S1D13745;
1494 pr_info("omapfb: s1d13745 LCD controller rev %d "
1495 "initialized (CNF pins %x)\n", rev & 0x03, conf & 0x07);
1496 break;
1497 default:
1498 dev_err(fbdev->dev, "invalid s1d1374x revision %02x\n",
1499 rev);
1500 r = -ENODEV;
1501 goto err3;
1502 }
1503
1504 blizzard.max_transmit_size = blizzard.extif->max_transmit_size;
1505
1506 blizzard.update_mode = OMAPFB_UPDATE_DISABLED;
1507
1508 blizzard.auto_update_window.x = 0;
1509 blizzard.auto_update_window.y = 0;
1510 blizzard.auto_update_window.width = fbdev->panel->x_res;
1511 blizzard.auto_update_window.height = fbdev->panel->y_res;
1512 blizzard.auto_update_window.out_x = 0;
1513 blizzard.auto_update_window.out_x = 0;
1514 blizzard.auto_update_window.out_width = fbdev->panel->x_res;
1515 blizzard.auto_update_window.out_height = fbdev->panel->y_res;
1516 blizzard.auto_update_window.format = 0;
1517
1518 blizzard.screen_width = fbdev->panel->x_res;
1519 blizzard.screen_height = fbdev->panel->y_res;
1520
1521 init_timer(&blizzard.auto_update_timer);
1522 blizzard.auto_update_timer.function = blizzard_update_window_auto;
1523 blizzard.auto_update_timer.data = 0;
1524
1525 INIT_LIST_HEAD(&blizzard.free_req_list);
1526 INIT_LIST_HEAD(&blizzard.pending_req_list);
1527 for (i = 0; i < ARRAY_SIZE(blizzard.req_pool); i++)
1528 list_add(&blizzard.req_pool[i].entry, &blizzard.free_req_list);
1529 BUG_ON(i <= IRQ_REQ_POOL_SIZE);
1530 sema_init(&blizzard.req_sema, i - IRQ_REQ_POOL_SIZE);
1531
1532 return 0;
1533err3:
1534 if (blizzard.power_down != NULL)
1535 blizzard.power_down(fbdev->dev);
1536 blizzard.extif->cleanup();
1537err2:
1538 blizzard.int_ctrl->cleanup();
1539err1:
1540 return r;
1541}
1542
1543static void blizzard_cleanup(void)
1544{
1545 blizzard_set_update_mode(OMAPFB_UPDATE_DISABLED);
1546 blizzard.extif->cleanup();
1547 blizzard.int_ctrl->cleanup();
1548 if (blizzard.power_down != NULL)
1549 blizzard.power_down(blizzard.fbdev->dev);
1550}
1551
/*
 * LCD controller operations exported for the Blizzard chip.
 * set_color_key/get_color_key/setup_mem/mmap are filled in at init
 * time from the internal controller (see blizzard_init()).
 */
struct lcd_ctrl blizzard_ctrl = {
	.name			= "blizzard",
	.init			= blizzard_init,
	.cleanup		= blizzard_cleanup,
	.bind_client		= blizzard_bind_client,
	.get_caps		= blizzard_get_caps,
	.set_update_mode	= blizzard_set_update_mode,
	.get_update_mode	= blizzard_get_update_mode,
	.setup_plane		= blizzard_setup_plane,
	.set_scale		= blizzard_set_scale,
	.enable_plane		= blizzard_enable_plane,
	.update_window		= blizzard_update_window_async,
	.sync			= blizzard_sync,
	.suspend		= blizzard_suspend,
	.resume			= blizzard_resume,
};
1568
diff --git a/drivers/video/omap/dispc.c b/drivers/video/omap/dispc.c
new file mode 100644
index 000000000000..f4c23434de6f
--- /dev/null
+++ b/drivers/video/omap/dispc.c
@@ -0,0 +1,1502 @@
1/*
2 * OMAP2 display controller support
3 *
4 * Copyright (C) 2005 Nokia Corporation
5 * Author: Imre Deak <imre.deak@nokia.com>
6 *
7 * This program is free software; you can redistribute it and/or modify it
8 * under the terms of the GNU General Public License as published by the
9 * Free Software Foundation; either version 2 of the License, or (at your
10 * option) any later version.
11 *
12 * This program is distributed in the hope that it will be useful, but
13 * WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
15 * General Public License for more details.
16 *
17 * You should have received a copy of the GNU General Public License along
18 * with this program; if not, write to the Free Software Foundation, Inc.,
19 * 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
20 */
21#include <linux/kernel.h>
22#include <linux/dma-mapping.h>
23#include <linux/vmalloc.h>
24#include <linux/clk.h>
25#include <linux/io.h>
26
27#include <asm/arch/sram.h>
28#include <asm/arch/omapfb.h>
29#include <asm/arch/board.h>
30
31#include "dispc.h"
32
33#define MODULE_NAME "dispc"
34
35#define DSS_BASE 0x48050000
36#define DSS_SYSCONFIG 0x0010
37
38#define DISPC_BASE 0x48050400
39
40/* DISPC common */
41#define DISPC_REVISION 0x0000
42#define DISPC_SYSCONFIG 0x0010
43#define DISPC_SYSSTATUS 0x0014
44#define DISPC_IRQSTATUS 0x0018
45#define DISPC_IRQENABLE 0x001C
46#define DISPC_CONTROL 0x0040
47#define DISPC_CONFIG 0x0044
48#define DISPC_CAPABLE 0x0048
49#define DISPC_DEFAULT_COLOR0 0x004C
50#define DISPC_DEFAULT_COLOR1 0x0050
51#define DISPC_TRANS_COLOR0 0x0054
52#define DISPC_TRANS_COLOR1 0x0058
53#define DISPC_LINE_STATUS 0x005C
54#define DISPC_LINE_NUMBER 0x0060
55#define DISPC_TIMING_H 0x0064
56#define DISPC_TIMING_V 0x0068
57#define DISPC_POL_FREQ 0x006C
58#define DISPC_DIVISOR 0x0070
59#define DISPC_SIZE_DIG 0x0078
60#define DISPC_SIZE_LCD 0x007C
61
62#define DISPC_DATA_CYCLE1 0x01D4
63#define DISPC_DATA_CYCLE2 0x01D8
64#define DISPC_DATA_CYCLE3 0x01DC
65
66/* DISPC GFX plane */
67#define DISPC_GFX_BA0 0x0080
68#define DISPC_GFX_BA1 0x0084
69#define DISPC_GFX_POSITION 0x0088
70#define DISPC_GFX_SIZE 0x008C
71#define DISPC_GFX_ATTRIBUTES 0x00A0
72#define DISPC_GFX_FIFO_THRESHOLD 0x00A4
73#define DISPC_GFX_FIFO_SIZE_STATUS 0x00A8
74#define DISPC_GFX_ROW_INC 0x00AC
75#define DISPC_GFX_PIXEL_INC 0x00B0
76#define DISPC_GFX_WINDOW_SKIP 0x00B4
77#define DISPC_GFX_TABLE_BA 0x00B8
78
79/* DISPC Video plane 1/2 */
80#define DISPC_VID1_BASE 0x00BC
81#define DISPC_VID2_BASE 0x014C
82
83/* Offsets into DISPC_VID1/2_BASE */
84#define DISPC_VID_BA0 0x0000
85#define DISPC_VID_BA1 0x0004
86#define DISPC_VID_POSITION 0x0008
87#define DISPC_VID_SIZE 0x000C
88#define DISPC_VID_ATTRIBUTES 0x0010
89#define DISPC_VID_FIFO_THRESHOLD 0x0014
90#define DISPC_VID_FIFO_SIZE_STATUS 0x0018
91#define DISPC_VID_ROW_INC 0x001C
92#define DISPC_VID_PIXEL_INC 0x0020
93#define DISPC_VID_FIR 0x0024
94#define DISPC_VID_PICTURE_SIZE 0x0028
95#define DISPC_VID_ACCU0 0x002C
96#define DISPC_VID_ACCU1 0x0030
97
98/* 8 elements in 8 byte increments */
99#define DISPC_VID_FIR_COEF_H0 0x0034
100/* 8 elements in 8 byte increments */
101#define DISPC_VID_FIR_COEF_HV0 0x0038
102/* 5 elements in 4 byte increments */
103#define DISPC_VID_CONV_COEF0 0x0074
104
105#define DISPC_IRQ_FRAMEMASK 0x0001
106#define DISPC_IRQ_VSYNC 0x0002
107#define DISPC_IRQ_EVSYNC_EVEN 0x0004
108#define DISPC_IRQ_EVSYNC_ODD 0x0008
109#define DISPC_IRQ_ACBIAS_COUNT_STAT 0x0010
110#define DISPC_IRQ_PROG_LINE_NUM 0x0020
111#define DISPC_IRQ_GFX_FIFO_UNDERFLOW 0x0040
112#define DISPC_IRQ_GFX_END_WIN 0x0080
113#define DISPC_IRQ_PAL_GAMMA_MASK 0x0100
114#define DISPC_IRQ_OCP_ERR 0x0200
115#define DISPC_IRQ_VID1_FIFO_UNDERFLOW 0x0400
116#define DISPC_IRQ_VID1_END_WIN 0x0800
117#define DISPC_IRQ_VID2_FIFO_UNDERFLOW 0x1000
118#define DISPC_IRQ_VID2_END_WIN 0x2000
119#define DISPC_IRQ_SYNC_LOST 0x4000
120
121#define DISPC_IRQ_MASK_ALL 0x7fff
122
123#define DISPC_IRQ_MASK_ERROR (DISPC_IRQ_GFX_FIFO_UNDERFLOW | \
124 DISPC_IRQ_VID1_FIFO_UNDERFLOW | \
125 DISPC_IRQ_VID2_FIFO_UNDERFLOW | \
126 DISPC_IRQ_SYNC_LOST)
127
128#define RFBI_CONTROL 0x48050040
129
130#define MAX_PALETTE_SIZE (256 * 16)
131
/* Bit mask of 'len' bits starting at bit 'pos'.  Arguments are fully
 * parenthesized so expressions can be passed safely. */
#define FLD_MASK(pos, len) ((((1 << (len)) - 1)) << (pos))

/* Read-modify-write a DISPC register field.  No trailing semicolon in
 * the expansion, so the macro behaves like a statement and is safe in
 * unbraced if/else bodies. */
#define MOD_REG_FLD(reg, mask, val) \
	dispc_write_reg((reg), (dispc_read_reg(reg) & ~(mask)) | (val))
136
137#define OMAP2_SRAM_START 0x40200000
138/* Maximum size, in reality this is smaller if SRAM is partially locked. */
139#define OMAP2_SRAM_SIZE 0xa0000 /* 640k */
140
141/* We support the SDRAM / SRAM types. See OMAPFB_PLANE_MEMTYPE_* in omapfb.h */
142#define DISPC_MEMTYPE_NUM 2
143
144#define RESMAP_SIZE(_page_cnt) \
145 ((_page_cnt + (sizeof(unsigned long) * 8) - 1) / 8)
146#define RESMAP_PTR(_res_map, _page_nr) \
147 (((_res_map)->map) + (_page_nr) / (sizeof(unsigned long) * 8))
148#define RESMAP_MASK(_page_nr) \
149 (1 << ((_page_nr) & (sizeof(unsigned long) * 8 - 1)))
150
/* Page-granular allocation bitmap for one memory region. */
struct resmap {
	unsigned long start;	/* start address of the region */
	unsigned page_cnt;	/* number of pages covered by the map */
	unsigned long *map;	/* one bit per page; see RESMAP_* macros */
};
156
/* Driver-global DISPC state. */
static struct {
	u32 base;		/* ioremapped DISPC register base */

	struct omapfb_mem_desc mem_desc;
	struct resmap *res_map[DISPC_MEMTYPE_NUM];
	atomic_t map_count[OMAPFB_PLANE_NUM];

	/* CLUT/palette buffer, DMA-capable */
	dma_addr_t palette_paddr;
	void *palette_vaddr;

	int ext_mode;		/* non-zero when using RFBI */

	unsigned long enabled_irqs;
	void (*irq_callback)(void *);
	void *irq_callback_data;
	struct completion frame_done;

	/* last programmed scaling increments, 0 = no scaling */
	int fir_hinc[OMAPFB_PLANE_NUM];
	int fir_vinc[OMAPFB_PLANE_NUM];

	struct clk *dss_ick, *dss1_fck;
	struct clk *dss_54m_fck;

	enum omapfb_update_mode update_mode;
	struct omapfb_device *fbdev;

	struct omapfb_color_key color_key;
} dispc;
185
186static void enable_lcd_clocks(int enable);
187
188static void inline dispc_write_reg(int idx, u32 val)
189{
190 __raw_writel(val, dispc.base + idx);
191}
192
193static u32 inline dispc_read_reg(int idx)
194{
195 u32 l = __raw_readl(dispc.base + idx);
196 return l;
197}
198
199/* Select RFBI or bypass mode */
200static void enable_rfbi_mode(int enable)
201{
202 u32 l;
203
204 l = dispc_read_reg(DISPC_CONTROL);
205 /* Enable RFBI, GPIO0/1 */
206 l &= ~((1 << 11) | (1 << 15) | (1 << 16));
207 l |= enable ? (1 << 11) : 0;
208 /* RFBI En: GPIO0/1=10 RFBI Dis: GPIO0/1=11 */
209 l |= 1 << 15;
210 l |= enable ? 0 : (1 << 16);
211 dispc_write_reg(DISPC_CONTROL, l);
212
213 /* Set bypass mode in RFBI module */
214 l = __raw_readl(io_p2v(RFBI_CONTROL));
215 l |= enable ? 0 : (1 << 1);
216 __raw_writel(l, io_p2v(RFBI_CONTROL));
217}
218
219static void set_lcd_data_lines(int data_lines)
220{
221 u32 l;
222 int code = 0;
223
224 switch (data_lines) {
225 case 12:
226 code = 0;
227 break;
228 case 16:
229 code = 1;
230 break;
231 case 18:
232 code = 2;
233 break;
234 case 24:
235 code = 3;
236 break;
237 default:
238 BUG();
239 }
240
241 l = dispc_read_reg(DISPC_CONTROL);
242 l &= ~(0x03 << 8);
243 l |= code << 8;
244 dispc_write_reg(DISPC_CONTROL, l);
245}
246
/* Select when the CLUT and/or frame data are loaded; rejects any bits
 * outside the known load-mode flags.  The mode goes into bits 2:1 of
 * DISPC_CONFIG. */
static void set_load_mode(int mode)
{
	BUG_ON(mode & ~(DISPC_LOAD_CLUT_ONLY | DISPC_LOAD_FRAME_ONLY |
			DISPC_LOAD_CLUT_ONCE_FRAME));
	MOD_REG_FLD(DISPC_CONFIG, 0x03 << 1, mode << 1);
}
253
/*
 * Program the active LCD display size.  x and y must fit the 11-bit
 * hardware fields (max 2048); the registers store size - 1.
 */
void omap_dispc_set_lcd_size(int x, int y)
{
	BUG_ON((x > (1 << 11)) || (y > (1 << 11)));
	enable_lcd_clocks(1);
	MOD_REG_FLD(DISPC_SIZE_LCD, FLD_MASK(16, 11) | FLD_MASK(0, 11),
			((y - 1) << 16) | (x - 1));
	enable_lcd_clocks(0);
}
EXPORT_SYMBOL(omap_dispc_set_lcd_size);
263
/*
 * Program the digital (TV) output size.  Same 11-bit limits and
 * size - 1 encoding as the LCD size register.
 */
void omap_dispc_set_digit_size(int x, int y)
{
	BUG_ON((x > (1 << 11)) || (y > (1 << 11)));
	enable_lcd_clocks(1);
	MOD_REG_FLD(DISPC_SIZE_DIG, FLD_MASK(16, 11) | FLD_MASK(0, 11),
			((y - 1) << 16) | (x - 1));
	enable_lcd_clocks(0);
}
EXPORT_SYMBOL(omap_dispc_set_digit_size);
273
/*
 * Program the FIFO low/high thresholds for a plane based on the FIFO
 * size the hardware reports.  In external (RFBI) mode the thresholds
 * are set higher to keep the FIFO fuller.
 * NOTE(review): the size is masked with FLD_MASK(0, 9) — confirm the
 * FIFO size status field really is only 9 bits wide on this silicon.
 */
static void setup_plane_fifo(int plane, int ext_mode)
{
	const u32 ftrs_reg[] = { DISPC_GFX_FIFO_THRESHOLD,
				DISPC_VID1_BASE + DISPC_VID_FIFO_THRESHOLD,
				DISPC_VID2_BASE + DISPC_VID_FIFO_THRESHOLD };
	const u32 fsz_reg[] = { DISPC_GFX_FIFO_SIZE_STATUS,
				DISPC_VID1_BASE + DISPC_VID_FIFO_SIZE_STATUS,
				DISPC_VID2_BASE + DISPC_VID_FIFO_SIZE_STATUS };
	int low, high;
	u32 l;

	BUG_ON(plane > 2);

	l = dispc_read_reg(fsz_reg[plane]);
	l &= FLD_MASK(0, 9);
	if (ext_mode) {
		low = l * 3 / 4;
		high = l;
	} else {
		low = l / 4;
		high = l * 3 / 4;
	}
	MOD_REG_FLD(ftrs_reg[plane], FLD_MASK(16, 9) | FLD_MASK(0, 9),
			(high << 16) | low);
}
299
/* Enable or disable the LCD output (bit 0 of DISPC_CONTROL). */
void omap_dispc_enable_lcd_out(int enable)
{
	enable_lcd_clocks(1);
	MOD_REG_FLD(DISPC_CONTROL, 1, enable ? 1 : 0);
	enable_lcd_clocks(0);
}
EXPORT_SYMBOL(omap_dispc_enable_lcd_out);
307
/* Enable or disable the digital (TV) output (bit 1 of DISPC_CONTROL). */
void omap_dispc_enable_digit_out(int enable)
{
	enable_lcd_clocks(1);
	MOD_REG_FLD(DISPC_CONTROL, 1 << 1, enable ? 1 << 1 : 0);
	enable_lcd_clocks(0);
}
EXPORT_SYMBOL(omap_dispc_enable_digit_out);
315
/*
 * Program one plane (GFX, VID1 or VID2) for scan-out: color format,
 * burst size, output channel, base address, position, size and row
 * increment.  'paddr' is the physical frame address, 'screen_width'
 * the full line length in pixels (the visible window may be narrower).
 * Returns the frame size in bytes, or -EINVAL for bad plane/channel/
 * color-mode combinations (YUV modes are not allowed on GFX).
 */
static inline int _setup_plane(int plane, int channel_out,
				  u32 paddr, int screen_width,
				  int pos_x, int pos_y, int width, int height,
				  int color_mode)
{
	const u32 at_reg[] = { DISPC_GFX_ATTRIBUTES,
				DISPC_VID1_BASE + DISPC_VID_ATTRIBUTES,
				DISPC_VID2_BASE + DISPC_VID_ATTRIBUTES };
	const u32 ba_reg[] = { DISPC_GFX_BA0, DISPC_VID1_BASE + DISPC_VID_BA0,
				DISPC_VID2_BASE + DISPC_VID_BA0 };
	const u32 ps_reg[] = { DISPC_GFX_POSITION,
				DISPC_VID1_BASE + DISPC_VID_POSITION,
				DISPC_VID2_BASE + DISPC_VID_POSITION };
	const u32 sz_reg[] = { DISPC_GFX_SIZE,
				DISPC_VID1_BASE + DISPC_VID_PICTURE_SIZE,
				DISPC_VID2_BASE + DISPC_VID_PICTURE_SIZE };
	const u32 ri_reg[] = { DISPC_GFX_ROW_INC,
				DISPC_VID1_BASE + DISPC_VID_ROW_INC,
				DISPC_VID2_BASE + DISPC_VID_ROW_INC };
	const u32 vs_reg[] = { 0, DISPC_VID1_BASE + DISPC_VID_SIZE,
				DISPC_VID2_BASE + DISPC_VID_SIZE };

	int chout_shift, burst_shift;
	int chout_val;
	int color_code;
	int bpp;
	int cconv_en;
	int set_vsize;
	u32 l;

#ifdef VERBOSE
	dev_dbg(dispc.fbdev->dev, "plane %d channel %d paddr %#08x scr_width %d"
		" pos_x %d pos_y %d width %d height %d color_mode %d\n",
		plane, channel_out, paddr, screen_width, pos_x, pos_y,
		width, height, color_mode);
#endif

	/* attribute-register field positions differ per plane type */
	set_vsize = 0;
	switch (plane) {
	case OMAPFB_PLANE_GFX:
		burst_shift = 6;
		chout_shift = 8;
		break;
	case OMAPFB_PLANE_VID1:
	case OMAPFB_PLANE_VID2:
		burst_shift = 14;
		chout_shift = 16;
		set_vsize = 1;
		break;
	default:
		return -EINVAL;
	}

	switch (channel_out) {
	case OMAPFB_CHANNEL_OUT_LCD:
		chout_val = 0;
		break;
	case OMAPFB_CHANNEL_OUT_DIGIT:
		chout_val = 1;
		break;
	default:
		return -EINVAL;
	}

	/* color-space conversion only for YUV on video planes */
	cconv_en = 0;
	switch (color_mode) {
	case OMAPFB_COLOR_RGB565:
		color_code = DISPC_RGB_16_BPP;
		bpp = 16;
		break;
	case OMAPFB_COLOR_YUV422:
		if (plane == 0)
			return -EINVAL;
		color_code = DISPC_UYVY_422;
		cconv_en = 1;
		bpp = 16;
		break;
	case OMAPFB_COLOR_YUY422:
		if (plane == 0)
			return -EINVAL;
		color_code = DISPC_YUV2_422;
		cconv_en = 1;
		bpp = 16;
		break;
	default:
		return -EINVAL;
	}

	l = dispc_read_reg(at_reg[plane]);

	l &= ~(0x0f << 1);
	l |= color_code << 1;
	l &= ~(1 << 9);
	l |= cconv_en << 9;

	l &= ~(0x03 << burst_shift);
	l |= DISPC_BURST_8x32 << burst_shift;

	l &= ~(1 << chout_shift);
	l |= chout_val << chout_shift;

	dispc_write_reg(at_reg[plane], l);

	dispc_write_reg(ba_reg[plane], paddr);
	MOD_REG_FLD(ps_reg[plane],
		    FLD_MASK(16, 11) | FLD_MASK(0, 11), (pos_y << 16) | pos_x);

	/* registers store size - 1 */
	MOD_REG_FLD(sz_reg[plane], FLD_MASK(16, 11) | FLD_MASK(0, 11),
		    ((height - 1) << 16) | (width - 1));

	if (set_vsize) {
		/* Set video size if set_scale hasn't set it */
		if (!dispc.fir_vinc[plane])
			MOD_REG_FLD(vs_reg[plane],
				FLD_MASK(16, 11), (height - 1) << 16);
		if (!dispc.fir_hinc[plane])
			MOD_REG_FLD(vs_reg[plane],
				FLD_MASK(0, 11), width - 1);
	}

	/* skip the non-visible part of each line; row inc is in bytes + 1 */
	dispc_write_reg(ri_reg[plane], (screen_width - width) * bpp / 8 + 1);

	return height * screen_width * bpp / 8;
}
440
441static int omap_dispc_setup_plane(int plane, int channel_out,
442 unsigned long offset,
443 int screen_width,
444 int pos_x, int pos_y, int width, int height,
445 int color_mode)
446{
447 u32 paddr;
448 int r;
449
450 if ((unsigned)plane > dispc.mem_desc.region_cnt)
451 return -EINVAL;
452 paddr = dispc.mem_desc.region[plane].paddr + offset;
453 enable_lcd_clocks(1);
454 r = _setup_plane(plane, channel_out, paddr,
455 screen_width,
456 pos_x, pos_y, width, height, color_mode);
457 enable_lcd_clocks(0);
458 return r;
459}
460
461static void write_firh_reg(int plane, int reg, u32 value)
462{
463 u32 base;
464
465 if (plane == 1)
466 base = DISPC_VID1_BASE + DISPC_VID_FIR_COEF_H0;
467 else
468 base = DISPC_VID2_BASE + DISPC_VID_FIR_COEF_H0;
469 dispc_write_reg(base + reg * 8, value);
470}
471
472static void write_firhv_reg(int plane, int reg, u32 value)
473{
474 u32 base;
475
476 if (plane == 1)
477 base = DISPC_VID1_BASE + DISPC_VID_FIR_COEF_HV0;
478 else
479 base = DISPC_VID2_BASE + DISPC_VID_FIR_COEF_HV0;
480 dispc_write_reg(base + reg * 8, value);
481}
482
483static void set_upsampling_coef_table(int plane)
484{
485 const u32 coef[][2] = {
486 { 0x00800000, 0x00800000 },
487 { 0x0D7CF800, 0x037B02FF },
488 { 0x1E70F5FF, 0x0C6F05FE },
489 { 0x335FF5FE, 0x205907FB },
490 { 0xF74949F7, 0x00404000 },
491 { 0xF55F33FB, 0x075920FE },
492 { 0xF5701EFE, 0x056F0CFF },
493 { 0xF87C0DFF, 0x027B0300 },
494 };
495 int i;
496
497 for (i = 0; i < 8; i++) {
498 write_firh_reg(plane, i, coef[i][0]);
499 write_firhv_reg(plane, i, coef[i][1]);
500 }
501}
502
503static int omap_dispc_set_scale(int plane,
504 int orig_width, int orig_height,
505 int out_width, int out_height)
506{
507 const u32 at_reg[] = { 0, DISPC_VID1_BASE + DISPC_VID_ATTRIBUTES,
508 DISPC_VID2_BASE + DISPC_VID_ATTRIBUTES };
509 const u32 vs_reg[] = { 0, DISPC_VID1_BASE + DISPC_VID_SIZE,
510 DISPC_VID2_BASE + DISPC_VID_SIZE };
511 const u32 fir_reg[] = { 0, DISPC_VID1_BASE + DISPC_VID_FIR,
512 DISPC_VID2_BASE + DISPC_VID_FIR };
513
514 u32 l;
515 int fir_hinc;
516 int fir_vinc;
517
518 if ((unsigned)plane > OMAPFB_PLANE_NUM)
519 return -ENODEV;
520
521 if (plane == OMAPFB_PLANE_GFX &&
522 (out_width != orig_width || out_height != orig_height))
523 return -EINVAL;
524
525 enable_lcd_clocks(1);
526 if (orig_width < out_width) {
527 /*
528 * Upsampling.
529 * Currently you can only scale both dimensions in one way.
530 */
531 if (orig_height > out_height ||
532 orig_width * 8 < out_width ||
533 orig_height * 8 < out_height) {
534 enable_lcd_clocks(0);
535 return -EINVAL;
536 }
537 set_upsampling_coef_table(plane);
538 } else if (orig_width > out_width) {
539 /* Downsampling not yet supported
540 */
541
542 enable_lcd_clocks(0);
543 return -EINVAL;
544 }
545 if (!orig_width || orig_width == out_width)
546 fir_hinc = 0;
547 else
548 fir_hinc = 1024 * orig_width / out_width;
549 if (!orig_height || orig_height == out_height)
550 fir_vinc = 0;
551 else
552 fir_vinc = 1024 * orig_height / out_height;
553 dispc.fir_hinc[plane] = fir_hinc;
554 dispc.fir_vinc[plane] = fir_vinc;
555
556 MOD_REG_FLD(fir_reg[plane],
557 FLD_MASK(16, 12) | FLD_MASK(0, 12),
558 ((fir_vinc & 4095) << 16) |
559 (fir_hinc & 4095));
560
561 dev_dbg(dispc.fbdev->dev, "out_width %d out_height %d orig_width %d "
562 "orig_height %d fir_hinc %d fir_vinc %d\n",
563 out_width, out_height, orig_width, orig_height,
564 fir_hinc, fir_vinc);
565
566 MOD_REG_FLD(vs_reg[plane],
567 FLD_MASK(16, 11) | FLD_MASK(0, 11),
568 ((out_height - 1) << 16) | (out_width - 1));
569
570 l = dispc_read_reg(at_reg[plane]);
571 l &= ~(0x03 << 5);
572 l |= fir_hinc ? (1 << 5) : 0;
573 l |= fir_vinc ? (1 << 6) : 0;
574 dispc_write_reg(at_reg[plane], l);
575
576 enable_lcd_clocks(0);
577 return 0;
578}
579
580static int omap_dispc_enable_plane(int plane, int enable)
581{
582 const u32 at_reg[] = { DISPC_GFX_ATTRIBUTES,
583 DISPC_VID1_BASE + DISPC_VID_ATTRIBUTES,
584 DISPC_VID2_BASE + DISPC_VID_ATTRIBUTES };
585 if ((unsigned int)plane > dispc.mem_desc.region_cnt)
586 return -EINVAL;
587
588 enable_lcd_clocks(1);
589 MOD_REG_FLD(at_reg[plane], 1, enable ? 1 : 0);
590 enable_lcd_clocks(0);
591
592 return 0;
593}
594
595static int omap_dispc_set_color_key(struct omapfb_color_key *ck)
596{
597 u32 df_reg, tr_reg;
598 int shift, val;
599
600 switch (ck->channel_out) {
601 case OMAPFB_CHANNEL_OUT_LCD:
602 df_reg = DISPC_DEFAULT_COLOR0;
603 tr_reg = DISPC_TRANS_COLOR0;
604 shift = 10;
605 break;
606 case OMAPFB_CHANNEL_OUT_DIGIT:
607 df_reg = DISPC_DEFAULT_COLOR1;
608 tr_reg = DISPC_TRANS_COLOR1;
609 shift = 12;
610 break;
611 default:
612 return -EINVAL;
613 }
614 switch (ck->key_type) {
615 case OMAPFB_COLOR_KEY_DISABLED:
616 val = 0;
617 break;
618 case OMAPFB_COLOR_KEY_GFX_DST:
619 val = 1;
620 break;
621 case OMAPFB_COLOR_KEY_VID_SRC:
622 val = 3;
623 break;
624 default:
625 return -EINVAL;
626 }
627 enable_lcd_clocks(1);
628 MOD_REG_FLD(DISPC_CONFIG, FLD_MASK(shift, 2), val << shift);
629
630 if (val != 0)
631 dispc_write_reg(tr_reg, ck->trans_key);
632 dispc_write_reg(df_reg, ck->background);
633 enable_lcd_clocks(0);
634
635 dispc.color_key = *ck;
636
637 return 0;
638}
639
/* Return the colour-key configuration cached by the last successful
 * omap_dispc_set_color_key() call (the hardware is not re-read). */
static int omap_dispc_get_color_key(struct omapfb_color_key *ck)
{
	*ck = dispc.color_key;
	return 0;
}
645
/* Palette load hook: intentionally empty on this controller. */
static void load_palette(void)
{
}
649
/*
 * Switch the display update mode.
 *
 * AUTO/MANUAL take an extra LCD clock reference and enable the LCD
 * output; UPDATE_DISABLED turns the output off, waits (up to 500 ms)
 * for the FRAME DONE interrupt before releasing that clock reference.
 * The enable/disable clock calls are therefore deliberately unbalanced
 * within a single invocation: the reference is held for as long as the
 * output is active.
 * Returns 0 on success, -EINVAL for an unknown mode.
 */
static int omap_dispc_set_update_mode(enum omapfb_update_mode mode)
{
	int r = 0;

	if (mode != dispc.update_mode) {
		switch (mode) {
		case OMAPFB_AUTO_UPDATE:
		case OMAPFB_MANUAL_UPDATE:
			enable_lcd_clocks(1);
			omap_dispc_enable_lcd_out(1);
			dispc.update_mode = mode;
			break;
		case OMAPFB_UPDATE_DISABLED:
			/* Re-arm the completion before disabling so the
			 * IRQ handler's complete() is not lost. */
			init_completion(&dispc.frame_done);
			omap_dispc_enable_lcd_out(0);
			if (!wait_for_completion_timeout(&dispc.frame_done,
					msecs_to_jiffies(500))) {
				dev_err(dispc.fbdev->dev,
					 "timeout waiting for FRAME DONE\n");
			}
			dispc.update_mode = mode;
			enable_lcd_clocks(0);
			break;
		default:
			r = -EINVAL;
		}
	}

	return r;
}
680
681static void omap_dispc_get_caps(int plane, struct omapfb_caps *caps)
682{
683 caps->ctrl |= OMAPFB_CAPS_PLANE_RELOCATE_MEM;
684 if (plane > 0)
685 caps->ctrl |= OMAPFB_CAPS_PLANE_SCALE;
686 caps->plane_color |= (1 << OMAPFB_COLOR_RGB565) |
687 (1 << OMAPFB_COLOR_YUV422) |
688 (1 << OMAPFB_COLOR_YUY422);
689 if (plane == 0)
690 caps->plane_color |= (1 << OMAPFB_COLOR_CLUT_8BPP) |
691 (1 << OMAPFB_COLOR_CLUT_4BPP) |
692 (1 << OMAPFB_COLOR_CLUT_2BPP) |
693 (1 << OMAPFB_COLOR_CLUT_1BPP) |
694 (1 << OMAPFB_COLOR_RGB444);
695}
696
/* Return the currently configured update mode. */
static enum omapfb_update_mode omap_dispc_get_update_mode(void)
{
	return dispc.update_mode;
}
701
/*
 * Program the YUV->RGB colour-space conversion coefficients of both
 * video planes with an ITU-R BT.601-5 table.  full_range == 0 selects
 * limited-range input (bit 11 of the attributes register cleared).
 * The coefficient-to-register packing follows the DISPC_VID_CONV_COEF0..4
 * layout, two 11-bit signed coefficients per register.
 */
static void setup_color_conv_coef(void)
{
	u32 mask = FLD_MASK(16, 11) | FLD_MASK(0, 11);
	int cf1_reg = DISPC_VID1_BASE + DISPC_VID_CONV_COEF0;
	int cf2_reg = DISPC_VID2_BASE + DISPC_VID_CONV_COEF0;
	int at1_reg = DISPC_VID1_BASE + DISPC_VID_ATTRIBUTES;
	int at2_reg = DISPC_VID2_BASE + DISPC_VID_ATTRIBUTES;
	const struct color_conv_coef {
		int ry, rcr, rcb, gy, gcr, gcb, by, bcr, bcb;
		int full_range;
	} ctbl_bt601_5 = {
		298, 409, 0, 298, -208, -100, 298, 0, 517, 0,
	};
	const struct color_conv_coef *ct;
/* Pack two signed 11-bit coefficients into one register word. */
#define CVAL(x, y) (((x & 2047) << 16) | (y & 2047))

	ct = &ctbl_bt601_5;

	MOD_REG_FLD(cf1_reg, mask, CVAL(ct->rcr, ct->ry));
	MOD_REG_FLD(cf1_reg + 4, mask, CVAL(ct->gy, ct->rcb));
	MOD_REG_FLD(cf1_reg + 8, mask, CVAL(ct->gcb, ct->gcr));
	MOD_REG_FLD(cf1_reg + 12, mask, CVAL(ct->bcr, ct->by));
	MOD_REG_FLD(cf1_reg + 16, mask, CVAL(0, ct->bcb));

	MOD_REG_FLD(cf2_reg, mask, CVAL(ct->rcr, ct->ry));
	MOD_REG_FLD(cf2_reg + 4, mask, CVAL(ct->gy, ct->rcb));
	MOD_REG_FLD(cf2_reg + 8, mask, CVAL(ct->gcb, ct->gcr));
	MOD_REG_FLD(cf2_reg + 12, mask, CVAL(ct->bcr, ct->by));
	MOD_REG_FLD(cf2_reg + 16, mask, CVAL(0, ct->bcb));
#undef CVAL

	MOD_REG_FLD(at1_reg, (1 << 11), ct->full_range);
	MOD_REG_FLD(at2_reg, (1 << 11), ct->full_range);
}
736
/*
 * Derive the logic (lck) and pixel (pck) clock divisors from the DSS1
 * functional clock for a requested pixel clock rate.
 * @is_tft:  non-zero for an active-matrix panel (minimum pck divisor 2,
 *           passive panels need at least 3)
 * @pck:     requested pixel clock in Hz (clamped to >= 1)
 * @lck_div: resulting logic clock divisor (1..255)
 * @pck_div: resulting pixel clock divisor (2..255 or 3..255)
 *
 * If the requested rate cannot be reached even with the maximum pck
 * divisor, the logic clock is divided as well; a too-low request is
 * clamped and a warning is printed.
 */
static void calc_ck_div(int is_tft, int pck, int *lck_div, int *pck_div)
{
	unsigned long fck, lck;

	*lck_div = 1;
	pck = max(1, pck);
	fck = clk_get_rate(dispc.dss1_fck);
	lck = fck;
	/* Round up so the achieved pixel clock never exceeds the request. */
	*pck_div = (lck + pck - 1) / pck;
	if (is_tft)
		*pck_div = max(2, *pck_div);
	else
		*pck_div = max(3, *pck_div);
	if (*pck_div > 255) {
		*pck_div = 255;
		lck = pck * *pck_div;
		*lck_div = fck / lck;
		BUG_ON(*lck_div < 1);
		if (*lck_div > 255) {
			*lck_div = 255;
			dev_warn(dispc.fbdev->dev, "pixclock %d kHz too low.\n",
				 pck / 1000);
		}
	}
}
762
763static void set_lcd_tft_mode(int enable)
764{
765 u32 mask;
766
767 mask = 1 << 3;
768 MOD_REG_FLD(DISPC_CONTROL, mask, enable ? mask : 0);
769}
770
/*
 * Program the LCD horizontal/vertical timing, signal polarity and clock
 * divisor registers from the attached panel's parameters, then write
 * back the pixel clock actually achieved into the panel descriptor.
 * Sync/porch values are clamped to the register field ranges; the H and
 * sync-width fields hold value - 1 while vfp/vbp are stored as-is.
 */
static void set_lcd_timings(void)
{
	u32 l;
	int lck_div, pck_div;
	struct lcd_panel *panel = dispc.fbdev->panel;
	int is_tft = panel->config & OMAP_LCDC_PANEL_TFT;
	unsigned long fck;

	l = dispc_read_reg(DISPC_TIMING_H);
	l &= ~(FLD_MASK(0, 6) | FLD_MASK(8, 8) | FLD_MASK(20, 8));
	l |= ( max(1, (min(64, panel->hsw))) - 1 ) << 0;
	l |= ( max(1, (min(256, panel->hfp))) - 1 ) << 8;
	l |= ( max(1, (min(256, panel->hbp))) - 1 ) << 20;
	dispc_write_reg(DISPC_TIMING_H, l);

	l = dispc_read_reg(DISPC_TIMING_V);
	l &= ~(FLD_MASK(0, 6) | FLD_MASK(8, 8) | FLD_MASK(20, 8));
	l |= ( max(1, (min(64, panel->vsw))) - 1 ) << 0;
	l |= ( max(0, (min(255, panel->vfp))) - 0 ) << 8;
	l |= ( max(0, (min(255, panel->vbp))) - 0 ) << 20;
	dispc_write_reg(DISPC_TIMING_V, l);

	/* Sync/clock polarities plus the AC-bias pin frequency. */
	l = dispc_read_reg(DISPC_POL_FREQ);
	l &= ~FLD_MASK(12, 6);
	l |= (panel->config & OMAP_LCDC_SIGNAL_MASK) << 12;
	l |= panel->acb & 0xff;
	dispc_write_reg(DISPC_POL_FREQ, l);

	calc_ck_div(is_tft, panel->pixel_clock * 1000, &lck_div, &pck_div);

	l = dispc_read_reg(DISPC_DIVISOR);
	l &= ~(FLD_MASK(16, 8) | FLD_MASK(0, 8));
	l |= (lck_div << 16) | (pck_div << 0);
	dispc_write_reg(DISPC_DIVISOR, l);

	/* update panel info with the exact clock */
	fck = clk_get_rate(dispc.dss1_fck);
	panel->pixel_clock = fck / lck_div / pck_div / 1000;
}
810
811int omap_dispc_request_irq(void (*callback)(void *data), void *data)
812{
813 int r = 0;
814
815 BUG_ON(callback == NULL);
816
817 if (dispc.irq_callback)
818 r = -EBUSY;
819 else {
820 dispc.irq_callback = callback;
821 dispc.irq_callback_data = data;
822 }
823
824 return r;
825}
826EXPORT_SYMBOL(omap_dispc_request_irq);
827
828void omap_dispc_enable_irqs(int irq_mask)
829{
830 enable_lcd_clocks(1);
831 dispc.enabled_irqs = irq_mask;
832 irq_mask |= DISPC_IRQ_MASK_ERROR;
833 MOD_REG_FLD(DISPC_IRQENABLE, 0x7fff, irq_mask);
834 enable_lcd_clocks(0);
835}
836EXPORT_SYMBOL(omap_dispc_enable_irqs);
837
838void omap_dispc_disable_irqs(int irq_mask)
839{
840 enable_lcd_clocks(1);
841 dispc.enabled_irqs &= ~irq_mask;
842 irq_mask &= ~DISPC_IRQ_MASK_ERROR;
843 MOD_REG_FLD(DISPC_IRQENABLE, 0x7fff, irq_mask);
844 enable_lcd_clocks(0);
845}
846EXPORT_SYMBOL(omap_dispc_disable_irqs);
847
/*
 * Unregister the client IRQ callback and disable all client interrupts.
 * The extra clock reference taken here keeps the interface clocked
 * across the nested enable/disable pair inside
 * omap_dispc_disable_irqs().
 */
void omap_dispc_free_irq(void)
{
	enable_lcd_clocks(1);
	omap_dispc_disable_irqs(DISPC_IRQ_MASK_ALL);
	dispc.irq_callback = NULL;
	dispc.irq_callback_data = NULL;
	enable_lcd_clocks(0);
}
EXPORT_SYMBOL(omap_dispc_free_irq);
857
/*
 * DISPC interrupt handler: signals frame completion, rate-limits error
 * reports, dispatches enabled IRQ bits to the registered client
 * callback, and finally acknowledges all pending status bits.
 */
static irqreturn_t omap_dispc_irq_handler(int irq, void *dev)
{
	u32 stat = dispc_read_reg(DISPC_IRQSTATUS);

	if (stat & DISPC_IRQ_FRAMEMASK)
		complete(&dispc.frame_done);

	if (stat & DISPC_IRQ_MASK_ERROR) {
		if (printk_ratelimit()) {
			dev_err(dispc.fbdev->dev, "irq error status %04x\n",
				stat & 0x7fff);
		}
	}

	if ((stat & dispc.enabled_irqs) && dispc.irq_callback)
		dispc.irq_callback(dispc.irq_callback_data);

	/* Ack everything we saw in this pass. */
	dispc_write_reg(DISPC_IRQSTATUS, stat);

	return IRQ_HANDLED;
}
879
880static int get_dss_clocks(void)
881{
882 if (IS_ERR((dispc.dss_ick = clk_get(dispc.fbdev->dev, "dss_ick")))) {
883 dev_err(dispc.fbdev->dev, "can't get dss_ick");
884 return PTR_ERR(dispc.dss_ick);
885 }
886
887 if (IS_ERR((dispc.dss1_fck = clk_get(dispc.fbdev->dev, "dss1_fck")))) {
888 dev_err(dispc.fbdev->dev, "can't get dss1_fck");
889 clk_put(dispc.dss_ick);
890 return PTR_ERR(dispc.dss1_fck);
891 }
892
893 if (IS_ERR((dispc.dss_54m_fck =
894 clk_get(dispc.fbdev->dev, "dss_54m_fck")))) {
895 dev_err(dispc.fbdev->dev, "can't get dss_54m_fck");
896 clk_put(dispc.dss_ick);
897 clk_put(dispc.dss1_fck);
898 return PTR_ERR(dispc.dss_54m_fck);
899 }
900
901 return 0;
902}
903
/* Release the clocks obtained by get_dss_clocks(), in reverse order. */
static void put_dss_clocks(void)
{
	clk_put(dispc.dss_54m_fck);
	clk_put(dispc.dss1_fck);
	clk_put(dispc.dss_ick);
}
910
911static void enable_lcd_clocks(int enable)
912{
913 if (enable)
914 clk_enable(dispc.dss1_fck);
915 else
916 clk_disable(dispc.dss1_fck);
917}
918
919static void enable_interface_clocks(int enable)
920{
921 if (enable)
922 clk_enable(dispc.dss_ick);
923 else
924 clk_disable(dispc.dss_ick);
925}
926
927static void enable_digit_clocks(int enable)
928{
929 if (enable)
930 clk_enable(dispc.dss_54m_fck);
931 else
932 clk_disable(dispc.dss_54m_fck);
933}
934
/*
 * Suspend hook: in auto-update mode, disable LCD output, wait (up to
 * 500 ms) for FRAME DONE, and drop the clock reference held while the
 * output was active (taken in omap_dispc_set_update_mode()).
 */
static void omap_dispc_suspend(void)
{
	if (dispc.update_mode == OMAPFB_AUTO_UPDATE) {
		init_completion(&dispc.frame_done);
		omap_dispc_enable_lcd_out(0);
		if (!wait_for_completion_timeout(&dispc.frame_done,
				msecs_to_jiffies(500))) {
			dev_err(dispc.fbdev->dev,
				"timeout waiting for FRAME DONE\n");
		}
		enable_lcd_clocks(0);
	}
}
948
/*
 * Resume hook: re-take the LCD clock reference, reprogram panel timings
 * (internal panels only) and re-enable the LCD output when auto-update
 * mode was active at suspend time.
 */
static void omap_dispc_resume(void)
{
	if (dispc.update_mode == OMAPFB_AUTO_UPDATE) {
		enable_lcd_clocks(1);
		if (!dispc.ext_mode) {
			set_lcd_timings();
			load_palette();
		}
		omap_dispc_enable_lcd_out(1);
	}
}
960
961
962static int omap_dispc_update_window(struct fb_info *fbi,
963 struct omapfb_update_window *win,
964 void (*complete_callback)(void *arg),
965 void *complete_callback_data)
966{
967 return dispc.update_mode == OMAPFB_UPDATE_DISABLED ? -ENODEV : 0;
968}
969
/*
 * Map a physically contiguous framebuffer region into kernel virtual
 * address space (write-combined) and store the result in region->vaddr.
 * A temporary on-stack vm_area_struct is populated with just the fields
 * io_remap_pfn_range() needs.
 * Returns 0 on success, -ENOMEM/-EAGAIN on failure.
 *
 * NOTE(review): on io_remap_pfn_range() failure the vm area obtained
 * from get_vm_area() is leaked — it should be released before
 * returning.  Confirm the proper release call before changing this.
 */
static int mmap_kern(struct omapfb_mem_region *region)
{
	struct vm_struct *kvma;
	struct vm_area_struct vma;
	pgprot_t pgprot;
	unsigned long vaddr;

	kvma = get_vm_area(region->size, VM_IOREMAP);
	if (kvma == NULL) {
		dev_err(dispc.fbdev->dev, "can't get kernel vm area\n");
		return -ENOMEM;
	}
	vma.vm_mm = &init_mm;

	vaddr = (unsigned long)kvma->addr;

	pgprot = pgprot_writecombine(pgprot_kernel);
	vma.vm_start = vaddr;
	vma.vm_end = vaddr + region->size;
	if (io_remap_pfn_range(&vma, vaddr, region->paddr >> PAGE_SHIFT,
			       region->size, pgprot) < 0) {
		dev_err(dispc.fbdev->dev, "kernel mmap for FBMEM failed\n");
		return -EAGAIN;
	}
	region->vaddr = (void *)vaddr;

	return 0;
}
998
999static void mmap_user_open(struct vm_area_struct *vma)
1000{
1001 int plane = (int)vma->vm_private_data;
1002
1003 atomic_inc(&dispc.map_count[plane]);
1004}
1005
1006static void mmap_user_close(struct vm_area_struct *vma)
1007{
1008 int plane = (int)vma->vm_private_data;
1009
1010 atomic_dec(&dispc.map_count[plane]);
1011}
1012
/* VMA lifetime hooks used to track per-plane userspace mapping counts
 * (consulted by omap_dispc_setup_mem() before moving a region). */
static struct vm_operations_struct mmap_user_ops = {
	.open = mmap_user_open,
	.close = mmap_user_close,
};
1017
/*
 * fb_mmap handler: map the plane's framebuffer memory into a userspace
 * VMA (write-combined, VM_IO) and start tracking the mapping via
 * mmap_user_ops.  The offset/length checks reject mappings that would
 * extend past the region.
 * Returns 0 on success, -EINVAL for bad offsets, -EAGAIN on remap
 * failure.
 */
static int omap_dispc_mmap_user(struct fb_info *info,
				struct vm_area_struct *vma)
{
	struct omapfb_plane_struct *plane = info->par;
	unsigned long off;
	unsigned long start;
	u32 len;

	if (vma->vm_end - vma->vm_start == 0)
		return 0;
	if (vma->vm_pgoff > (~0UL >> PAGE_SHIFT))
		return -EINVAL;
	off = vma->vm_pgoff << PAGE_SHIFT;

	start = info->fix.smem_start;
	len = info->fix.smem_len;
	if (off >= len)
		return -EINVAL;
	if ((vma->vm_end - vma->vm_start + off) > len)
		return -EINVAL;
	off += start;
	vma->vm_pgoff = off >> PAGE_SHIFT;
	vma->vm_flags |= VM_IO | VM_RESERVED;
	vma->vm_page_prot = pgprot_writecombine(vma->vm_page_prot);
	vma->vm_ops = &mmap_user_ops;
	/* Plane index smuggled through vm_private_data for open/close. */
	vma->vm_private_data = (void *)plane->idx;
	if (io_remap_pfn_range(vma, vma->vm_start, off >> PAGE_SHIFT,
			       vma->vm_end - vma->vm_start, vma->vm_page_prot))
		return -EAGAIN;
	/* vm_ops.open won't be called for mmap itself. */
	atomic_inc(&dispc.map_count[plane->idx]);
	return 0;
}
1051
/* Tear down a kernel mapping created by mmap_kern(). */
static void unmap_kern(struct omapfb_mem_region *region)
{
	vunmap(region->vaddr);
}
1056
1057static int alloc_palette_ram(void)
1058{
1059 dispc.palette_vaddr = dma_alloc_writecombine(dispc.fbdev->dev,
1060 MAX_PALETTE_SIZE, &dispc.palette_paddr, GFP_KERNEL);
1061 if (dispc.palette_vaddr == NULL) {
1062 dev_err(dispc.fbdev->dev, "failed to alloc palette memory\n");
1063 return -ENOMEM;
1064 }
1065
1066 return 0;
1067}
1068
/* Free the palette buffer allocated by alloc_palette_ram(). */
static void free_palette_ram(void)
{
	dma_free_writecombine(dispc.fbdev->dev, MAX_PALETTE_SIZE,
			dispc.palette_vaddr, dispc.palette_paddr);
}
1074
1075static int alloc_fbmem(struct omapfb_mem_region *region)
1076{
1077 region->vaddr = dma_alloc_writecombine(dispc.fbdev->dev,
1078 region->size, &region->paddr, GFP_KERNEL);
1079
1080 if (region->vaddr == NULL) {
1081 dev_err(dispc.fbdev->dev, "unable to allocate FB DMA memory\n");
1082 return -ENOMEM;
1083 }
1084
1085 return 0;
1086}
1087
/* Free a framebuffer region allocated by alloc_fbmem(). */
static void free_fbmem(struct omapfb_mem_region *region)
{
	dma_free_writecombine(dispc.fbdev->dev, region->size,
			region->vaddr, region->paddr);
}
1093
1094static struct resmap *init_resmap(unsigned long start, size_t size)
1095{
1096 unsigned page_cnt;
1097 struct resmap *res_map;
1098
1099 page_cnt = PAGE_ALIGN(size) / PAGE_SIZE;
1100 res_map =
1101 kzalloc(sizeof(struct resmap) + RESMAP_SIZE(page_cnt), GFP_KERNEL);
1102 if (res_map == NULL)
1103 return NULL;
1104 res_map->start = start;
1105 res_map->page_cnt = page_cnt;
1106 res_map->map = (unsigned long *)(res_map + 1);
1107 return res_map;
1108}
1109
/* Free a reservation map created by init_resmap() (bitmap included). */
static void cleanup_resmap(struct resmap *res_map)
{
	kfree(res_map);
}
1114
1115static inline int resmap_mem_type(unsigned long start)
1116{
1117 if (start >= OMAP2_SRAM_START &&
1118 start < OMAP2_SRAM_START + OMAP2_SRAM_SIZE)
1119 return OMAPFB_MEMTYPE_SRAM;
1120 else
1121 return OMAPFB_MEMTYPE_SDRAM;
1122}
1123
1124static inline int resmap_page_reserved(struct resmap *res_map, unsigned page_nr)
1125{
1126 return *RESMAP_PTR(res_map, page_nr) & RESMAP_MASK(page_nr) ? 1 : 0;
1127}
1128
/* Mark one page reserved; reserving twice indicates a caller bug. */
static inline void resmap_reserve_page(struct resmap *res_map, unsigned page_nr)
{
	BUG_ON(resmap_page_reserved(res_map, page_nr));
	*RESMAP_PTR(res_map, page_nr) |= RESMAP_MASK(page_nr);
}
1134
/* Mark one page free; freeing an unreserved page indicates a bug. */
static inline void resmap_free_page(struct resmap *res_map, unsigned page_nr)
{
	BUG_ON(!resmap_page_reserved(res_map, page_nr));
	*RESMAP_PTR(res_map, page_nr) &= ~RESMAP_MASK(page_nr);
}
1140
1141static void resmap_reserve_region(unsigned long start, size_t size)
1142{
1143
1144 struct resmap *res_map;
1145 unsigned start_page;
1146 unsigned end_page;
1147 int mtype;
1148 unsigned i;
1149
1150 mtype = resmap_mem_type(start);
1151 res_map = dispc.res_map[mtype];
1152 dev_dbg(dispc.fbdev->dev, "reserve mem type %d start %08lx size %d\n",
1153 mtype, start, size);
1154 start_page = (start - res_map->start) / PAGE_SIZE;
1155 end_page = start_page + PAGE_ALIGN(size) / PAGE_SIZE;
1156 for (i = start_page; i < end_page; i++)
1157 resmap_reserve_page(res_map, i);
1158}
1159
1160static void resmap_free_region(unsigned long start, size_t size)
1161{
1162 struct resmap *res_map;
1163 unsigned start_page;
1164 unsigned end_page;
1165 unsigned i;
1166 int mtype;
1167
1168 mtype = resmap_mem_type(start);
1169 res_map = dispc.res_map[mtype];
1170 dev_dbg(dispc.fbdev->dev, "free mem type %d start %08lx size %d\n",
1171 mtype, start, size);
1172 start_page = (start - res_map->start) / PAGE_SIZE;
1173 end_page = start_page + PAGE_ALIGN(size) / PAGE_SIZE;
1174 for (i = start_page; i < end_page; i++)
1175 resmap_free_page(res_map, i);
1176}
1177
/*
 * First-fit allocation of a physically contiguous run of pages from
 * the reservation map of @mtype.  Returns the physical start address
 * of the reserved run, or 0 when no large-enough free run exists.
 * Note: 'size' is reused as the required page count after the first
 * PAGE_ALIGN conversion.
 */
static unsigned long resmap_alloc_region(int mtype, size_t size)
{
	unsigned i;
	unsigned total;
	unsigned start_page;
	unsigned long start;
	struct resmap *res_map = dispc.res_map[mtype];

	BUG_ON(mtype >= DISPC_MEMTYPE_NUM || res_map == NULL || !size);

	size = PAGE_ALIGN(size) / PAGE_SIZE;
	start_page = 0;
	total = 0;
	/* Scan for 'size' consecutive free pages. */
	for (i = 0; i < res_map->page_cnt; i++) {
		if (resmap_page_reserved(res_map, i)) {
			start_page = i + 1;
			total = 0;
		} else if (++total == size)
			break;
	}
	if (total < size)
		return 0;

	start = res_map->start + start_page * PAGE_SIZE;
	resmap_reserve_region(start, size * PAGE_SIZE);

	return start;
}
1206
1207/* Note that this will only work for user mappings, we don't deal with
1208 * kernel mappings here, so fbcon will keep using the old region.
1209 */
1210static int omap_dispc_setup_mem(int plane, size_t size, int mem_type,
1211 unsigned long *paddr)
1212{
1213 struct omapfb_mem_region *rg;
1214 unsigned long new_addr = 0;
1215
1216 if ((unsigned)plane > dispc.mem_desc.region_cnt)
1217 return -EINVAL;
1218 if (mem_type >= DISPC_MEMTYPE_NUM)
1219 return -EINVAL;
1220 if (dispc.res_map[mem_type] == NULL)
1221 return -ENOMEM;
1222 rg = &dispc.mem_desc.region[plane];
1223 if (size == rg->size && mem_type == rg->type)
1224 return 0;
1225 if (atomic_read(&dispc.map_count[plane]))
1226 return -EBUSY;
1227 if (rg->size != 0)
1228 resmap_free_region(rg->paddr, rg->size);
1229 if (size != 0) {
1230 new_addr = resmap_alloc_region(mem_type, size);
1231 if (!new_addr) {
1232 /* Reallocate old region. */
1233 resmap_reserve_region(rg->paddr, rg->size);
1234 return -ENOMEM;
1235 }
1236 }
1237 rg->paddr = new_addr;
1238 rg->size = size;
1239 rg->type = mem_type;
1240
1241 *paddr = new_addr;
1242
1243 return 0;
1244}
1245
/*
 * Set up all requested framebuffer regions: regions with a fixed
 * physical address are kernel-mapped if needed, the rest are allocated
 * from SDRAM.  A reservation map is then built per memory type,
 * spanning from the lowest to the highest used address, with the whole
 * span initially reserved (holes included).
 * On success the request descriptor is copied into dispc.mem_desc.
 *
 * NOTE(review): the failure path frees the reservation maps but leaves
 * dispc.res_map[] pointing at freed memory and does not undo the
 * per-region allocations/mappings — confirm callers never invoke
 * cleanup_fbmem() after a setup_fbmem() failure.
 */
static int setup_fbmem(struct omapfb_mem_desc *req_md)
{
	struct omapfb_mem_region *rg;
	int i;
	int r;
	unsigned long mem_start[DISPC_MEMTYPE_NUM];
	unsigned long mem_end[DISPC_MEMTYPE_NUM];

	if (!req_md->region_cnt) {
		dev_err(dispc.fbdev->dev, "no memory regions defined\n");
		return -ENOENT;
	}

	rg = &req_md->region[0];
	/* mem_start all-ones / mem_end zero so min/max tracking works. */
	memset(mem_start, 0xff, sizeof(mem_start));
	memset(mem_end, 0, sizeof(mem_end));

	for (i = 0; i < req_md->region_cnt; i++, rg++) {
		int mtype;
		if (rg->paddr) {
			rg->alloc = 0;
			if (rg->vaddr == NULL) {
				rg->map = 1;
				if ((r = mmap_kern(rg)) < 0)
					return r;
			}
		} else {
			if (rg->type != OMAPFB_MEMTYPE_SDRAM) {
				dev_err(dispc.fbdev->dev,
					"unsupported memory type\n");
				return -EINVAL;
			}
			rg->alloc = rg->map = 1;
			if ((r = alloc_fbmem(rg)) < 0)
				return r;
		}
		mtype = rg->type;

		/* Track the extent of each memory type. */
		if (rg->paddr < mem_start[mtype])
			mem_start[mtype] = rg->paddr;
		if (rg->paddr + rg->size > mem_end[mtype])
			mem_end[mtype] = rg->paddr + rg->size;
	}

	for (i = 0; i < DISPC_MEMTYPE_NUM; i++) {
		unsigned long start;
		size_t size;
		if (mem_end[i] == 0)
			continue;
		start = mem_start[i];
		size = mem_end[i] - start;
		dispc.res_map[i] = init_resmap(start, size);
		r = -ENOMEM;
		if (dispc.res_map[i] == NULL)
			goto fail;
		/* Initial state is that everything is reserved. This
		 * includes possible holes as well, which will never be
		 * freed.
		 */
		resmap_reserve_region(start, size);
	}

	dispc.mem_desc = *req_md;

	return 0;
fail:
	for (i = 0; i < DISPC_MEMTYPE_NUM; i++) {
		if (dispc.res_map[i] != NULL)
			cleanup_resmap(dispc.res_map[i]);
	}
	return r;
}
1318
/*
 * Undo setup_fbmem(): destroy the reservation maps, then free each
 * driver-allocated region or unmap the kernel mapping of fixed-address
 * regions.
 */
static void cleanup_fbmem(void)
{
	struct omapfb_mem_region *rg;
	int i;

	for (i = 0; i < DISPC_MEMTYPE_NUM; i++) {
		if (dispc.res_map[i] != NULL)
			cleanup_resmap(dispc.res_map[i]);
	}
	rg = &dispc.mem_desc.region[0];
	for (i = 0; i < dispc.mem_desc.region_cnt; i++, rg++) {
		if (rg->alloc)
			free_fbmem(rg);
		else {
			if (rg->map)
				unmap_kern(rg);
		}
	}
}
1338
1339static int omap_dispc_init(struct omapfb_device *fbdev, int ext_mode,
1340 struct omapfb_mem_desc *req_vram)
1341{
1342 int r;
1343 u32 l;
1344 struct lcd_panel *panel = fbdev->panel;
1345 int tmo = 10000;
1346 int skip_init = 0;
1347 int i;
1348
1349 memset(&dispc, 0, sizeof(dispc));
1350
1351 dispc.base = io_p2v(DISPC_BASE);
1352 dispc.fbdev = fbdev;
1353 dispc.ext_mode = ext_mode;
1354
1355 init_completion(&dispc.frame_done);
1356
1357 if ((r = get_dss_clocks()) < 0)
1358 return r;
1359
1360 enable_interface_clocks(1);
1361 enable_lcd_clocks(1);
1362
1363#ifdef CONFIG_FB_OMAP_BOOTLOADER_INIT
1364 l = dispc_read_reg(DISPC_CONTROL);
1365 /* LCD enabled ? */
1366 if (l & 1) {
1367 pr_info("omapfb: skipping hardware initialization\n");
1368 skip_init = 1;
1369 }
1370#endif
1371
1372 if (!skip_init) {
1373 /* Reset monitoring works only w/ the 54M clk */
1374 enable_digit_clocks(1);
1375
1376 /* Soft reset */
1377 MOD_REG_FLD(DISPC_SYSCONFIG, 1 << 1, 1 << 1);
1378
1379 while (!(dispc_read_reg(DISPC_SYSSTATUS) & 1)) {
1380 if (!--tmo) {
1381 dev_err(dispc.fbdev->dev, "soft reset failed\n");
1382 r = -ENODEV;
1383 enable_digit_clocks(0);
1384 goto fail1;
1385 }
1386 }
1387
1388 enable_digit_clocks(0);
1389 }
1390
1391 /* Enable smart idle and autoidle */
1392 l = dispc_read_reg(DISPC_CONTROL);
1393 l &= ~((3 << 12) | (3 << 3));
1394 l |= (2 << 12) | (2 << 3) | (1 << 0);
1395 dispc_write_reg(DISPC_SYSCONFIG, l);
1396 omap_writel(1 << 0, DSS_BASE + DSS_SYSCONFIG);
1397
1398 /* Set functional clock autogating */
1399 l = dispc_read_reg(DISPC_CONFIG);
1400 l |= 1 << 9;
1401 dispc_write_reg(DISPC_CONFIG, l);
1402
1403 l = dispc_read_reg(DISPC_IRQSTATUS);
1404 dispc_write_reg(l, DISPC_IRQSTATUS);
1405
1406 /* Enable those that we handle always */
1407 omap_dispc_enable_irqs(DISPC_IRQ_FRAMEMASK);
1408
1409 if ((r = request_irq(INT_24XX_DSS_IRQ, omap_dispc_irq_handler,
1410 0, MODULE_NAME, fbdev)) < 0) {
1411 dev_err(dispc.fbdev->dev, "can't get DSS IRQ\n");
1412 goto fail1;
1413 }
1414
1415 /* L3 firewall setting: enable access to OCM RAM */
1416 __raw_writel(0x402000b0, io_p2v(0x680050a0));
1417
1418 if ((r = alloc_palette_ram()) < 0)
1419 goto fail2;
1420
1421 if ((r = setup_fbmem(req_vram)) < 0)
1422 goto fail3;
1423
1424 if (!skip_init) {
1425 for (i = 0; i < dispc.mem_desc.region_cnt; i++) {
1426 memset(dispc.mem_desc.region[i].vaddr, 0,
1427 dispc.mem_desc.region[i].size);
1428 }
1429
1430 /* Set logic clock to fck, pixel clock to fck/2 for now */
1431 MOD_REG_FLD(DISPC_DIVISOR, FLD_MASK(16, 8), 1 << 16);
1432 MOD_REG_FLD(DISPC_DIVISOR, FLD_MASK(0, 8), 2 << 0);
1433
1434 setup_plane_fifo(0, ext_mode);
1435 setup_plane_fifo(1, ext_mode);
1436 setup_plane_fifo(2, ext_mode);
1437
1438 setup_color_conv_coef();
1439
1440 set_lcd_tft_mode(panel->config & OMAP_LCDC_PANEL_TFT);
1441 set_load_mode(DISPC_LOAD_FRAME_ONLY);
1442
1443 if (!ext_mode) {
1444 set_lcd_data_lines(panel->data_lines);
1445 omap_dispc_set_lcd_size(panel->x_res, panel->y_res);
1446 set_lcd_timings();
1447 } else
1448 set_lcd_data_lines(panel->bpp);
1449 enable_rfbi_mode(ext_mode);
1450 }
1451
1452 l = dispc_read_reg(DISPC_REVISION);
1453 pr_info("omapfb: DISPC version %d.%d initialized\n",
1454 l >> 4 & 0x0f, l & 0x0f);
1455 enable_lcd_clocks(0);
1456
1457 return 0;
1458fail3:
1459 free_palette_ram();
1460fail2:
1461 free_irq(INT_24XX_DSS_IRQ, fbdev);
1462fail1:
1463 enable_lcd_clocks(0);
1464 enable_interface_clocks(0);
1465 put_dss_clocks();
1466
1467 return r;
1468}
1469
/*
 * Tear down everything omap_dispc_init() set up: stop updates, disable
 * all planes, free framebuffer and palette memory, release the IRQ and
 * the clocks.
 */
static void omap_dispc_cleanup(void)
{
	int i;

	omap_dispc_set_update_mode(OMAPFB_UPDATE_DISABLED);
	/* This will also disable clocks that are on */
	for (i = 0; i < dispc.mem_desc.region_cnt; i++)
		omap_dispc_enable_plane(i, 0);
	cleanup_fbmem();
	free_palette_ram();
	free_irq(INT_24XX_DSS_IRQ, dispc.fbdev);
	enable_interface_clocks(0);
	put_dss_clocks();
}
1484
/* Operations table exported to the omapfb core for the OMAP2 internal
 * display controller (DISPC). */
const struct lcd_ctrl omap2_int_ctrl = {
	.name			= "internal",
	.init			= omap_dispc_init,
	.cleanup		= omap_dispc_cleanup,
	.get_caps		= omap_dispc_get_caps,
	.set_update_mode	= omap_dispc_set_update_mode,
	.get_update_mode	= omap_dispc_get_update_mode,
	.update_window		= omap_dispc_update_window,
	.suspend		= omap_dispc_suspend,
	.resume			= omap_dispc_resume,
	.setup_plane		= omap_dispc_setup_plane,
	.setup_mem		= omap_dispc_setup_mem,
	.set_scale		= omap_dispc_set_scale,
	.enable_plane		= omap_dispc_enable_plane,
	.set_color_key		= omap_dispc_set_color_key,
	.get_color_key		= omap_dispc_get_color_key,
	.mmap			= omap_dispc_mmap_user,
};
diff --git a/drivers/video/omap/dispc.h b/drivers/video/omap/dispc.h
new file mode 100644
index 000000000000..eb1512b56ce8
--- /dev/null
+++ b/drivers/video/omap/dispc.h
@@ -0,0 +1,43 @@
#ifndef _DISPC_H
#define _DISPC_H

#include <linux/interrupt.h>

/* Plane indexes */
#define DISPC_PLANE_GFX			0
#define DISPC_PLANE_VID1		1
#define DISPC_PLANE_VID2		2

/* Pixel format codes for the plane attribute registers */
#define DISPC_RGB_1_BPP			0x00
#define DISPC_RGB_2_BPP			0x01
#define DISPC_RGB_4_BPP			0x02
#define DISPC_RGB_8_BPP			0x03
#define DISPC_RGB_12_BPP		0x04
#define DISPC_RGB_16_BPP		0x06
#define DISPC_RGB_24_BPP		0x08
#define DISPC_RGB_24_BPP_UNPACK_32	0x09
#define DISPC_YUV2_422			0x0a
#define DISPC_UYVY_422			0x0b

/* DMA burst size selectors */
#define DISPC_BURST_4x32		0
#define DISPC_BURST_8x32		1
#define DISPC_BURST_16x32		2

/* CLUT/frame load modes (see set_load_mode()) */
#define DISPC_LOAD_CLUT_AND_FRAME	0x00
#define DISPC_LOAD_CLUT_ONLY		0x01
#define DISPC_LOAD_FRAME_ONLY		0x02
#define DISPC_LOAD_CLUT_ONCE_FRAME	0x03

/* TFT interface width selectors */
#define DISPC_TFT_DATA_LINES_12		0
#define DISPC_TFT_DATA_LINES_16		1
#define DISPC_TFT_DATA_LINES_18		2
#define DISPC_TFT_DATA_LINES_24		3

extern void omap_dispc_set_lcd_size(int width, int height);

extern void omap_dispc_enable_lcd_out(int enable);
extern void omap_dispc_enable_digit_out(int enable);

/* Single-client IRQ callback registration (see dispc.c) */
extern int omap_dispc_request_irq(void (*callback)(void *data), void *data);
extern void omap_dispc_free_irq(void);

#endif
diff --git a/drivers/video/omap/hwa742.c b/drivers/video/omap/hwa742.c
new file mode 100644
index 000000000000..dc48e02f215c
--- /dev/null
+++ b/drivers/video/omap/hwa742.c
@@ -0,0 +1,1077 @@
1/*
2 * Epson HWA742 LCD controller driver
3 *
4 * Copyright (C) 2004-2005 Nokia Corporation
5 * Authors: Juha Yrjölä <juha.yrjola@nokia.com>
6 * Imre Deak <imre.deak@nokia.com>
7 * YUV support: Jussi Laako <jussi.laako@nokia.com>
8 *
9 * This program is free software; you can redistribute it and/or modify it
10 * under the terms of the GNU General Public License as published by the
11 * Free Software Foundation; either version 2 of the License, or (at your
12 * option) any later version.
13 *
14 * This program is distributed in the hope that it will be useful, but
15 * WITHOUT ANY WARRANTY; without even the implied warranty of
16 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
17 * General Public License for more details.
18 *
19 * You should have received a copy of the GNU General Public License along
20 * with this program; if not, write to the Free Software Foundation, Inc.,
21 * 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
22 */
23#include <linux/module.h>
24#include <linux/mm.h>
25#include <linux/fb.h>
26#include <linux/delay.h>
27#include <linux/clk.h>
28
29#include <asm/arch/dma.h>
30#include <asm/arch/omapfb.h>
31#include <asm/arch/hwa742.h>
32
/* HWA742 register addresses on the external (RFBI-style) interface. */
#define HWA742_REV_CODE_REG	0x0
#define HWA742_CONFIG_REG	0x2
#define HWA742_PLL_DIV_REG	0x4
#define HWA742_PLL_0_REG	0x6
#define HWA742_PLL_1_REG	0x8
#define HWA742_PLL_2_REG	0xa
#define HWA742_PLL_3_REG	0xc
#define HWA742_PLL_4_REG	0xe
#define HWA742_CLK_SRC_REG	0x12
#define HWA742_PANEL_TYPE_REG	0x14
#define HWA742_H_DISP_REG	0x16
#define HWA742_H_NDP_REG	0x18
#define HWA742_V_DISP_1_REG	0x1a
#define HWA742_V_DISP_2_REG	0x1c
#define HWA742_V_NDP_REG	0x1e
#define HWA742_HS_W_REG		0x20
#define HWA742_HP_S_REG		0x22
#define HWA742_VS_W_REG		0x24
#define HWA742_VP_S_REG		0x26
#define HWA742_PCLK_POL_REG	0x28
#define HWA742_INPUT_MODE_REG	0x2a
#define HWA742_TRANSL_MODE_REG1	0x2e
#define HWA742_DISP_MODE_REG	0x34
#define HWA742_WINDOW_TYPE	0x36
#define HWA742_WINDOW_X_START_0	0x38
#define HWA742_WINDOW_X_START_1	0x3a
#define HWA742_WINDOW_Y_START_0	0x3c
#define HWA742_WINDOW_Y_START_1	0x3e
#define HWA742_WINDOW_X_END_0	0x40
#define HWA742_WINDOW_X_END_1	0x42
#define HWA742_WINDOW_Y_END_0	0x44
#define HWA742_WINDOW_Y_END_1	0x46
#define HWA742_MEMORY_WRITE_LSB	0x48
#define HWA742_MEMORY_WRITE_MSB	0x49
#define HWA742_MEMORY_READ_0	0x4a
#define HWA742_MEMORY_READ_1	0x4c
#define HWA742_MEMORY_READ_2	0x4e
#define HWA742_POWER_SAVE	0x56
#define HWA742_NDP_CTRL		0x58

/* Period of the auto-update timer: 20 refreshes per second. */
#define HWA742_AUTO_UPDATE_TIME	(HZ / 20)

/* Reserve 4 request slots for requests in irq context */
#define REQ_POOL_SIZE		24
#define IRQ_REQ_POOL_SIZE	4

/* Request flag: the slot came out of the IRQ-reserved headroom. */
#define REQ_FROM_IRQ_POOL	0x01

/* Handler return values: done synchronously vs. completes via callback. */
#define REQ_COMPLETE		0
#define REQ_PENDING		1

/* Parameters of a single window-update request. */
struct update_param {
	int	x, y, width, height;	/* update rectangle in pixels */
	int	color_mode;		/* OMAPFB_COLOR_* source format */
	int	flags;			/* OMAPFB_FORMAT_FLAG_* bits */
};

/*
 * One queued unit of work.  Requests are pre-allocated in a fixed pool
 * and chained on either the free or the pending list.
 */
struct hwa742_request {
	struct list_head entry;
	unsigned int	 flags;

	int		 (*handler)(struct hwa742_request *req);
	void		 (*complete)(void *data);	/* called after handler */
	void		 *complete_data;

	union {
		struct update_param	update;
		struct completion	*sync;
	} par;
};

/* Driver-global state (single controller instance). */
struct {
	enum omapfb_update_mode	update_mode;
	enum omapfb_update_mode	update_mode_before_suspend;

	struct timer_list	auto_update_timer;
	int			stop_auto_update;
	struct omapfb_update_window	auto_update_window;
	unsigned		te_connected:1;	/* tearing-effect line wired up */
	unsigned		vsync_only:1;	/* sync to VS only, never HS|VS */

	struct hwa742_request	req_pool[REQ_POOL_SIZE];
	struct list_head	pending_req_list;
	struct list_head	free_req_list;
	/* Counts free slots available to process context; IRQ context
	 * bypasses it and uses the reserved headroom. */
	struct semaphore	req_sema;
	spinlock_t		req_lock;	/* protects both req lists */

	struct extif_timings	reg_timings, lut_timings;

	int			prev_color_mode;
	int			prev_flags;
	int			window_type;

	u32			max_transmit_size;
	u32			extif_clk_period;	/* in picoseconds */
	unsigned long		pix_tx_time;	/* ps per 16bpp pixel */
	unsigned long		line_upd_time;	/* ps per display line */


	struct omapfb_device	*fbdev;
	struct lcd_ctrl_extif	*extif;
	struct lcd_ctrl		*int_ctrl;

	void			(*power_up)(struct device *dev);
	void			(*power_down)(struct device *dev);
} hwa742;

struct lcd_ctrl hwa742_ctrl;
141
142static u8 hwa742_read_reg(u8 reg)
143{
144 u8 data;
145
146 hwa742.extif->set_bits_per_cycle(8);
147 hwa742.extif->write_command(&reg, 1);
148 hwa742.extif->read_data(&data, 1);
149
150 return data;
151}
152
153static void hwa742_write_reg(u8 reg, u8 data)
154{
155 hwa742.extif->set_bits_per_cycle(8);
156 hwa742.extif->write_command(&reg, 1);
157 hwa742.extif->write_data(&data, 1);
158}
159
160static void set_window_regs(int x_start, int y_start, int x_end, int y_end)
161{
162 u8 tmp[8];
163 u8 cmd;
164
165 x_end--;
166 y_end--;
167 tmp[0] = x_start;
168 tmp[1] = x_start >> 8;
169 tmp[2] = y_start;
170 tmp[3] = y_start >> 8;
171 tmp[4] = x_end;
172 tmp[5] = x_end >> 8;
173 tmp[6] = y_end;
174 tmp[7] = y_end >> 8;
175
176 hwa742.extif->set_bits_per_cycle(8);
177 cmd = HWA742_WINDOW_X_START_0;
178
179 hwa742.extif->write_command(&cmd, 1);
180
181 hwa742.extif->write_data(tmp, 8);
182}
183
/*
 * Program pixel-format related registers: input conversion mode,
 * translation mode and window type.  OMAPFB_FORMAT_FLAG_DOUBLE toggles
 * pixel doubling via the low two bits of the cached window_type value,
 * which is then written back together with the new conv/transl modes.
 */
static void set_format_regs(int conv, int transl, int flags)
{
	if (flags & OMAPFB_FORMAT_FLAG_DOUBLE) {
		hwa742.window_type = ((hwa742.window_type & 0xfc) | 0x01);
#ifdef VERBOSE
		dev_dbg(hwa742.fbdev->dev, "hwa742: enabled pixel doubling\n");
#endif
	} else {
		hwa742.window_type = (hwa742.window_type & 0xfc);
#ifdef VERBOSE
		dev_dbg(hwa742.fbdev->dev, "hwa742: disabled pixel doubling\n");
#endif
	}

	hwa742_write_reg(HWA742_INPUT_MODE_REG, conv);
	hwa742_write_reg(HWA742_TRANSL_MODE_REG1, transl);
	hwa742_write_reg(HWA742_WINDOW_TYPE, hwa742.window_type);
}
202
/*
 * Enable tearing-effect synchronization for an update of widthxheight
 * pixels starting at line y.  Bit 2 of NDP_CTRL enables the TE output
 * on the controller side.  The extif is then told either to sync to
 * the start of the frame (enable_tearsync(1, 0)) or to a specific line
 * (y + 1) when racing the scan-out line-by-line is both necessary and
 * safe.  pix_tx_time and line_upd_time are in picoseconds; the /1000
 * scaling in the third test presumably avoids overflow of the
 * intermediate products — TODO confirm against setup_tearsync().
 */
static void enable_tearsync(int y, int width, int height, int screen_height,
			    int force_vsync)
{
	u8 b;

	b = hwa742_read_reg(HWA742_NDP_CTRL);
	b |= 1 << 2;
	hwa742_write_reg(HWA742_NDP_CTRL, b);

	/* Hardware setup or caller demands plain vsync. */
	if (likely(hwa742.vsync_only || force_vsync)) {
		hwa742.extif->enable_tearsync(1, 0);
		return;
	}

	/* One line transfers faster than one line scans out: vsync is enough. */
	if (width * hwa742.pix_tx_time < hwa742.line_upd_time) {
		hwa742.extif->enable_tearsync(1, 0);
		return;
	}

	/* Whole transfer finishes before scan-out reaches the region end. */
	if ((width * hwa742.pix_tx_time / 1000) * height <
	    (y + height) * (hwa742.line_upd_time / 1000)) {
		hwa742.extif->enable_tearsync(1, 0);
		return;
	}

	hwa742.extif->enable_tearsync(1, y + 1);
}
230
231static void disable_tearsync(void)
232{
233 u8 b;
234
235 hwa742.extif->enable_tearsync(0, 0);
236
237 b = hwa742_read_reg(HWA742_NDP_CTRL);
238 b &= ~(1 << 2);
239 hwa742_write_reg(HWA742_NDP_CTRL, b);
240}
241
/*
 * Take a request slot from the free pool.  Process-context callers
 * block on req_sema, whose count excludes IRQ_REQ_POOL_SIZE slots;
 * that headroom is what makes the pool non-empty for interrupt-context
 * callers, which skip the semaphore and are tagged REQ_FROM_IRQ_POOL
 * so free_req() knows not to up() the semaphore for them.
 */
static inline struct hwa742_request *alloc_req(void)
{
	unsigned long flags;
	struct hwa742_request *req;
	int req_flags = 0;

	if (!in_interrupt())
		down(&hwa742.req_sema);
	else
		req_flags = REQ_FROM_IRQ_POOL;

	spin_lock_irqsave(&hwa742.req_lock, flags);
	/* Can only trip if the IRQ headroom accounting is broken. */
	BUG_ON(list_empty(&hwa742.free_req_list));
	req = list_entry(hwa742.free_req_list.next,
			 struct hwa742_request, entry);
	list_del(&req->entry);
	spin_unlock_irqrestore(&hwa742.req_lock, flags);

	INIT_LIST_HEAD(&req->entry);
	req->flags = req_flags;

	return req;
}
265
266static inline void free_req(struct hwa742_request *req)
267{
268 unsigned long flags;
269
270 spin_lock_irqsave(&hwa742.req_lock, flags);
271
272 list_del(&req->entry);
273 list_add(&req->entry, &hwa742.free_req_list);
274 if (!(req->flags & REQ_FROM_IRQ_POOL))
275 up(&hwa742.req_sema);
276
277 spin_unlock_irqrestore(&hwa742.req_lock, flags);
278}
279
/*
 * Drain the pending request list.  The head entry is peeked (not
 * removed) under req_lock, the lock is dropped, and the handler runs
 * unlocked.  If the handler returns REQ_PENDING it has armed an async
 * completion (request_complete()) which will dequeue the request and
 * resume draining; we stop here.  Otherwise the request is dequeued
 * via free_req() (which does the list_del) and its completion callback
 * is invoked before re-checking the list.
 */
static void process_pending_requests(void)
{
	unsigned long flags;

	spin_lock_irqsave(&hwa742.req_lock, flags);

	while (!list_empty(&hwa742.pending_req_list)) {
		struct hwa742_request *req;
		void (*complete)(void *);
		void *complete_data;

		req = list_entry(hwa742.pending_req_list.next,
				 struct hwa742_request, entry);
		spin_unlock_irqrestore(&hwa742.req_lock, flags);

		if (req->handler(req) == REQ_PENDING)
			return;

		/* Snapshot before free_req(): the slot may be reused. */
		complete = req->complete;
		complete_data = req->complete_data;
		free_req(req);

		if (complete)
			complete(complete_data);

		spin_lock_irqsave(&hwa742.req_lock, flags);
	}

	spin_unlock_irqrestore(&hwa742.req_lock, flags);
}
310
/*
 * Append a list of requests to the tail of the pending queue (splicing
 * before the list head is an append).  Processing is only kicked off
 * when the queue was empty; otherwise an in-flight request's
 * completion path is already responsible for draining the queue.
 */
static void submit_req_list(struct list_head *head)
{
	unsigned long flags;
	int process = 1;

	spin_lock_irqsave(&hwa742.req_lock, flags);
	if (likely(!list_empty(&hwa742.pending_req_list)))
		process = 0;
	list_splice_init(head, hwa742.pending_req_list.prev);
	spin_unlock_irqrestore(&hwa742.req_lock, flags);

	if (process)
		process_pending_requests();
}
325
326static void request_complete(void *data)
327{
328 struct hwa742_request *req = (struct hwa742_request *)data;
329 void (*complete)(void *);
330 void *complete_data;
331
332 complete = req->complete;
333 complete_data = req->complete_data;
334
335 free_req(req);
336
337 if (complete)
338 complete(complete_data);
339
340 process_pending_requests();
341}
342
/*
 * Request handler that pushes one update rectangle to the controller:
 * reprogram format registers if the format changed, arm or disable
 * tearing sync, set the window registers, point the internal
 * controller's GFX plane at the source rectangle and start the extif
 * area transfer.  The transfer completes asynchronously via
 * request_complete(), so this always returns REQ_PENDING on success.
 */
static int send_frame_handler(struct hwa742_request *req)
{
	struct update_param *par = &req->par.update;
	int x = par->x;
	int y = par->y;
	int w = par->width;
	int h = par->height;
	int bpp;
	int conv, transl;
	unsigned long offset;
	int color_mode = par->color_mode;
	int flags = par->flags;
	int scr_width = hwa742.fbdev->panel->x_res;
	int scr_height = hwa742.fbdev->panel->y_res;

#ifdef VERBOSE
	dev_dbg(hwa742.fbdev->dev, "x %d y %d w %d h %d scr_width %d "
		"color_mode %d flags %d\n",
		x, y, w, h, scr_width, color_mode, flags);
#endif

	/* Map source format to bpp and the conv/transl register values. */
	switch (color_mode) {
	case OMAPFB_COLOR_YUV422:
		bpp = 16;
		conv = 0x08;
		transl = 0x25;
		break;
	case OMAPFB_COLOR_YUV420:
		bpp = 12;
		conv = 0x09;
		transl = 0x25;
		break;
	case OMAPFB_COLOR_RGB565:
		bpp = 16;
		conv = 0x01;
		transl = 0x05;
		break;
	default:
		return -EINVAL;
	}

	/* Only touch the format registers when something changed. */
	if (hwa742.prev_flags != flags ||
	    hwa742.prev_color_mode != color_mode) {
		set_format_regs(conv, transl, flags);
		hwa742.prev_color_mode = color_mode;
		hwa742.prev_flags = flags;
	}
	flags = req->par.update.flags;
	if (flags & OMAPFB_FORMAT_FLAG_TEARSYNC)
		enable_tearsync(y, scr_width, h, scr_height,
				flags & OMAPFB_FORMAT_FLAG_FORCE_VSYNC);
	else
		disable_tearsync();

	set_window_regs(x, y, x + w, y + h);

	/* Byte offset of the rectangle origin in the framebuffer. */
	offset = (scr_width * y + x) * bpp / 8;

	hwa742.int_ctrl->setup_plane(OMAPFB_PLANE_GFX,
		OMAPFB_CHANNEL_OUT_LCD, offset, scr_width, 0, 0, w, h,
		color_mode);

	/* Pixel data goes out 16 bits per cycle (registers used 8). */
	hwa742.extif->set_bits_per_cycle(16);

	hwa742.int_ctrl->enable_plane(OMAPFB_PLANE_GFX, 1);
	hwa742.extif->transfer_area(w, h, request_complete, req);

	return REQ_PENDING;
}
412
/* Per-request completion: stop the GFX plane once its transfer is done. */
static void send_frame_complete(void *data)
{
	hwa742.int_ctrl->enable_plane(OMAPFB_PLANE_GFX, 0);
}
417
/*
 * Helper for create_req_list(): allocate a send-frame request for the
 * given sub-rectangle and chain it on the caller's list.  Relies on
 * the locals req, req_head, color_mode and flags being in scope at the
 * expansion site.
 */
#define ADD_PREQ(_x, _y, _w, _h) do {		\
	req = alloc_req();			\
	req->handler	= send_frame_handler;	\
	req->complete	= send_frame_complete;	\
	req->par.update.x = _x;			\
	req->par.update.y = _y;			\
	req->par.update.width  = _w;		\
	req->par.update.height = _h;		\
	req->par.update.color_mode = color_mode;\
	req->par.update.flags = flags;		\
	list_add_tail(&req->entry, req_head);	\
} while(0)
430
/*
 * Split one update window into transfer requests on req_head:
 *  - a single 1-pixel-wide strip if the window starts on an odd column,
 *  - the even-width middle part, split once more if it would exceed
 *    the extif's max_transmit_size,
 *  - a trailing 1-pixel strip for a leftover odd column.
 * Tearing sync (cleared from flags after the first request) is only
 * attempted for the first segment; the rest follow immediately.
 */
static void create_req_list(struct omapfb_update_window *win,
			    struct list_head *req_head)
{
	struct hwa742_request *req;
	int x = win->x;
	int y = win->y;
	int width = win->width;
	int height = win->height;
	int color_mode;
	int flags;

	flags = win->format & ~OMAPFB_FORMAT_MASK;
	color_mode = win->format & OMAPFB_FORMAT_MASK;

	if (x & 1) {
		ADD_PREQ(x, y, 1, height);
		width--;
		x++;
		flags &= ~OMAPFB_FORMAT_FLAG_TEARSYNC;
	}
	if (width & ~1) {
		unsigned int xspan = width & ~1;
		unsigned int ystart = y;
		unsigned int yspan = height;

		/* 2 bytes/pixel: split vertically if the area is too big. */
		if (xspan * height * 2 > hwa742.max_transmit_size) {
			yspan = hwa742.max_transmit_size / (xspan * 2);
			ADD_PREQ(x, ystart, xspan, yspan);
			ystart += yspan;
			yspan = height - yspan;
			flags &= ~OMAPFB_FORMAT_FLAG_TEARSYNC;
		}

		ADD_PREQ(x, ystart, xspan, yspan);
		x += xspan;
		width -= xspan;
		flags &= ~OMAPFB_FORMAT_FLAG_TEARSYNC;
	}
	if (width)
		ADD_PREQ(x, y, 1, height);
}
472
473static void auto_update_complete(void *data)
474{
475 if (!hwa742.stop_auto_update)
476 mod_timer(&hwa742.auto_update_timer,
477 jiffies + HWA742_AUTO_UPDATE_TIME);
478}
479
/*
 * Timer callback for OMAPFB_AUTO_UPDATE mode: queue a full refresh of
 * the auto-update window.  Only the last request of the batch carries
 * a completion callback, which re-arms the timer for the next cycle.
 */
static void hwa742_update_window_auto(unsigned long arg)
{
	LIST_HEAD(req_list);
	struct hwa742_request *last;

	create_req_list(&hwa742.auto_update_window, &req_list);
	last = list_entry(req_list.prev, struct hwa742_request, entry);

	last->complete = auto_update_complete;
	last->complete_data = NULL;

	submit_req_list(&req_list);
}
493
/*
 * Queue an asynchronous update of one window; complete_callback (may
 * be NULL) fires after the last segment has been transferred.  Only
 * valid in manual update mode, and only for the format bits and flags
 * this controller understands.  Returns 0 or -EINVAL.
 */
int hwa742_update_window_async(struct fb_info *fbi,
			       struct omapfb_update_window *win,
			       void (*complete_callback)(void *arg),
			       void *complete_callback_data)
{
	LIST_HEAD(req_list);
	struct hwa742_request *last;
	int r = 0;

	if (hwa742.update_mode != OMAPFB_MANUAL_UPDATE) {
		dev_dbg(hwa742.fbdev->dev, "invalid update mode\n");
		r = -EINVAL;
		goto out;
	}
	if (unlikely(win->format &
	    ~(0x03 | OMAPFB_FORMAT_FLAG_DOUBLE |
	    OMAPFB_FORMAT_FLAG_TEARSYNC | OMAPFB_FORMAT_FLAG_FORCE_VSYNC))) {
		dev_dbg(hwa742.fbdev->dev, "invalid window flag");
		r = -EINVAL;
		goto out;
	}

	create_req_list(win, &req_list);
	/* Hang the caller's completion on the final segment only. */
	last = list_entry(req_list.prev, struct hwa742_request, entry);

	last->complete = complete_callback;
	last->complete_data = (void *)complete_callback_data;

	submit_req_list(&req_list);

out:
	return r;
}
EXPORT_SYMBOL(hwa742_update_window_async);
528
529static int hwa742_setup_plane(int plane, int channel_out,
530 unsigned long offset, int screen_width,
531 int pos_x, int pos_y, int width, int height,
532 int color_mode)
533{
534 if (plane != OMAPFB_PLANE_GFX ||
535 channel_out != OMAPFB_CHANNEL_OUT_LCD)
536 return -EINVAL;
537
538 return 0;
539}
540
/*
 * Enable/disable a plane; only plane 0 (the graphics plane) exists on
 * this path, and the call is forwarded to the internal controller.
 */
static int hwa742_enable_plane(int plane, int enable)
{
	if (plane != 0)
		return -EINVAL;

	hwa742.int_ctrl->enable_plane(plane, enable);

	return 0;
}
550
/* Handler for a sync request: wake the waiter; nothing is pending. */
static int sync_handler(struct hwa742_request *req)
{
	complete(req->par.sync);
	return REQ_COMPLETE;
}
556
/*
 * Wait until all previously queued requests have finished: append a
 * sync marker request to the queue and block until its handler runs.
 */
static void hwa742_sync(void)
{
	LIST_HEAD(req_list);
	struct hwa742_request *req;
	struct completion comp;

	req = alloc_req();

	req->handler = sync_handler;
	req->complete = NULL;
	init_completion(&comp);
	req->par.sync = &comp;

	list_add(&req->entry, &req_list);
	submit_req_list(&req_list);

	wait_for_completion(&comp);
}
575
576static void hwa742_bind_client(struct omapfb_notifier_block *nb)
577{
578 dev_dbg(hwa742.fbdev->dev, "update_mode %d\n", hwa742.update_mode);
579 if (hwa742.update_mode == OMAPFB_MANUAL_UPDATE) {
580 omapfb_notify_clients(hwa742.fbdev, OMAPFB_EVENT_READY);
581 }
582}
583
/*
 * Switch between disabled / auto / manual update modes.  Tears down
 * the old mode (notify clients, or stop the auto-update timer), drains
 * all in-flight requests with hwa742_sync(), then brings up the new
 * mode.  Returns -EINVAL for unknown modes, 0 otherwise.
 */
static int hwa742_set_update_mode(enum omapfb_update_mode mode)
{
	if (mode != OMAPFB_MANUAL_UPDATE && mode != OMAPFB_AUTO_UPDATE &&
	    mode != OMAPFB_UPDATE_DISABLED)
		return -EINVAL;

	if (mode == hwa742.update_mode)
		return 0;

	dev_info(hwa742.fbdev->dev, "HWA742: setting update mode to %s\n",
			mode == OMAPFB_UPDATE_DISABLED ? "disabled" :
			(mode == OMAPFB_AUTO_UPDATE ? "auto" : "manual"));

	switch (hwa742.update_mode) {
	case OMAPFB_MANUAL_UPDATE:
		omapfb_notify_clients(hwa742.fbdev, OMAPFB_EVENT_DISABLED);
		break;
	case OMAPFB_AUTO_UPDATE:
		/* Keep auto_update_complete() from re-arming the timer. */
		hwa742.stop_auto_update = 1;
		del_timer_sync(&hwa742.auto_update_timer);
		break;
	case OMAPFB_UPDATE_DISABLED:
		break;
	}

	hwa742.update_mode = mode;
	hwa742_sync();
	hwa742.stop_auto_update = 0;

	switch (mode) {
	case OMAPFB_MANUAL_UPDATE:
		omapfb_notify_clients(hwa742.fbdev, OMAPFB_EVENT_READY);
		break;
	case OMAPFB_AUTO_UPDATE:
		/* Kick off the first refresh; the timer chains from here. */
		hwa742_update_window_auto(0);
		break;
	case OMAPFB_UPDATE_DISABLED:
		break;
	}

	return 0;
}
626
/* Report the currently active update mode. */
static enum omapfb_update_mode hwa742_get_update_mode(void)
{
	return hwa742.update_mode;
}
631
632static unsigned long round_to_extif_ticks(unsigned long ps, int div)
633{
634 int bus_tick = hwa742.extif_clk_period * div;
635 return (ps + bus_tick - 1) / bus_tick * bus_tick;
636}
637
/*
 * Compute external-interface timings (in ps, rounded to extif ticks)
 * for register accesses at the given controller sysclk and clock
 * divider, then ask the extif to validate/convert them.  Returns the
 * extif's convert_timings() result (0 when the timings are usable).
 */
static int calc_reg_timing(unsigned long sysclk, int div)
{
	struct extif_timings *t;
	unsigned long systim;

	/* CSOnTime 0, WEOnTime 2 ns, REOnTime 2 ns,
	 * AccessTime 2 ns + 12.2 ns (regs),
	 * WEOffTime = WEOnTime + 1 ns,
	 * REOffTime = REOnTime + 16 ns (regs),
	 * CSOffTime = REOffTime + 1 ns
	 * ReadCycle = 2ns + 2*SYSCLK (regs),
	 * WriteCycle = 2*SYSCLK + 2 ns,
	 * CSPulseWidth = 10 ns */
	systim = 1000000000 / (sysclk / 1000);	/* sysclk period in ps */
	dev_dbg(hwa742.fbdev->dev, "HWA742 systim %lu ps extif_clk_period %u ps"
		  "extif_clk_div %d\n", systim, hwa742.extif_clk_period, div);

	t = &hwa742.reg_timings;
	memset(t, 0, sizeof(*t));
	t->clk_div = div;
	t->cs_on_time = 0;
	t->we_on_time = round_to_extif_ticks(t->cs_on_time + 2000, div);
	t->re_on_time = round_to_extif_ticks(t->cs_on_time + 2000, div);
	t->access_time = round_to_extif_ticks(t->re_on_time + 12200, div);
	t->we_off_time = round_to_extif_ticks(t->we_on_time + 1000, div);
	t->re_off_time = round_to_extif_ticks(t->re_on_time + 16000, div);
	t->cs_off_time = round_to_extif_ticks(t->re_off_time + 1000, div);
	t->we_cycle_time = round_to_extif_ticks(2 * systim + 2000, div);
	if (t->we_cycle_time < t->we_off_time)
		t->we_cycle_time = t->we_off_time;
	t->re_cycle_time = round_to_extif_ticks(2 * systim + 2000, div);
	if (t->re_cycle_time < t->re_off_time)
		t->re_cycle_time = t->re_off_time;
	t->cs_pulse_width = 0;

	dev_dbg(hwa742.fbdev->dev, "[reg]cson %d csoff %d reon %d reoff %d\n",
		 t->cs_on_time, t->cs_off_time, t->re_on_time, t->re_off_time);
	dev_dbg(hwa742.fbdev->dev, "[reg]weon %d weoff %d recyc %d wecyc %d\n",
		 t->we_on_time, t->we_off_time, t->re_cycle_time,
		 t->we_cycle_time);
	dev_dbg(hwa742.fbdev->dev, "[reg]rdaccess %d cspulse %d\n",
		 t->access_time, t->cs_pulse_width);

	return hwa742.extif->convert_timings(t);
}
683
/*
 * Same as calc_reg_timing() but for the slower LUT accesses, which
 * need an extra 4*SYSCLK + 26 ns on the read/access path.  Results go
 * into hwa742.lut_timings.
 */
static int calc_lut_timing(unsigned long sysclk, int div)
{
	struct extif_timings *t;
	unsigned long systim;

	/* CSOnTime 0, WEOnTime 2 ns, REOnTime 2 ns,
	 * AccessTime 2 ns + 4 * SYSCLK + 26 (lut),
	 * WEOffTime = WEOnTime + 1 ns,
	 * REOffTime = REOnTime + 4*SYSCLK + 26 ns (lut),
	 * CSOffTime = REOffTime + 1 ns
	 * ReadCycle = 2ns + 4*SYSCLK + 26 ns (lut),
	 * WriteCycle = 2*SYSCLK + 2 ns,
	 * CSPulseWidth = 10 ns
	 */
	systim = 1000000000 / (sysclk / 1000);	/* sysclk period in ps */
	dev_dbg(hwa742.fbdev->dev, "HWA742 systim %lu ps extif_clk_period %u ps"
		  "extif_clk_div %d\n", systim, hwa742.extif_clk_period, div);

	t = &hwa742.lut_timings;
	memset(t, 0, sizeof(*t));

	t->clk_div = div;

	t->cs_on_time = 0;
	t->we_on_time = round_to_extif_ticks(t->cs_on_time + 2000, div);
	t->re_on_time = round_to_extif_ticks(t->cs_on_time + 2000, div);
	t->access_time = round_to_extif_ticks(t->re_on_time + 4 * systim +
					      26000, div);
	t->we_off_time = round_to_extif_ticks(t->we_on_time + 1000, div);
	t->re_off_time = round_to_extif_ticks(t->re_on_time + 4 * systim +
					      26000, div);
	t->cs_off_time = round_to_extif_ticks(t->re_off_time + 1000, div);
	t->we_cycle_time = round_to_extif_ticks(2 * systim + 2000, div);
	if (t->we_cycle_time < t->we_off_time)
		t->we_cycle_time = t->we_off_time;
	t->re_cycle_time = round_to_extif_ticks(2000 + 4 * systim + 26000, div);
	if (t->re_cycle_time < t->re_off_time)
		t->re_cycle_time = t->re_off_time;
	t->cs_pulse_width = 0;

	dev_dbg(hwa742.fbdev->dev, "[lut]cson %d csoff %d reon %d reoff %d\n",
		 t->cs_on_time, t->cs_off_time, t->re_on_time, t->re_off_time);
	dev_dbg(hwa742.fbdev->dev, "[lut]weon %d weoff %d recyc %d wecyc %d\n",
		 t->we_on_time, t->we_off_time, t->re_cycle_time,
		 t->we_cycle_time);
	dev_dbg(hwa742.fbdev->dev, "[lut]rdaccess %d cspulse %d\n",
		 t->access_time, t->cs_pulse_width);

	return hwa742.extif->convert_timings(t);
}
734
735static int calc_extif_timings(unsigned long sysclk, int *extif_mem_div)
736{
737 int max_clk_div;
738 int div;
739
740 hwa742.extif->get_clk_info(&hwa742.extif_clk_period, &max_clk_div);
741 for (div = 1; div < max_clk_div; div++) {
742 if (calc_reg_timing(sysclk, div) == 0)
743 break;
744 }
745 if (div > max_clk_div)
746 goto err;
747
748 *extif_mem_div = div;
749
750 for (div = 1; div < max_clk_div; div++) {
751 if (calc_lut_timing(sysclk, div) == 0)
752 break;
753 }
754
755 if (div > max_clk_div)
756 goto err;
757
758 return 0;
759
760err:
761 dev_err(hwa742.fbdev->dev, "can't setup timings\n");
762 return -1;
763}
764
/*
 * Derive the controller's system and pixel clock rates (Hz) from the
 * external clock by reading back the bootloader-programmed CLK_SRC and
 * PLL registers.  When the pixel clock source is the PLL, sys_clk is
 * ext_clk scaled by the PLL multiplier/divider; otherwise sys_clk is
 * the external clock itself.
 */
static void calc_hwa742_clk_rates(unsigned long ext_clk,
				unsigned long *sys_clk, unsigned long *pix_clk)
{
	int pix_clk_src;
	int sys_div = 0, sys_mul = 0;
	int pix_div;

	pix_clk_src = hwa742_read_reg(HWA742_CLK_SRC_REG);
	pix_div = ((pix_clk_src >> 3) & 0x1f) + 1;
	if ((pix_clk_src & (0x3 << 1)) == 0) {
		/* Source is the PLL */
		sys_div = (hwa742_read_reg(HWA742_PLL_DIV_REG) & 0x3f) + 1;
		sys_mul = (hwa742_read_reg(HWA742_PLL_4_REG) & 0x7f) + 1;
		*sys_clk = ext_clk * sys_mul / sys_div;
	} else /* else source is ext clk, or oscillator */
		*sys_clk = ext_clk;

	*pix_clk = *sys_clk / pix_div;		/* HZ */
	dev_dbg(hwa742.fbdev->dev,
		"ext_clk %ld pix_src %d pix_div %d sys_div %d sys_mul %d\n",
		ext_clk, pix_clk_src & (0x3 << 1), pix_div, sys_div, sys_mul);
	dev_dbg(hwa742.fbdev->dev, "sys_clk %ld pix_clk %ld\n",
		*sys_clk, *pix_clk);
}
789
790
/*
 * Configure the tearing-effect (TE) signal.  Reads the panel timing
 * registers to decide whether TE can be a plain VSYNC or must be
 * HS or'ed with VS (needed when the extif cannot push a full line
 * within one display line), computes pix_tx_time and line_upd_time
 * (both in ps) used later by enable_tearsync(), programs NDP_CTRL and
 * finally hands the HS/VS widths and polarities to the extif.
 * Returns the extif's setup_tearsync() result, or -EDOM when the
 * derived VS pulse is not longer than HS.
 */
static int setup_tearsync(unsigned long pix_clk, int extif_div)
{
	int hdisp, vdisp;
	int hndp, vndp;
	int hsw, vsw;
	int hs, vs;
	int hs_pol_inv, vs_pol_inv;
	int use_hsvs, use_ndp;
	u8 b;

	/* Sync widths; bit 7 of each register encodes active-low polarity. */
	hsw = hwa742_read_reg(HWA742_HS_W_REG);
	vsw = hwa742_read_reg(HWA742_VS_W_REG);
	hs_pol_inv = !(hsw & 0x80);
	vs_pol_inv = !(vsw & 0x80);
	hsw = hsw & 0x7f;
	vsw = vsw & 0x3f;

	hdisp = (hwa742_read_reg(HWA742_H_DISP_REG) & 0x7f) * 8;
	vdisp = hwa742_read_reg(HWA742_V_DISP_1_REG) +
		((hwa742_read_reg(HWA742_V_DISP_2_REG) & 0x3) << 8);

	hndp = hwa742_read_reg(HWA742_H_NDP_REG) & 0x7f;
	vndp = hwa742_read_reg(HWA742_V_NDP_REG);

	/* time to transfer one pixel (16bpp) in ps */
	hwa742.pix_tx_time = hwa742.reg_timings.we_cycle_time;
	if (hwa742.extif->get_max_tx_rate != NULL) {
		/*
		 * The external interface might have a rate limitation,
		 * if so, we have to maximize our transfer rate.
		 */
		unsigned long min_tx_time;
		unsigned long max_tx_rate = hwa742.extif->get_max_tx_rate();

		dev_dbg(hwa742.fbdev->dev, "max_tx_rate %ld HZ\n",
			max_tx_rate);
		min_tx_time = 1000000000 / (max_tx_rate / 1000);  /* ps */
		if (hwa742.pix_tx_time < min_tx_time)
			hwa742.pix_tx_time = min_tx_time;
	}

	/* time to update one line in ps */
	hwa742.line_upd_time = (hdisp + hndp) * 1000000 / (pix_clk / 1000);
	hwa742.line_upd_time *= 1000;
	if (hdisp * hwa742.pix_tx_time > hwa742.line_upd_time)
		/*
		 * transfer speed too low, we might have to use both
		 * HS and VS
		 */
		use_hsvs = 1;
	else
		/* decent transfer speed, we'll always use only VS */
		use_hsvs = 0;

	if (use_hsvs && (hs_pol_inv || vs_pol_inv)) {
		/*
		 * HS or'ed with VS doesn't work, use the active high
		 * TE signal based on HNDP / VNDP
		 */
		use_ndp = 1;
		hs_pol_inv = 0;
		vs_pol_inv = 0;
		hs = hndp;
		vs = vndp;
	} else {
		/*
		 * Use HS or'ed with VS as a TE signal if both are needed
		 * or VNDP if only vsync is needed.
		 */
		use_ndp = 0;
		hs = hsw;
		vs = vsw;
		if (!use_hsvs) {
			hs_pol_inv = 0;
			vs_pol_inv = 0;
		}
	}

	/* Convert pulse widths from pixel clocks to ps. */
	hs = hs * 1000000 / (pix_clk / 1000);	/* ps */
	hs *= 1000;

	vs = vs * (hdisp + hndp) * 1000000 / (pix_clk / 1000);	/* ps */
	vs *= 1000;

	if (vs <= hs)
		return -EDOM;
	/* set VS to 120% of HS to minimize VS detection time */
	vs = hs * 12 / 10;
	/* minimize HS too */
	hs = 10000;

	b = hwa742_read_reg(HWA742_NDP_CTRL);
	b &= ~0x3;
	b |= use_hsvs ? 1 : 0;
	b |= (use_ndp && use_hsvs) ? 0 : 2;
	hwa742_write_reg(HWA742_NDP_CTRL, b);

	hwa742.vsync_only = !use_hsvs;

	dev_dbg(hwa742.fbdev->dev,
		"pix_clk %ld HZ pix_tx_time %ld ps line_upd_time %ld ps\n",
		pix_clk, hwa742.pix_tx_time, hwa742.line_upd_time);
	dev_dbg(hwa742.fbdev->dev,
		"hs %d ps vs %d ps mode %d vsync_only %d\n",
		hs, vs, (b & 0x3), !use_hsvs);

	return hwa742.extif->setup_tearsync(1, hs, vs,
					    hs_pol_inv, vs_pol_inv, extif_div);
}
900
/*
 * Report capabilities: the internal controller's caps plus manual
 * update and window pixel doubling; tearing sync is only advertised
 * when the TE line is actually wired up.
 */
static void hwa742_get_caps(int plane, struct omapfb_caps *caps)
{
	hwa742.int_ctrl->get_caps(plane, caps);
	caps->ctrl |= OMAPFB_CAPS_MANUAL_UPDATE |
		OMAPFB_CAPS_WINDOW_PIXEL_DOUBLE;
	if (hwa742.te_connected)
		caps->ctrl |= OMAPFB_CAPS_TEARSYNC;
	caps->wnd_color |= (1 << OMAPFB_COLOR_RGB565) |
			   (1 << OMAPFB_COLOR_YUV420);
}
911
/*
 * Suspend: remember and disable the update mode (this also drains
 * pending requests via hwa742_sync()), put the controller into sleep
 * mode, then cut power if the board provides a power_down hook.
 */
static void hwa742_suspend(void)
{
	hwa742.update_mode_before_suspend = hwa742.update_mode;
	hwa742_set_update_mode(OMAPFB_UPDATE_DISABLED);
	/* Enable sleep mode */
	hwa742_write_reg(HWA742_POWER_SAVE, 1 << 1);
	if (hwa742.power_down != NULL)
		hwa742.power_down(hwa742.fbdev->dev);
}
921
922static void hwa742_resume(void)
923{
924 if (hwa742.power_up != NULL)
925 hwa742.power_up(hwa742.fbdev->dev);
926 /* Disable sleep mode */
927 hwa742_write_reg(HWA742_POWER_SAVE, 0);
928 while (1) {
929 /* Loop until PLL output is stabilized */
930 if (hwa742_read_reg(HWA742_PLL_DIV_REG) & (1 << 7))
931 break;
932 set_current_state(TASK_UNINTERRUPTIBLE);
933 schedule_timeout(msecs_to_jiffies(5));
934 }
935 hwa742_set_update_mode(hwa742.update_mode_before_suspend);
936}
937
/*
 * Probe and initialize the HWA742: wire up the internal controller and
 * external interface, compute and apply extif timings (first against
 * the external clock, then against the PLL-derived sysclk), verify the
 * chip revision and that the bootloader brought up the PLL, optionally
 * configure tearing sync, and initialize the request pool, timers and
 * state.  Returns 0 or a negative errno; error paths unwind whatever
 * was set up (err4..err1 in reverse order of acquisition).
 */
static int hwa742_init(struct omapfb_device *fbdev, int ext_mode,
		       struct omapfb_mem_desc *req_vram)
{
	int r = 0, i;
	u8 rev, conf;
	unsigned long ext_clk;
	unsigned long sys_clk, pix_clk;
	int extif_mem_div;
	struct omapfb_platform_data *omapfb_conf;
	struct hwa742_platform_data *ctrl_conf;

	BUG_ON(!fbdev->ext_if || !fbdev->int_ctrl);

	hwa742.fbdev = fbdev;
	hwa742.extif = fbdev->ext_if;
	hwa742.int_ctrl = fbdev->int_ctrl;

	omapfb_conf = fbdev->dev->platform_data;
	ctrl_conf = omapfb_conf->ctrl_platform_data;

	if (ctrl_conf == NULL || ctrl_conf->get_clock_rate == NULL) {
		dev_err(fbdev->dev, "HWA742: missing platform data\n");
		r = -ENOENT;
		goto err1;
	}

	hwa742.power_down = ctrl_conf->power_down;
	hwa742.power_up = ctrl_conf->power_up;

	spin_lock_init(&hwa742.req_lock);

	if ((r = hwa742.int_ctrl->init(fbdev, 1, req_vram)) < 0)
		goto err1;

	if ((r = hwa742.extif->init(fbdev)) < 0)
		goto err2;

	/* First pass: conservative timings from the raw external clock,
	 * good enough to read the chip's clock registers. */
	ext_clk = ctrl_conf->get_clock_rate(fbdev->dev);
	if ((r = calc_extif_timings(ext_clk, &extif_mem_div)) < 0)
		goto err3;
	hwa742.extif->set_timings(&hwa742.reg_timings);
	if (hwa742.power_up != NULL)
		hwa742.power_up(fbdev->dev);

	/* Second pass: recompute against the real PLL-derived sysclk. */
	calc_hwa742_clk_rates(ext_clk, &sys_clk, &pix_clk);
	if ((r = calc_extif_timings(sys_clk, &extif_mem_div)) < 0)
		goto err4;
	hwa742.extif->set_timings(&hwa742.reg_timings);

	rev = hwa742_read_reg(HWA742_REV_CODE_REG);
	if ((rev & 0xfc) != 0x80) {
		dev_err(fbdev->dev, "HWA742: invalid revision %02x\n", rev);
		r = -ENODEV;
		goto err4;
	}


	if (!(hwa742_read_reg(HWA742_PLL_DIV_REG) & 0x80)) {
		dev_err(fbdev->dev,
			"HWA742: controller not initialized by the bootloader\n");
		r = -ENODEV;
		goto err4;
	}

	if (ctrl_conf->te_connected) {
		if ((r = setup_tearsync(pix_clk, extif_mem_div)) < 0) {
			dev_err(hwa742.fbdev->dev,
			       "HWA742: can't setup tearing synchronization\n");
			goto err4;
		}
		hwa742.te_connected = 1;
	}

	hwa742.max_transmit_size = hwa742.extif->max_transmit_size;

	hwa742.update_mode = OMAPFB_UPDATE_DISABLED;

	hwa742.auto_update_window.x = 0;
	hwa742.auto_update_window.y = 0;
	hwa742.auto_update_window.width = fbdev->panel->x_res;
	hwa742.auto_update_window.height = fbdev->panel->y_res;
	hwa742.auto_update_window.format = 0;

	init_timer(&hwa742.auto_update_timer);
	hwa742.auto_update_timer.function = hwa742_update_window_auto;
	hwa742.auto_update_timer.data = 0;

	hwa742.prev_color_mode = -1;
	hwa742.prev_flags = 0;

	/* NOTE(review): redundant — fbdev was already assigned above. */
	hwa742.fbdev = fbdev;

	INIT_LIST_HEAD(&hwa742.free_req_list);
	INIT_LIST_HEAD(&hwa742.pending_req_list);
	for (i = 0; i < ARRAY_SIZE(hwa742.req_pool); i++)
		list_add(&hwa742.req_pool[i].entry, &hwa742.free_req_list);
	/* The semaphore counts only the non-IRQ-reserved slots. */
	BUG_ON(i <= IRQ_REQ_POOL_SIZE);
	sema_init(&hwa742.req_sema, i - IRQ_REQ_POOL_SIZE);

	conf = hwa742_read_reg(HWA742_CONFIG_REG);
	dev_info(fbdev->dev, ": Epson HWA742 LCD controller rev %d "
			"initialized (CNF pins %x)\n", rev & 0x03, conf & 0x07);

	return 0;
err4:
	if (hwa742.power_down != NULL)
		hwa742.power_down(fbdev->dev);
err3:
	hwa742.extif->cleanup();
err2:
	hwa742.int_ctrl->cleanup();
err1:
	return r;
}
1052
/*
 * Teardown: stop updates (drains pending requests), release the extif
 * and internal controller, and power the chip down if possible.
 */
static void hwa742_cleanup(void)
{
	hwa742_set_update_mode(OMAPFB_UPDATE_DISABLED);
	hwa742.extif->cleanup();
	hwa742.int_ctrl->cleanup();
	if (hwa742.power_down != NULL)
		hwa742.power_down(hwa742.fbdev->dev);
}
1061
/* Controller operations exported to the omapfb core. */
struct lcd_ctrl hwa742_ctrl = {
	.name			= "hwa742",
	.init			= hwa742_init,
	.cleanup		= hwa742_cleanup,
	.bind_client		= hwa742_bind_client,
	.get_caps		= hwa742_get_caps,
	.set_update_mode	= hwa742_set_update_mode,
	.get_update_mode	= hwa742_get_update_mode,
	.setup_plane		= hwa742_setup_plane,
	.enable_plane		= hwa742_enable_plane,
	.update_window		= hwa742_update_window_async,
	.sync			= hwa742_sync,
	.suspend		= hwa742_suspend,
	.resume			= hwa742_resume,
};
1077
diff --git a/drivers/video/omap/lcd_h3.c b/drivers/video/omap/lcd_h3.c
new file mode 100644
index 000000000000..51807b4e26d1
--- /dev/null
+++ b/drivers/video/omap/lcd_h3.c
@@ -0,0 +1,141 @@
1/*
2 * LCD panel support for the TI OMAP H3 board
3 *
4 * Copyright (C) 2004 Nokia Corporation
5 * Author: Imre Deak <imre.deak@nokia.com>
6 *
7 * This program is free software; you can redistribute it and/or modify it
8 * under the terms of the GNU General Public License as published by the
9 * Free Software Foundation; either version 2 of the License, or (at your
10 * option) any later version.
11 *
12 * This program is distributed in the hope that it will be useful, but
13 * WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
15 * General Public License for more details.
16 *
17 * You should have received a copy of the GNU General Public License along
18 * with this program; if not, write to the Free Software Foundation, Inc.,
19 * 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
20 */
21
22#include <linux/module.h>
23#include <linux/platform_device.h>
24
25#include <asm/arch/gpio.h>
26#include <asm/arch/tps65010.h>
27#include <asm/arch/omapfb.h>
28
29#define MODULE_NAME "omapfb-lcd_h3"
30
31#define pr_err(fmt, args...) printk(KERN_ERR MODULE_NAME ": " fmt, ## args)
32
/* Stub: the H3 panel needs no board-specific setup. */
static int h3_panel_init(struct lcd_panel *panel, struct omapfb_device *fbdev)
{
	return 0;
}

/* Stub: init acquires nothing, so there is nothing to release. */
static void h3_panel_cleanup(struct lcd_panel *panel)
{
}
41
42static int h3_panel_enable(struct lcd_panel *panel)
43{
44 int r = 0;
45
46 /* GPIO1 and GPIO2 of TPS65010 send LCD_ENBKL and LCD_ENVDD signals */
47 r = tps65010_set_gpio_out_value(GPIO1, HIGH);
48 if (!r)
49 r = tps65010_set_gpio_out_value(GPIO2, HIGH);
50 if (r)
51 pr_err("Unable to turn on LCD panel\n");
52
53 return r;
54}
55
56static void h3_panel_disable(struct lcd_panel *panel)
57{
58 int r = 0;
59
60 /* GPIO1 and GPIO2 of TPS65010 send LCD_ENBKL and LCD_ENVDD signals */
61 r = tps65010_set_gpio_out_value(GPIO1, LOW);
62 if (!r)
63 tps65010_set_gpio_out_value(GPIO2, LOW);
64 if (r)
65 pr_err("Unable to turn off LCD panel\n");
66}
67
/* The H3 panel advertises no optional omapfb capabilities. */
static unsigned long h3_panel_get_caps(struct lcd_panel *panel)
{
	return 0;
}

/* 240x320, 16 bpp TFT panel description and timings for the H3 board. */
struct lcd_panel h3_panel = {
	.name		= "h3",
	.config		= OMAP_LCDC_PANEL_TFT,

	.data_lines	= 16,
	.bpp		= 16,
	.x_res		= 240,
	.y_res		= 320,
	.pixel_clock	= 12000,
	.hsw		= 12,
	.hfp		= 14,
	.hbp		= 72 - 12,	/* total horizontal blanking of 72 minus sync width */
	.vsw		= 1,
	.vfp		= 1,
	.vbp		= 0,
	.pcd		= 0,

	.init		= h3_panel_init,
	.cleanup	= h3_panel_cleanup,
	.enable		= h3_panel_enable,
	.disable	= h3_panel_disable,
	.get_caps	= h3_panel_get_caps,
};

/* Hand the panel description to the omapfb core. */
static int h3_panel_probe(struct platform_device *pdev)
{
	omapfb_register_panel(&h3_panel);
	return 0;
}

/* Stub: nothing to undo; the core keeps the registration. */
static int h3_panel_remove(struct platform_device *pdev)
{
	return 0;
}

/* Stub: no device state to save on suspend. */
static int h3_panel_suspend(struct platform_device *pdev, pm_message_t mesg)
{
	return 0;
}

/* Stub: no device state to restore on resume. */
static int h3_panel_resume(struct platform_device *pdev)
{
	return 0;
}

struct platform_driver h3_panel_driver = {
	.probe		= h3_panel_probe,
	.remove		= h3_panel_remove,
	.suspend	= h3_panel_suspend,
	.resume		= h3_panel_resume,
	.driver		= {
		.name	= "lcd_h3",	/* matches the board's platform device name */
		.owner	= THIS_MODULE,
	},
};

static int h3_panel_drv_init(void)
{
	return platform_driver_register(&h3_panel_driver);
}

static void h3_panel_drv_cleanup(void)
{
	platform_driver_unregister(&h3_panel_driver);
}

module_init(h3_panel_drv_init);
module_exit(h3_panel_drv_cleanup);
141
diff --git a/drivers/video/omap/lcd_h4.c b/drivers/video/omap/lcd_h4.c
new file mode 100644
index 000000000000..fd6f0eb16de1
--- /dev/null
+++ b/drivers/video/omap/lcd_h4.c
@@ -0,0 +1,117 @@
1/*
2 * LCD panel support for the TI OMAP H4 board
3 *
4 * Copyright (C) 2004 Nokia Corporation
5 * Author: Imre Deak <imre.deak@nokia.com>
6 *
7 * This program is free software; you can redistribute it and/or modify it
8 * under the terms of the GNU General Public License as published by the
9 * Free Software Foundation; either version 2 of the License, or (at your
10 * option) any later version.
11 *
12 * This program is distributed in the hope that it will be useful, but
13 * WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
15 * General Public License for more details.
16 *
17 * You should have received a copy of the GNU General Public License along
18 * with this program; if not, write to the Free Software Foundation, Inc.,
19 * 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
20 */
21
22#include <linux/module.h>
23#include <linux/platform_device.h>
24
25#include <asm/arch/omapfb.h>
26
/* Stub: the H4 panel needs no board-specific setup. */
static int h4_panel_init(struct lcd_panel *panel, struct omapfb_device *fbdev)
{
	return 0;
}

/* Stub: init acquires nothing, so there is nothing to release. */
static void h4_panel_cleanup(struct lcd_panel *panel)
{
}

/* Stub: always reports success; no enable action is needed here. */
static int h4_panel_enable(struct lcd_panel *panel)
{
	return 0;
}

/* Stub: no disable action is needed here. */
static void h4_panel_disable(struct lcd_panel *panel)
{
}

/* The H4 panel advertises no optional omapfb capabilities. */
static unsigned long h4_panel_get_caps(struct lcd_panel *panel)
{
	return 0;
}

/* 240x320, 16 bpp TFT panel description and timings for the H4 board. */
struct lcd_panel h4_panel = {
	.name		= "h4",
	.config		= OMAP_LCDC_PANEL_TFT,

	.bpp		= 16,
	.data_lines	= 16,
	.x_res		= 240,
	.y_res		= 320,
	.pixel_clock	= 6250,
	.hsw		= 15,
	.hfp		= 15,
	.hbp		= 60,
	.vsw		= 1,
	.vfp		= 1,
	.vbp		= 1,

	.init		= h4_panel_init,
	.cleanup	= h4_panel_cleanup,
	.enable		= h4_panel_enable,
	.disable	= h4_panel_disable,
	.get_caps	= h4_panel_get_caps,
};

/* Hand the panel description to the omapfb core. */
static int h4_panel_probe(struct platform_device *pdev)
{
	omapfb_register_panel(&h4_panel);
	return 0;
}

/* Stub: nothing to undo on remove. */
static int h4_panel_remove(struct platform_device *pdev)
{
	return 0;
}

/* Stub: no device state to save on suspend. */
static int h4_panel_suspend(struct platform_device *pdev, pm_message_t mesg)
{
	return 0;
}

/* Stub: no device state to restore on resume. */
static int h4_panel_resume(struct platform_device *pdev)
{
	return 0;
}

struct platform_driver h4_panel_driver = {
	.probe		= h4_panel_probe,
	.remove		= h4_panel_remove,
	.suspend	= h4_panel_suspend,
	.resume		= h4_panel_resume,
	.driver		= {
		.name	= "lcd_h4",	/* matches the board's platform device name */
		.owner	= THIS_MODULE,
	},
};

static int h4_panel_drv_init(void)
{
	return platform_driver_register(&h4_panel_driver);
}

static void h4_panel_drv_cleanup(void)
{
	platform_driver_unregister(&h4_panel_driver);
}

module_init(h4_panel_drv_init);
module_exit(h4_panel_drv_cleanup);
117
diff --git a/drivers/video/omap/lcd_inn1510.c b/drivers/video/omap/lcd_inn1510.c
new file mode 100644
index 000000000000..551f385861d1
--- /dev/null
+++ b/drivers/video/omap/lcd_inn1510.c
@@ -0,0 +1,124 @@
1/*
2 * LCD panel support for the TI OMAP1510 Innovator board
3 *
4 * Copyright (C) 2004 Nokia Corporation
5 * Author: Imre Deak <imre.deak@nokia.com>
6 *
7 * This program is free software; you can redistribute it and/or modify it
8 * under the terms of the GNU General Public License as published by the
9 * Free Software Foundation; either version 2 of the License, or (at your
10 * option) any later version.
11 *
12 * This program is distributed in the hope that it will be useful, but
13 * WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
15 * General Public License for more details.
16 *
17 * You should have received a copy of the GNU General Public License along
18 * with this program; if not, write to the Free Software Foundation, Inc.,
19 * 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
20 */
21
22#include <linux/module.h>
23#include <linux/platform_device.h>
24#include <linux/io.h>
25
26#include <asm/arch/fpga.h>
27#include <asm/arch/omapfb.h>
28
/* Stub: no board-specific setup is needed. */
static int innovator1510_panel_init(struct lcd_panel *panel,
				    struct omapfb_device *fbdev)
{
	return 0;
}

/* Stub: init acquires nothing, so there is nothing to release. */
static void innovator1510_panel_cleanup(struct lcd_panel *panel)
{
}

/* Switch the panel on through the board FPGA's LCD panel control register. */
static int innovator1510_panel_enable(struct lcd_panel *panel)
{
	fpga_write(0x7, OMAP1510_FPGA_LCD_PANEL_CONTROL);
	return 0;
}

/* Clear the FPGA LCD control bits to switch the panel off. */
static void innovator1510_panel_disable(struct lcd_panel *panel)
{
	fpga_write(0x0, OMAP1510_FPGA_LCD_PANEL_CONTROL);
}

/* No optional omapfb capabilities are advertised. */
static unsigned long innovator1510_panel_get_caps(struct lcd_panel *panel)
{
	return 0;
}

/* 240x320, 16 bpp TFT panel description for the Innovator/OMAP1510. */
struct lcd_panel innovator1510_panel = {
	.name		= "inn1510",
	.config		= OMAP_LCDC_PANEL_TFT,

	.bpp		= 16,
	.data_lines	= 16,
	.x_res		= 240,
	.y_res		= 320,
	.pixel_clock	= 12500,
	.hsw		= 40,
	.hfp		= 40,
	.hbp		= 72,
	.vsw		= 1,
	.vfp		= 1,
	.vbp		= 0,
	.pcd		= 12,

	.init		= innovator1510_panel_init,
	.cleanup	= innovator1510_panel_cleanup,
	.enable		= innovator1510_panel_enable,
	.disable	= innovator1510_panel_disable,
	.get_caps	= innovator1510_panel_get_caps,
};

/* Hand the panel description to the omapfb core. */
static int innovator1510_panel_probe(struct platform_device *pdev)
{
	omapfb_register_panel(&innovator1510_panel);
	return 0;
}

/* Stub: nothing to undo on remove. */
static int innovator1510_panel_remove(struct platform_device *pdev)
{
	return 0;
}

/* Stub: no device state to save on suspend. */
static int innovator1510_panel_suspend(struct platform_device *pdev,
				       pm_message_t mesg)
{
	return 0;
}

/* Stub: no device state to restore on resume. */
static int innovator1510_panel_resume(struct platform_device *pdev)
{
	return 0;
}

struct platform_driver innovator1510_panel_driver = {
	.probe		= innovator1510_panel_probe,
	.remove		= innovator1510_panel_remove,
	.suspend	= innovator1510_panel_suspend,
	.resume		= innovator1510_panel_resume,
	.driver		= {
		.name	= "lcd_inn1510",	/* matches the board's platform device name */
		.owner	= THIS_MODULE,
	},
};

static int innovator1510_panel_drv_init(void)
{
	return platform_driver_register(&innovator1510_panel_driver);
}

static void innovator1510_panel_drv_cleanup(void)
{
	platform_driver_unregister(&innovator1510_panel_driver);
}

module_init(innovator1510_panel_drv_init);
module_exit(innovator1510_panel_drv_cleanup);
124
diff --git a/drivers/video/omap/lcd_inn1610.c b/drivers/video/omap/lcd_inn1610.c
new file mode 100644
index 000000000000..95604ca43301
--- /dev/null
+++ b/drivers/video/omap/lcd_inn1610.c
@@ -0,0 +1,150 @@
1/*
2 * LCD panel support for the TI OMAP1610 Innovator board
3 *
4 * Copyright (C) 2004 Nokia Corporation
5 * Author: Imre Deak <imre.deak@nokia.com>
6 *
7 * This program is free software; you can redistribute it and/or modify it
8 * under the terms of the GNU General Public License as published by the
9 * Free Software Foundation; either version 2 of the License, or (at your
10 * option) any later version.
11 *
12 * This program is distributed in the hope that it will be useful, but
13 * WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
15 * General Public License for more details.
16 *
17 * You should have received a copy of the GNU General Public License along
18 * with this program; if not, write to the Free Software Foundation, Inc.,
19 * 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
20 */
21
22#include <linux/module.h>
23#include <linux/platform_device.h>
24
25#include <asm/arch/gpio.h>
26#include <asm/arch/omapfb.h>
27
/* Prefix for this driver's error messages (consumed by pr_err below). */
#define MODULE_NAME "omapfb-lcd_inn1610"	/* fix copy/paste from lcd_h3.c: was "omapfb-lcd_h3" */

#define pr_err(fmt, args...) printk(KERN_ERR MODULE_NAME ": " fmt, ## args)
31
/*
 * Claim GPIOs 14 and 15 and configure both as outputs; they drive the
 * panel on/off lines used by enable/disable.  Returns 0 on success or
 * -1 if either GPIO cannot be requested (with the first one released
 * again on the second failure).
 */
static int innovator1610_panel_init(struct lcd_panel *panel,
				    struct omapfb_device *fbdev)
{
	if (omap_request_gpio(14)) {
		pr_err("can't request GPIO 14\n");
		return -1;
	}
	if (omap_request_gpio(15)) {
		pr_err("can't request GPIO 15\n");
		omap_free_gpio(14);
		return -1;
	}
	/* configure GPIO(14, 15) as outputs */
	omap_set_gpio_direction(14, 0);
	omap_set_gpio_direction(15, 0);
	return 0;
}
54
/* Release the GPIOs claimed in init, in reverse order. */
static void innovator1610_panel_cleanup(struct lcd_panel *panel)
{
	omap_free_gpio(15);
	omap_free_gpio(14);
}

/* Drive both panel control lines high to switch the panel on. */
static int innovator1610_panel_enable(struct lcd_panel *panel)
{
	/* set GPIO14 and GPIO15 high */
	omap_set_gpio_dataout(14, 1);
	omap_set_gpio_dataout(15, 1);
	return 0;
}

/* Drive both panel control lines low to switch the panel off. */
static void innovator1610_panel_disable(struct lcd_panel *panel)
{
	/* set GPIO14 and GPIO15 low (old comment also named GPIO13,
	 * but only 14 and 15 are driven here) */
	omap_set_gpio_dataout(14, 0);
	omap_set_gpio_dataout(15, 0);
}

/* No optional omapfb capabilities are advertised. */
static unsigned long innovator1610_panel_get_caps(struct lcd_panel *panel)
{
	return 0;
}

/* 320x240, 16 bpp TFT panel description for the Innovator/OMAP1610. */
struct lcd_panel innovator1610_panel = {
	.name		= "inn1610",
	.config		= OMAP_LCDC_PANEL_TFT,

	.bpp		= 16,
	.data_lines	= 16,
	.x_res		= 320,
	.y_res		= 240,
	.pixel_clock	= 12500,
	.hsw		= 40,
	.hfp		= 40,
	.hbp		= 72,
	.vsw		= 1,
	.vfp		= 1,
	.vbp		= 0,
	.pcd		= 12,

	.init		= innovator1610_panel_init,
	.cleanup	= innovator1610_panel_cleanup,
	.enable		= innovator1610_panel_enable,
	.disable	= innovator1610_panel_disable,
	.get_caps	= innovator1610_panel_get_caps,
};

/* Hand the panel description to the omapfb core. */
static int innovator1610_panel_probe(struct platform_device *pdev)
{
	omapfb_register_panel(&innovator1610_panel);
	return 0;
}

/* Stub: nothing to undo on remove. */
static int innovator1610_panel_remove(struct platform_device *pdev)
{
	return 0;
}

/* Stub: no device state to save on suspend. */
static int innovator1610_panel_suspend(struct platform_device *pdev,
				       pm_message_t mesg)
{
	return 0;
}

/* Stub: no device state to restore on resume. */
static int innovator1610_panel_resume(struct platform_device *pdev)
{
	return 0;
}

struct platform_driver innovator1610_panel_driver = {
	.probe		= innovator1610_panel_probe,
	.remove		= innovator1610_panel_remove,
	.suspend	= innovator1610_panel_suspend,
	.resume		= innovator1610_panel_resume,
	.driver		= {
		.name	= "lcd_inn1610",	/* matches the board's platform device name */
		.owner	= THIS_MODULE,
	},
};

static int innovator1610_panel_drv_init(void)
{
	return platform_driver_register(&innovator1610_panel_driver);
}

static void innovator1610_panel_drv_cleanup(void)
{
	platform_driver_unregister(&innovator1610_panel_driver);
}

module_init(innovator1610_panel_drv_init);
module_exit(innovator1610_panel_drv_cleanup);
150
diff --git a/drivers/video/omap/lcd_osk.c b/drivers/video/omap/lcd_osk.c
new file mode 100644
index 000000000000..a38038840fd6
--- /dev/null
+++ b/drivers/video/omap/lcd_osk.c
@@ -0,0 +1,144 @@
1/*
2 * LCD panel support for the TI OMAP OSK board
3 *
4 * Copyright (C) 2004 Nokia Corporation
5 * Author: Imre Deak <imre.deak@nokia.com>
6 * Adapted for OSK by <dirk.behme@de.bosch.com>
7 *
8 * This program is free software; you can redistribute it and/or modify it
9 * under the terms of the GNU General Public License as published by the
10 * Free Software Foundation; either version 2 of the License, or (at your
11 * option) any later version.
12 *
13 * This program is distributed in the hope that it will be useful, but
14 * WITHOUT ANY WARRANTY; without even the implied warranty of
15 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
16 * General Public License for more details.
17 *
18 * You should have received a copy of the GNU General Public License along
19 * with this program; if not, write to the Free Software Foundation, Inc.,
20 * 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
21 */
22
23#include <linux/module.h>
24#include <linux/platform_device.h>
25
26#include <asm/arch/gpio.h>
27#include <asm/arch/mux.h>
28#include <asm/arch/omapfb.h>
29
/* Stub: no board-specific setup is needed. */
static int osk_panel_init(struct lcd_panel *panel, struct omapfb_device *fbdev)
{
	return 0;
}

/* Stub: init acquires nothing, so there is nothing to release. */
static void osk_panel_cleanup(struct lcd_panel *panel)
{
}

/*
 * Switch the panel on: mux the PWL pin, enable the PWL unit at full
 * level (presumably the backlight modulator — confirm against the OMAP
 * TRM), then drive GPIO2 high.  Sequence order matters.
 */
static int osk_panel_enable(struct lcd_panel *panel)
{
	/* configure PWL pin */
	omap_cfg_reg(PWL);

	/* Enable PWL unit */
	omap_writeb(0x01, OMAP_PWL_CLK_ENABLE);

	/* Set PWL level */
	omap_writeb(0xFF, OMAP_PWL_ENABLE);

	/* configure GPIO2 as output */
	omap_set_gpio_direction(2, 0);

	/* set GPIO2 high */
	omap_set_gpio_dataout(2, 1);

	return 0;
}

/* Reverse of enable: zero the PWL level, gate its clock, drop GPIO2. */
static void osk_panel_disable(struct lcd_panel *panel)
{
	/* Set PWL level to zero */
	omap_writeb(0x00, OMAP_PWL_ENABLE);

	/* Disable PWL unit */
	omap_writeb(0x00, OMAP_PWL_CLK_ENABLE);

	/* set GPIO2 low */
	omap_set_gpio_dataout(2, 0);
}

/* No optional omapfb capabilities are advertised. */
static unsigned long osk_panel_get_caps(struct lcd_panel *panel)
{
	return 0;
}

/* 240x320, 16 bpp TFT panel description for the OSK board. */
struct lcd_panel osk_panel = {
	.name		= "osk",
	.config		= OMAP_LCDC_PANEL_TFT,

	.bpp		= 16,
	.data_lines	= 16,
	.x_res		= 240,
	.y_res		= 320,
	.pixel_clock	= 12500,
	.hsw		= 40,
	.hfp		= 40,
	.hbp		= 72,
	.vsw		= 1,
	.vfp		= 1,
	.vbp		= 0,
	.pcd		= 12,

	.init		= osk_panel_init,
	.cleanup	= osk_panel_cleanup,
	.enable		= osk_panel_enable,
	.disable	= osk_panel_disable,
	.get_caps	= osk_panel_get_caps,
};

/* Hand the panel description to the omapfb core. */
static int osk_panel_probe(struct platform_device *pdev)
{
	omapfb_register_panel(&osk_panel);
	return 0;
}

/* Stub: nothing to undo on remove. */
static int osk_panel_remove(struct platform_device *pdev)
{
	return 0;
}

/* Stub: no device state to save on suspend. */
static int osk_panel_suspend(struct platform_device *pdev, pm_message_t mesg)
{
	return 0;
}

/* Stub: no device state to restore on resume. */
static int osk_panel_resume(struct platform_device *pdev)
{
	return 0;
}

struct platform_driver osk_panel_driver = {
	.probe		= osk_panel_probe,
	.remove		= osk_panel_remove,
	.suspend	= osk_panel_suspend,
	.resume		= osk_panel_resume,
	.driver		= {
		.name	= "lcd_osk",	/* matches the board's platform device name */
		.owner	= THIS_MODULE,
	},
};

static int osk_panel_drv_init(void)
{
	return platform_driver_register(&osk_panel_driver);
}

static void osk_panel_drv_cleanup(void)
{
	platform_driver_unregister(&osk_panel_driver);
}

module_init(osk_panel_drv_init);
module_exit(osk_panel_drv_cleanup);
144
diff --git a/drivers/video/omap/lcd_palmte.c b/drivers/video/omap/lcd_palmte.c
new file mode 100644
index 000000000000..52bdfdac42c9
--- /dev/null
+++ b/drivers/video/omap/lcd_palmte.c
@@ -0,0 +1,123 @@
1/*
2 * LCD panel support for the Palm Tungsten E
3 *
4 * Original version : Romain Goyet <r.goyet@gmail.com>
5 * Current version : Laurent Gonzalez <palmte.linux@free.fr>
6 *
7 * This program is free software; you can redistribute it and/or modify it
8 * under the terms of the GNU General Public License as published by the
9 * Free Software Foundation; either version 2 of the License, or (at your
10 * option) any later version.
11 *
12 * This program is distributed in the hope that it will be useful, but
13 * WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
15 * General Public License for more details.
16 *
17 * You should have received a copy of the GNU General Public License along
18 * with this program; if not, write to the Free Software Foundation, Inc.,
19 * 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
20 */
21
22#include <linux/module.h>
23#include <linux/platform_device.h>
24#include <linux/io.h>
25
26#include <asm/arch/fpga.h>
27#include <asm/arch/omapfb.h>
28
/* Stub: no board-specific setup is needed. */
static int palmte_panel_init(struct lcd_panel *panel,
			     struct omapfb_device *fbdev)
{
	return 0;
}

/* Stub: init acquires nothing, so there is nothing to release. */
static void palmte_panel_cleanup(struct lcd_panel *panel)
{
}

/* Stub: always reports success; no enable action is needed here. */
static int palmte_panel_enable(struct lcd_panel *panel)
{
	return 0;
}

/* Stub: no disable action is needed here. */
static void palmte_panel_disable(struct lcd_panel *panel)
{
}

/* No optional omapfb capabilities are advertised. */
static unsigned long palmte_panel_get_caps(struct lcd_panel *panel)
{
	return 0;
}

/* 320x320, 8 bpp TFT panel description for the Palm Tungsten E. */
struct lcd_panel palmte_panel = {
	.name		= "palmte",
	.config		= OMAP_LCDC_PANEL_TFT | OMAP_LCDC_INV_VSYNC |
			  OMAP_LCDC_INV_HSYNC | OMAP_LCDC_HSVS_RISING_EDGE |
			  OMAP_LCDC_HSVS_OPPOSITE,

	.data_lines	= 16,
	.bpp		= 8,
	.pixel_clock	= 12000,
	.x_res		= 320,
	.y_res		= 320,
	.hsw		= 4,
	.hfp		= 8,
	.hbp		= 28,
	.vsw		= 1,
	.vfp		= 8,
	.vbp		= 7,
	.pcd		= 0,

	.init		= palmte_panel_init,
	.cleanup	= palmte_panel_cleanup,
	.enable		= palmte_panel_enable,
	.disable	= palmte_panel_disable,
	.get_caps	= palmte_panel_get_caps,
};

/* Hand the panel description to the omapfb core. */
static int palmte_panel_probe(struct platform_device *pdev)
{
	omapfb_register_panel(&palmte_panel);
	return 0;
}

/* Stub: nothing to undo on remove. */
static int palmte_panel_remove(struct platform_device *pdev)
{
	return 0;
}

/* Stub: no device state to save on suspend. */
static int palmte_panel_suspend(struct platform_device *pdev, pm_message_t mesg)
{
	return 0;
}

/* Stub: no device state to restore on resume. */
static int palmte_panel_resume(struct platform_device *pdev)
{
	return 0;
}

struct platform_driver palmte_panel_driver = {
	.probe		= palmte_panel_probe,
	.remove		= palmte_panel_remove,
	.suspend	= palmte_panel_suspend,
	.resume		= palmte_panel_resume,
	.driver		= {
		.name	= "lcd_palmte",	/* matches the board's platform device name */
		.owner	= THIS_MODULE,
	},
};

static int palmte_panel_drv_init(void)
{
	return platform_driver_register(&palmte_panel_driver);
}

static void palmte_panel_drv_cleanup(void)
{
	platform_driver_unregister(&palmte_panel_driver);
}

module_init(palmte_panel_drv_init);
module_exit(palmte_panel_drv_cleanup);
123
diff --git a/drivers/video/omap/lcd_palmtt.c b/drivers/video/omap/lcd_palmtt.c
new file mode 100644
index 000000000000..4bb349f54356
--- /dev/null
+++ b/drivers/video/omap/lcd_palmtt.c
@@ -0,0 +1,127 @@
1/*
2 * LCD panel support for Palm Tungsten|T
3 * Current version : Marek Vasut <marek.vasut@gmail.com>
4 *
5 * Modified from lcd_inn1510.c
6 *
7 * This program is free software; you can redistribute it and/or modify it
8 * under the terms of the GNU General Public License as published by the
9 * Free Software Foundation; either version 2 of the License, or (at your
10 * option) any later version.
11 *
12 * This program is distributed in the hope that it will be useful, but
13 * WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
15 * General Public License for more details.
16 *
17 * You should have received a copy of the GNU General Public License along
18 * with this program; if not, write to the Free Software Foundation, Inc.,
19 * 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
20 */
21
22/*
23GPIO11 - backlight
24GPIO12 - screen blanking
25GPIO13 - screen blanking
26*/
27
28#include <linux/platform_device.h>
29#include <linux/module.h>
30#include <linux/io.h>
31
32#include <asm/arch/gpio.h>
33#include <asm/arch/omapfb.h>
34
/* Stub: no board-specific setup is needed. */
static int palmtt_panel_init(struct lcd_panel *panel,
			     struct omapfb_device *fbdev)
{
	return 0;
}

/* Stub: init acquires nothing, so there is nothing to release. */
static void palmtt_panel_cleanup(struct lcd_panel *panel)
{
}

/* Stub: always reports success; no enable action is needed here. */
static int palmtt_panel_enable(struct lcd_panel *panel)
{
	return 0;
}

/* Stub: no disable action is needed here. */
static void palmtt_panel_disable(struct lcd_panel *panel)
{
}

/* Advertise backlight control to the omapfb core (see GPIO11 note above). */
static unsigned long palmtt_panel_get_caps(struct lcd_panel *panel)
{
	return OMAPFB_CAPS_SET_BACKLIGHT;
}

/* 320x320, 16 bpp TFT panel description for the Palm Tungsten|T. */
struct lcd_panel palmtt_panel = {
	.name		= "palmtt",
	.config		= OMAP_LCDC_PANEL_TFT | OMAP_LCDC_INV_VSYNC |
			  OMAP_LCDC_INV_HSYNC | OMAP_LCDC_HSVS_RISING_EDGE |
			  OMAP_LCDC_HSVS_OPPOSITE,
	.bpp		= 16,
	.data_lines	= 16,
	.x_res		= 320,
	.y_res		= 320,
	.pixel_clock	= 10000,
	.hsw		= 4,
	.hfp		= 8,
	.hbp		= 28,
	.vsw		= 1,
	.vfp		= 8,
	.vbp		= 7,
	.pcd		= 0,

	.init		= palmtt_panel_init,
	.cleanup	= palmtt_panel_cleanup,
	.enable		= palmtt_panel_enable,
	.disable	= palmtt_panel_disable,
	.get_caps	= palmtt_panel_get_caps,
};

/* Hand the panel description to the omapfb core. */
static int palmtt_panel_probe(struct platform_device *pdev)
{
	omapfb_register_panel(&palmtt_panel);
	return 0;
}

/* Stub: nothing to undo on remove. */
static int palmtt_panel_remove(struct platform_device *pdev)
{
	return 0;
}

/* Stub: no device state to save on suspend. */
static int palmtt_panel_suspend(struct platform_device *pdev, pm_message_t mesg)
{
	return 0;
}

/* Stub: no device state to restore on resume. */
static int palmtt_panel_resume(struct platform_device *pdev)
{
	return 0;
}

struct platform_driver palmtt_panel_driver = {
	.probe		= palmtt_panel_probe,
	.remove		= palmtt_panel_remove,
	.suspend	= palmtt_panel_suspend,
	.resume		= palmtt_panel_resume,
	.driver		= {
		.name	= "lcd_palmtt",	/* matches the board's platform device name */
		.owner	= THIS_MODULE,
	},
};

static int palmtt_panel_drv_init(void)
{
	return platform_driver_register(&palmtt_panel_driver);
}

static void palmtt_panel_drv_cleanup(void)
{
	platform_driver_unregister(&palmtt_panel_driver);
}

module_init(palmtt_panel_drv_init);
module_exit(palmtt_panel_drv_cleanup);
diff --git a/drivers/video/omap/lcd_palmz71.c b/drivers/video/omap/lcd_palmz71.c
new file mode 100644
index 000000000000..ea6170ddff35
--- /dev/null
+++ b/drivers/video/omap/lcd_palmz71.c
@@ -0,0 +1,123 @@
1/*
2 * LCD panel support for the Palm Zire71
3 *
4 * Original version : Romain Goyet
5 * Current version : Laurent Gonzalez
6 * Modified for zire71 : Marek Vasut
7 *
8 * This program is free software; you can redistribute it and/or modify it
9 * under the terms of the GNU General Public License as published by the
10 * Free Software Foundation; either version 2 of the License, or (at your
11 * option) any later version.
12 *
13 * This program is distributed in the hope that it will be useful, but
14 * WITHOUT ANY WARRANTY; without even the implied warranty of
15 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
16 * General Public License for more details.
17 *
18 * You should have received a copy of the GNU General Public License along
19 * with this program; if not, write to the Free Software Foundation, Inc.,
20 * 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
21 */
22
23#include <linux/module.h>
24#include <linux/platform_device.h>
25#include <linux/io.h>
26
27#include <asm/arch/omapfb.h>
28
/* Stub: no board-specific setup is needed. */
static int palmz71_panel_init(struct lcd_panel *panel,
			      struct omapfb_device *fbdev)
{
	return 0;
}

/* Stub: init acquires nothing, so there is nothing to release. */
static void palmz71_panel_cleanup(struct lcd_panel *panel)
{

}

/* Stub: always reports success; no enable action is needed here. */
static int palmz71_panel_enable(struct lcd_panel *panel)
{
	return 0;
}

/* Stub: no disable action is needed here. */
static void palmz71_panel_disable(struct lcd_panel *panel)
{
}

/* Advertise backlight control to the omapfb core. */
static unsigned long palmz71_panel_get_caps(struct lcd_panel *panel)
{
	return OMAPFB_CAPS_SET_BACKLIGHT;
}

/* 320x320, 16 bpp TFT panel description for the Palm Zire71. */
struct lcd_panel palmz71_panel = {
	.name		= "palmz71",
	.config		= OMAP_LCDC_PANEL_TFT | OMAP_LCDC_INV_VSYNC |
			  OMAP_LCDC_INV_HSYNC | OMAP_LCDC_HSVS_RISING_EDGE |
			  OMAP_LCDC_HSVS_OPPOSITE,
	.data_lines	= 16,
	.bpp		= 16,
	.pixel_clock	= 24000,
	.x_res		= 320,
	.y_res		= 320,
	.hsw		= 4,
	.hfp		= 8,
	.hbp		= 28,
	.vsw		= 1,
	.vfp		= 8,
	.vbp		= 7,
	.pcd		= 0,

	.init		= palmz71_panel_init,
	.cleanup	= palmz71_panel_cleanup,
	.enable		= palmz71_panel_enable,
	.disable	= palmz71_panel_disable,
	.get_caps	= palmz71_panel_get_caps,
};

/* Hand the panel description to the omapfb core. */
static int palmz71_panel_probe(struct platform_device *pdev)
{
	omapfb_register_panel(&palmz71_panel);
	return 0;
}

/* Stub: nothing to undo on remove. */
static int palmz71_panel_remove(struct platform_device *pdev)
{
	return 0;
}

/* Stub: no device state to save on suspend. */
static int palmz71_panel_suspend(struct platform_device *pdev,
				 pm_message_t mesg)
{
	return 0;
}

/* Stub: no device state to restore on resume. */
static int palmz71_panel_resume(struct platform_device *pdev)
{
	return 0;
}

struct platform_driver palmz71_panel_driver = {
	.probe		= palmz71_panel_probe,
	.remove		= palmz71_panel_remove,
	.suspend	= palmz71_panel_suspend,
	.resume		= palmz71_panel_resume,
	.driver		= {
		.name	= "lcd_palmz71",	/* matches the board's platform device name */
		.owner	= THIS_MODULE,
	},
};

static int palmz71_panel_drv_init(void)
{
	return platform_driver_register(&palmz71_panel_driver);
}

static void palmz71_panel_drv_cleanup(void)
{
	platform_driver_unregister(&palmz71_panel_driver);
}

module_init(palmz71_panel_drv_init);
module_exit(palmz71_panel_drv_cleanup);
diff --git a/drivers/video/omap/lcd_sx1.c b/drivers/video/omap/lcd_sx1.c
new file mode 100644
index 000000000000..c4f306a4e5c9
--- /dev/null
+++ b/drivers/video/omap/lcd_sx1.c
@@ -0,0 +1,334 @@
1/*
2 * LCD panel support for the Siemens SX1 mobile phone
3 *
4 * Current version : Vovan888@gmail.com, great help from FCA00000
5 *
6 * This program is free software; you can redistribute it and/or modify it
7 * under the terms of the GNU General Public License as published by the
8 * Free Software Foundation; either version 2 of the License, or (at your
9 * option) any later version.
10 *
11 * This program is distributed in the hope that it will be useful, but
12 * WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14 * General Public License for more details.
15 *
16 * You should have received a copy of the GNU General Public License along
17 * with this program; if not, write to the Free Software Foundation, Inc.,
18 * 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
19 */
20
21#include <linux/module.h>
22#include <linux/platform_device.h>
23#include <linux/delay.h>
24#include <linux/io.h>
25
26#include <asm/arch/gpio.h>
27#include <asm/arch/omapfb.h>
28#include <asm/arch/mcbsp.h>
29#include <asm/arch/mux.h>
30
31/*
32 * OMAP310 GPIO registers
33 */
34#define GPIO_DATA_INPUT 0xfffce000
35#define GPIO_DATA_OUTPUT 0xfffce004
36#define GPIO_DIR_CONTROL 0xfffce008
37#define GPIO_INT_CONTROL 0xfffce00c
38#define GPIO_INT_MASK 0xfffce010
39#define GPIO_INT_STATUS 0xfffce014
40#define GPIO_PIN_CONTROL 0xfffce018
41
42
43#define A_LCD_SSC_RD 3
44#define A_LCD_SSC_SD 7
45#define _A_LCD_RESET 9
46#define _A_LCD_SSC_CS 12
47#define _A_LCD_SSC_A0 13
48
49#define DSP_REG 0xE1017024
50
/*
 * Epson controller init payload for command 0xCA (phase 1).
 * NOTE(review): declared with 12 elements but only 11 initializers
 * (last element implicitly 0), and display_init() sends just the first
 * 10 bytes -- confirm the intended length against the panel datasheet.
 */
const unsigned char INIT_1[12] = {
	0x1C, 0x02, 0x88, 0x00, 0x1E, 0xE0, 0x00, 0xDC, 0x00, 0x02, 0x00
};
54
/*
 * Epson controller init payload for command 0xCB (phase 2); the first
 * 125 bytes are sent by display_init().
 */
const unsigned char INIT_2[127] = {
	0x15, 0x00, 0x29, 0x00, 0x3E, 0x00, 0x51, 0x00,
	0x65, 0x00, 0x7A, 0x00, 0x8D, 0x00, 0xA1, 0x00,
	0xB6, 0x00, 0xC7, 0x00, 0xD8, 0x00, 0xEB, 0x00,
	0xFB, 0x00, 0x0B, 0x01, 0x1B, 0x01, 0x27, 0x01,
	0x34, 0x01, 0x41, 0x01, 0x4C, 0x01, 0x55, 0x01,
	0x5F, 0x01, 0x68, 0x01, 0x70, 0x01, 0x78, 0x01,
	0x7E, 0x01, 0x86, 0x01, 0x8C, 0x01, 0x94, 0x01,
	0x9B, 0x01, 0xA1, 0x01, 0xA4, 0x01, 0xA9, 0x01,
	0xAD, 0x01, 0xB2, 0x01, 0xB7, 0x01, 0xBC, 0x01,
	0xC0, 0x01, 0xC4, 0x01, 0xC8, 0x01, 0xCB, 0x01,
	0xCF, 0x01, 0xD2, 0x01, 0xD5, 0x01, 0xD8, 0x01,
	0xDB, 0x01, 0xE0, 0x01, 0xE3, 0x01, 0xE6, 0x01,
	0xE8, 0x01, 0xEB, 0x01, 0xEE, 0x01, 0xF1, 0x01,
	0xF3, 0x01, 0xF8, 0x01, 0xF9, 0x01, 0xFC, 0x01,
	0x00, 0x02, 0x03, 0x02, 0x07, 0x02, 0x09, 0x02,
	0x0E, 0x02, 0x13, 0x02, 0x1C, 0x02, 0x00
};
73
/*
 * Epson controller init payload for command 0xCC (phase 2a); the first
 * 14 bytes are sent by display_init().
 */
const unsigned char INIT_3[15] = {
	0x14, 0x26, 0x33, 0x3D, 0x45, 0x4D, 0x53, 0x59,
	0x5E, 0x63, 0x67, 0x6D, 0x71, 0x78, 0xFF
};
78
/*
 * Bit-bang one byte to the Epson LCD controller, MSB first.
 * @flag: 0 = command byte (A0 pulled low first), non-zero = data byte.
 * The serial clock is generated by alternately writing 0x2200/0x2202
 * to McBSP3's PCR0 register; the data bit is presented on the SSC_SD
 * GPIO between the two writes of each cycle.
 */
static void epson_sendbyte(int flag, unsigned char byte)
{
	int i, shifter = 0x80;

	if (!flag)
		omap_set_gpio_dataout(_A_LCD_SSC_A0, 0);
	mdelay(2);
	omap_set_gpio_dataout(A_LCD_SSC_RD, 1);

	omap_set_gpio_dataout(A_LCD_SSC_SD, flag);

	OMAP_MCBSP_WRITE(OMAP1510_MCBSP3_BASE, PCR0, 0x2200);
	OMAP_MCBSP_WRITE(OMAP1510_MCBSP3_BASE, PCR0, 0x2202);
	for (i = 0; i < 8; i++) {
		OMAP_MCBSP_WRITE(OMAP1510_MCBSP3_BASE, PCR0, 0x2200);
		omap_set_gpio_dataout(A_LCD_SSC_SD, shifter & byte);
		OMAP_MCBSP_WRITE(OMAP1510_MCBSP3_BASE, PCR0, 0x2202);
		shifter >>= 1;
	}
	/* Leave A0 high (data mode) when done */
	omap_set_gpio_dataout(_A_LCD_SSC_A0, 1);
}
100
/* Claim McBSP3 (used as the bit-bang clock source) and halt it. */
static void init_system(void)
{
	omap_mcbsp_request(OMAP_MCBSP3);
	omap_mcbsp_stop(OMAP_MCBSP3);
}
106
/*
 * Claim the five LCD control GPIOs, configure them all as outputs and
 * drive them to their idle levels (RD high, CS/A0 deasserted).
 */
static void setup_GPIO(void)
{
	/* claim the control lines */
	omap_request_gpio(A_LCD_SSC_RD);
	omap_request_gpio(A_LCD_SSC_SD);
	omap_request_gpio(_A_LCD_RESET);
	omap_request_gpio(_A_LCD_SSC_CS);
	omap_request_gpio(_A_LCD_SSC_A0);

	/* set all GPIOs to output */
	omap_set_gpio_direction(A_LCD_SSC_RD, 0);
	omap_set_gpio_direction(A_LCD_SSC_SD, 0);
	omap_set_gpio_direction(_A_LCD_RESET, 0);
	omap_set_gpio_direction(_A_LCD_SSC_CS, 0);
	omap_set_gpio_direction(_A_LCD_SSC_A0, 0);

	/* set GPIO data to the idle levels */
	omap_set_gpio_dataout(A_LCD_SSC_RD, 1);
	omap_set_gpio_dataout(A_LCD_SSC_SD, 0);
	omap_set_gpio_dataout(_A_LCD_RESET, 0);
	omap_set_gpio_dataout(_A_LCD_SSC_CS, 1);
	omap_set_gpio_dataout(_A_LCD_SSC_A0, 1);
}
130
/*
 * Full power-on initialization of the Epson LCD controller: pin mux,
 * GPIO setup, hardware reset, then the multi-phase register download
 * (commands 0xCA/0xCB/0xCC with the INIT_* tables, followed by the
 * individual configuration commands).  Chip select is toggled between
 * phases to latch each command sequence.
 */
static void display_init(void)
{
	int i;

	omap_cfg_reg(MCBSP3_CLKX);

	mdelay(2);
	setup_GPIO();
	mdelay(2);

	/* reset LCD */
	omap_set_gpio_dataout(A_LCD_SSC_SD, 1);
	epson_sendbyte(0, 0x25);

	omap_set_gpio_dataout(_A_LCD_RESET, 0);
	mdelay(10);
	omap_set_gpio_dataout(_A_LCD_RESET, 1);

	omap_set_gpio_dataout(_A_LCD_SSC_CS, 1);
	mdelay(2);
	omap_set_gpio_dataout(_A_LCD_SSC_CS, 0);

	/* init LCD, phase 1 */
	epson_sendbyte(0, 0xCA);
	for (i = 0; i < 10; i++)
		epson_sendbyte(1, INIT_1[i]);
	omap_set_gpio_dataout(_A_LCD_SSC_CS, 1);
	omap_set_gpio_dataout(_A_LCD_SSC_CS, 0);

	/* init LCD phase 2 */
	epson_sendbyte(0, 0xCB);
	for (i = 0; i < 125; i++)
		epson_sendbyte(1, INIT_2[i]);
	omap_set_gpio_dataout(_A_LCD_SSC_CS, 1);
	omap_set_gpio_dataout(_A_LCD_SSC_CS, 0);

	/* init LCD phase 2a */
	epson_sendbyte(0, 0xCC);
	for (i = 0; i < 14; i++)
		epson_sendbyte(1, INIT_3[i]);
	omap_set_gpio_dataout(_A_LCD_SSC_CS, 1);
	omap_set_gpio_dataout(_A_LCD_SSC_CS, 0);

	/* init LCD phase 3 */
	epson_sendbyte(0, 0xBC);
	epson_sendbyte(1, 0x08);
	omap_set_gpio_dataout(_A_LCD_SSC_CS, 1);
	omap_set_gpio_dataout(_A_LCD_SSC_CS, 0);

	/* init LCD phase 4 */
	epson_sendbyte(0, 0x07);
	epson_sendbyte(1, 0x05);
	omap_set_gpio_dataout(_A_LCD_SSC_CS, 1);
	omap_set_gpio_dataout(_A_LCD_SSC_CS, 0);

	/* init LCD phase 5 */
	epson_sendbyte(0, 0x94);
	omap_set_gpio_dataout(_A_LCD_SSC_CS, 1);
	omap_set_gpio_dataout(_A_LCD_SSC_CS, 0);

	/* init LCD phase 6 */
	epson_sendbyte(0, 0xC6);
	epson_sendbyte(1, 0x80);
	omap_set_gpio_dataout(_A_LCD_SSC_CS, 1);
	mdelay(100); /* used to be 1000 */
	omap_set_gpio_dataout(_A_LCD_SSC_CS, 0);

	/* init LCD phase 7 */
	epson_sendbyte(0, 0x16);
	epson_sendbyte(1, 0x02);
	epson_sendbyte(1, 0x00);
	epson_sendbyte(1, 0xB1);
	epson_sendbyte(1, 0x00);
	omap_set_gpio_dataout(_A_LCD_SSC_CS, 1);
	omap_set_gpio_dataout(_A_LCD_SSC_CS, 0);

	/* init LCD phase 8 */
	epson_sendbyte(0, 0x76);
	epson_sendbyte(1, 0x00);
	epson_sendbyte(1, 0x00);
	epson_sendbyte(1, 0xDB);
	epson_sendbyte(1, 0x00);
	omap_set_gpio_dataout(_A_LCD_SSC_CS, 1);
	omap_set_gpio_dataout(_A_LCD_SSC_CS, 0);

	/* init LCD phase 9 */
	epson_sendbyte(0, 0xAF);
	omap_set_gpio_dataout(_A_LCD_SSC_CS, 1);
}
220
/* Nothing to do at panel registration time; real init happens in enable. */
static int sx1_panel_init(struct lcd_panel *panel, struct omapfb_device *fbdev)
{
	return 0;
}
225
/* No resources to release. */
static void sx1_panel_cleanup(struct lcd_panel *panel)
{
}
229
/*
 * Power the panel down: cut MMI power, then send the Epson display-off
 * sequence (0x25, 0xAE, 0x95) with chip-select toggles between commands.
 */
static void sx1_panel_disable(struct lcd_panel *panel)
{
	printk(KERN_INFO "SX1: LCD panel disable\n");
	sx1_setmmipower(0);
	omap_set_gpio_dataout(_A_LCD_SSC_CS, 1);

	epson_sendbyte(0, 0x25);
	omap_set_gpio_dataout(_A_LCD_SSC_CS, 0);

	epson_sendbyte(0, 0xAE);
	omap_set_gpio_dataout(_A_LCD_SSC_CS, 1);
	mdelay(100);
	omap_set_gpio_dataout(_A_LCD_SSC_CS, 0);

	epson_sendbyte(0, 0x95);
	omap_set_gpio_dataout(_A_LCD_SSC_CS, 1);
}
247
248static int sx1_panel_enable(struct lcd_panel *panel)
249{
250 printk(KERN_INFO "lcd_sx1: LCD panel enable\n");
251 init_system();
252 display_init();
253
254 sx1_setmmipower(1);
255 sx1_setbacklight(0x18);
256 sx1_setkeylight (0x06);
257 return 0;
258}
259
260
/* The SX1 panel advertises no optional capabilities. */
static unsigned long sx1_panel_get_caps(struct lcd_panel *panel)
{
	return 0;
}
265
/*
 * Panel descriptor for the SX1's 176x220 TFT, registered with omapfb
 * by sx1_panel_probe().  Timing fields are in pixel clocks (horizontal)
 * and lines (vertical).
 */
struct lcd_panel sx1_panel = {
	.name		= "sx1",
	.config		= OMAP_LCDC_PANEL_TFT | OMAP_LCDC_INV_VSYNC |
			  OMAP_LCDC_INV_HSYNC | OMAP_LCDC_INV_PIX_CLOCK |
			  OMAP_LCDC_INV_OUTPUT_EN,

	.x_res		= 176,
	.y_res		= 220,
	.data_lines	= 16,
	.bpp		= 16,
	.hsw		= 5,
	.hfp		= 5,
	.hbp		= 5,
	.vsw		= 2,
	.vfp		= 1,
	.vbp		= 1,
	.pixel_clock	= 1500,

	.init		= sx1_panel_init,
	.cleanup	= sx1_panel_cleanup,
	.enable		= sx1_panel_enable,
	.disable	= sx1_panel_disable,
	.get_caps	= sx1_panel_get_caps,
};
290
/* Register the panel descriptor with the omapfb core. */
static int sx1_panel_probe(struct platform_device *pdev)
{
	omapfb_register_panel(&sx1_panel);
	return 0;
}
296
/* Nothing to undo on removal. */
static int sx1_panel_remove(struct platform_device *pdev)
{
	return 0;
}
301
/* No panel state to save on suspend. */
static int sx1_panel_suspend(struct platform_device *pdev, pm_message_t mesg)
{
	return 0;
}
306
/* No panel state to restore on resume. */
static int sx1_panel_resume(struct platform_device *pdev)
{
	return 0;
}
311
/* Platform glue; bound by name to the "lcd_sx1" platform device. */
struct platform_driver sx1_panel_driver = {
	.probe		= sx1_panel_probe,
	.remove		= sx1_panel_remove,
	.suspend	= sx1_panel_suspend,
	.resume		= sx1_panel_resume,
	.driver		= {
		.name	= "lcd_sx1",
		.owner	= THIS_MODULE,
	},
};
322
/* Module entry point: register the platform driver. */
static int sx1_panel_drv_init(void)
{
	return platform_driver_register(&sx1_panel_driver);
}
327
/* Module exit point: unregister the platform driver. */
static void sx1_panel_drv_cleanup(void)
{
	platform_driver_unregister(&sx1_panel_driver);
}
332
333module_init(sx1_panel_drv_init);
334module_exit(sx1_panel_drv_cleanup);
diff --git a/drivers/video/omap/lcdc.c b/drivers/video/omap/lcdc.c
new file mode 100644
index 000000000000..9085188d815e
--- /dev/null
+++ b/drivers/video/omap/lcdc.c
@@ -0,0 +1,893 @@
1/*
2 * OMAP1 internal LCD controller
3 *
4 * Copyright (C) 2004 Nokia Corporation
5 * Author: Imre Deak <imre.deak@nokia.com>
6 *
7 * This program is free software; you can redistribute it and/or modify it
8 * under the terms of the GNU General Public License as published by the
9 * Free Software Foundation; either version 2 of the License, or (at your
10 * option) any later version.
11 *
12 * This program is distributed in the hope that it will be useful, but
13 * WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
15 * General Public License for more details.
16 *
17 * You should have received a copy of the GNU General Public License along
18 * with this program; if not, write to the Free Software Foundation, Inc.,
19 * 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
20 */
21#include <linux/module.h>
22#include <linux/device.h>
23#include <linux/interrupt.h>
24#include <linux/spinlock.h>
25#include <linux/err.h>
26#include <linux/mm.h>
27#include <linux/fb.h>
28#include <linux/dma-mapping.h>
29#include <linux/vmalloc.h>
30#include <linux/clk.h>
31
32#include <asm/arch/dma.h>
33#include <asm/arch/omapfb.h>
34
35#include <asm/mach-types.h>
36
37#define MODULE_NAME "lcdc"
38
39#define OMAP_LCDC_BASE 0xfffec000
40#define OMAP_LCDC_SIZE 256
41#define OMAP_LCDC_IRQ INT_LCD_CTRL
42
43#define OMAP_LCDC_CONTROL (OMAP_LCDC_BASE + 0x00)
44#define OMAP_LCDC_TIMING0 (OMAP_LCDC_BASE + 0x04)
45#define OMAP_LCDC_TIMING1 (OMAP_LCDC_BASE + 0x08)
46#define OMAP_LCDC_TIMING2 (OMAP_LCDC_BASE + 0x0c)
47#define OMAP_LCDC_STATUS (OMAP_LCDC_BASE + 0x10)
48#define OMAP_LCDC_SUBPANEL (OMAP_LCDC_BASE + 0x14)
49#define OMAP_LCDC_LINE_INT (OMAP_LCDC_BASE + 0x18)
50#define OMAP_LCDC_DISPLAY_STATUS (OMAP_LCDC_BASE + 0x1c)
51
52#define OMAP_LCDC_STAT_DONE (1 << 0)
53#define OMAP_LCDC_STAT_VSYNC (1 << 1)
54#define OMAP_LCDC_STAT_SYNC_LOST (1 << 2)
55#define OMAP_LCDC_STAT_ABC (1 << 3)
56#define OMAP_LCDC_STAT_LINE_INT (1 << 4)
57#define OMAP_LCDC_STAT_FUF (1 << 5)
58#define OMAP_LCDC_STAT_LOADED_PALETTE (1 << 6)
59
60#define OMAP_LCDC_CTRL_LCD_EN (1 << 0)
61#define OMAP_LCDC_CTRL_LCD_TFT (1 << 7)
62#define OMAP_LCDC_CTRL_LINE_IRQ_CLR_SEL (1 << 10)
63
64#define OMAP_LCDC_IRQ_VSYNC (1 << 2)
65#define OMAP_LCDC_IRQ_DONE (1 << 3)
66#define OMAP_LCDC_IRQ_LOADED_PALETTE (1 << 4)
67#define OMAP_LCDC_IRQ_LINE_NIRQ (1 << 5)
68#define OMAP_LCDC_IRQ_LINE (1 << 6)
69#define OMAP_LCDC_IRQ_MASK (((1 << 5) - 1) << 2)
70
71#define MAX_PALETTE_SIZE PAGE_SIZE
72
/* What the controller fetches on the next transfer (see set_load_mode). */
enum lcdc_load_mode {
	OMAP_LCDC_LOAD_PALETTE,
	OMAP_LCDC_LOAD_FRAME,
	OMAP_LCDC_LOAD_PALETTE_AND_FRAME
};
78
/* Single instance of the internal LCD controller's driver state. */
static struct omap_lcd_controller {
	enum omapfb_update_mode	update_mode;
	int			ext_mode;	/* external controller attached */

	unsigned long		frame_offset;	/* into vram */
	int			screen_width;
	int			xres;
	int			yres;

	enum omapfb_color_format	color_mode;
	int			bpp;
	void			*palette_virt;	/* DMA-coherent palette buffer */
	dma_addr_t		palette_phys;
	int			palette_code;	/* format code stored in word 0 */
	int			palette_size;

	unsigned int		irq_mask;	/* IRQs unmasked on enable */
	struct completion	last_frame_complete;
	struct completion	palette_load_complete;
	struct clk		*lcd_ck;
	struct omapfb_device	*fbdev;

	void			(*dma_callback)(void *data);
	void			*dma_callback_data;

	int			fbmem_allocated;	/* vram owned by us */
	dma_addr_t		vram_phys;
	void			*vram_virt;
	unsigned long		vram_size;
} lcdc;
109
110static void inline enable_irqs(int mask)
111{
112 lcdc.irq_mask |= mask;
113}
114
115static void inline disable_irqs(int mask)
116{
117 lcdc.irq_mask &= ~mask;
118}
119
/*
 * Select what the next transfer loads -- palette, frame, or both --
 * via bits 21:20 of the control register.
 */
static void set_load_mode(enum lcdc_load_mode mode)
{
	u32 l;

	l = omap_readl(OMAP_LCDC_CONTROL);
	l &= ~(3 << 20);
	switch (mode) {
	case OMAP_LCDC_LOAD_PALETTE:
		l |= 1 << 20;
		break;
	case OMAP_LCDC_LOAD_FRAME:
		l |= 2 << 20;
		break;
	case OMAP_LCDC_LOAD_PALETTE_AND_FRAME:
		/* 00 = palette + frame */
		break;
	default:
		BUG();
	}
	omap_writel(l, OMAP_LCDC_CONTROL);
}
140
/*
 * Turn the controller on and unmask the currently requested IRQs;
 * DONE is always kept enabled so disable_controller() can wait on it.
 */
static void enable_controller(void)
{
	u32 l;

	l = omap_readl(OMAP_LCDC_CONTROL);
	l |= OMAP_LCDC_CTRL_LCD_EN;
	l &= ~OMAP_LCDC_IRQ_MASK;
	l |= lcdc.irq_mask | OMAP_LCDC_IRQ_DONE;	/* enabled IRQs */
	omap_writel(l, OMAP_LCDC_CONTROL);
}
151
/*
 * Start turning the controller off without waiting for the last frame
 * to finish; all IRQs except DONE are masked.
 */
static void disable_controller_async(void)
{
	u32 l;
	u32 mask;

	l = omap_readl(OMAP_LCDC_CONTROL);
	mask = OMAP_LCDC_CTRL_LCD_EN | OMAP_LCDC_IRQ_MASK;
	/*
	 * Preserve the DONE mask, since we still want to get the
	 * final DONE irq. It will be disabled in the IRQ handler.
	 */
	mask &= ~OMAP_LCDC_IRQ_DONE;
	l &= ~mask;
	omap_writel(l, OMAP_LCDC_CONTROL);
}
167
/*
 * Turn the controller off and wait (up to 500 ms) for the final
 * frame-DONE interrupt, signalled from lcdc_irq_handler().
 */
static void disable_controller(void)
{
	init_completion(&lcdc.last_frame_complete);
	disable_controller_async();
	if (!wait_for_completion_timeout(&lcdc.last_frame_complete,
				msecs_to_jiffies(500)))
		dev_err(lcdc.fbdev->dev, "timeout waiting for FRAME DONE\n");
}
176
/*
 * Recover from a FIFO-underflow/sync-lost error by bouncing the
 * controller.  Complaints are rate-limited to one per second, and
 * after 100 consecutive resets we give up and leave it disabled.
 */
static void reset_controller(u32 status)
{
	static unsigned long reset_count;
	static unsigned long last_jiffies;

	disable_controller_async();
	reset_count++;
	if (reset_count == 1 || time_after(jiffies, last_jiffies + HZ)) {
		dev_err(lcdc.fbdev->dev,
			  "resetting (status %#010x,reset count %lu)\n",
			  status, reset_count);
		last_jiffies = jiffies;
	}
	if (reset_count < 100) {
		enable_controller();
	} else {
		reset_count = 0;
		dev_err(lcdc.fbdev->dev,
			"too many reset attempts, giving up.\n");
	}
}
198
199/*
200 * Configure the LCD DMA according to the current mode specified by parameters
201 * in lcdc.fbdev and fbdev->var.
202 */
static void setup_lcd_dma(void)
{
	/* maps element size in bytes -> OMAP DMA data type */
	static const int dma_elem_type[] = {
		0,
		OMAP_DMA_DATA_TYPE_S8,
		OMAP_DMA_DATA_TYPE_S16,
		0,
		OMAP_DMA_DATA_TYPE_S32,
	};
	struct omapfb_plane_struct *plane = lcdc.fbdev->fb_info[0]->par;
	struct fb_var_screeninfo *var = &lcdc.fbdev->fb_info[0]->var;
	unsigned long src;
	int esize, xelem, yelem;

	src = lcdc.vram_phys + lcdc.frame_offset;

	switch (var->rotate) {
	case 0:
		/*
		 * 32-bit elements need 4-byte alignment and an even xres;
		 * mirroring and YUV420 also force 16-bit elements.
		 */
		if (plane->info.mirror || (src & 3) ||
		    lcdc.color_mode == OMAPFB_COLOR_YUV420 ||
		    (lcdc.xres & 1))
			esize = 2;
		else
			esize = 4;
		xelem = lcdc.xres * lcdc.bpp / 8 / esize;
		yelem = lcdc.yres;
		break;
	case 90:
	case 180:
	case 270:
		/* rotation relies on DMA features OMAP15xx lacks */
		if (cpu_is_omap15xx()) {
			BUG();
		}
		esize = 2;
		xelem = lcdc.yres * lcdc.bpp / 16;
		yelem = lcdc.xres;
		break;
	default:
		BUG();
		return;
	}
#ifdef VERBOSE
	dev_dbg(lcdc.fbdev->dev,
		 "setup_dma: src %#010lx esize %d xelem %d yelem %d\n",
		 src, esize, xelem, yelem);
#endif
	omap_set_lcd_dma_b1(src, xelem, yelem, dma_elem_type[esize]);
	if (!cpu_is_omap15xx()) {
		int bpp = lcdc.bpp;

		/*
		 * YUV support is only for external mode when we have the
		 * YUV window embedded in a 16bpp frame buffer.
		 */
		if (lcdc.color_mode == OMAPFB_COLOR_YUV420)
			bpp = 16;
		/* Set virtual xres elem size */
		omap_set_lcd_dma_b1_vxres(
			lcdc.screen_width * bpp / 8 / esize);
		/* Setup transformations */
		omap_set_lcd_dma_b1_rotation(var->rotate);
		omap_set_lcd_dma_b1_mirror(plane->info.mirror);
	}
	omap_setup_lcd_dma();
}
268
/*
 * LCDC interrupt: recover from FUF/sync-lost errors, signal frame-DONE
 * and palette-loaded completions, then acknowledge the remaining bits.
 */
static irqreturn_t lcdc_irq_handler(int irq, void *dev_id)
{
	u32 status;

	status = omap_readl(OMAP_LCDC_STATUS);

	if (status & (OMAP_LCDC_STAT_FUF | OMAP_LCDC_STAT_SYNC_LOST))
		reset_controller(status);
	else {
		if (status & OMAP_LCDC_STAT_DONE) {
			u32 l;

			/*
			 * Disable IRQ_DONE. The status bit will be cleared
			 * only when the controller is reenabled and we don't
			 * want to get more interrupts.
			 */
			l = omap_readl(OMAP_LCDC_CONTROL);
			l &= ~OMAP_LCDC_IRQ_DONE;
			omap_writel(l, OMAP_LCDC_CONTROL);
			complete(&lcdc.last_frame_complete);
		}
		if (status & OMAP_LCDC_STAT_LOADED_PALETTE) {
			disable_controller_async();
			complete(&lcdc.palette_load_complete);
		}
	}

	/*
	 * Clear these interrupt status bits.
	 * Sync_lost, FUF bits were cleared by disabling the LCD controller
	 * LOADED_PALETTE can be cleared this way only in palette only
	 * load mode. In other load modes it's cleared by disabling the
	 * controller.
	 */
	status &= ~(OMAP_LCDC_STAT_VSYNC |
		    OMAP_LCDC_STAT_LOADED_PALETTE |
		    OMAP_LCDC_STAT_ABC |
		    OMAP_LCDC_STAT_LINE_INT);
	omap_writel(status, OMAP_LCDC_STATUS);
	return IRQ_HANDLED;
}
311
312/*
313 * Change to a new video mode. We defer this to a later time to avoid any
314 * flicker and not to mess up the current LCD DMA context. For this we disable
315 * the LCD controler, which will generate a DONE irq after the last frame has
316 * been transferred. Then it'll be safe to reconfigure both the LCD controller
317 * as well as the LCD DMA.
318 */
static int omap_lcdc_setup_plane(int plane, int channel_out,
				 unsigned long offset, int screen_width,
				 int pos_x, int pos_y, int width, int height,
				 int color_mode)
{
	struct fb_var_screeninfo *var = &lcdc.fbdev->fb_info[0]->var;
	struct lcd_panel *panel = lcdc.fbdev->panel;
	int rot_x, rot_y;

	/* effective panel resolution after rotation */
	if (var->rotate == 0) {
		rot_x = panel->x_res;
		rot_y = panel->y_res;
	} else {
		rot_x = panel->y_res;
		rot_y = panel->x_res;
	}
	/* internal controller: one full-screen GFX plane only */
	if (plane != 0 || channel_out != 0 || pos_x != 0 || pos_y != 0 ||
	    width > rot_x || height > rot_y) {
#ifdef VERBOSE
		dev_dbg(lcdc.fbdev->dev,
			"invalid plane params plane %d pos_x %d pos_y %d "
			"w %d h %d\n", plane, pos_x, pos_y, width, height);
#endif
		return -EINVAL;
	}

	lcdc.frame_offset = offset;
	lcdc.xres = width;
	lcdc.yres = height;
	lcdc.screen_width = screen_width;
	lcdc.color_mode = color_mode;

	/* derive bpp and (for palettized/RGB modes) the palette layout */
	switch (color_mode) {
	case OMAPFB_COLOR_CLUT_8BPP:
		lcdc.bpp = 8;
		lcdc.palette_code = 0x3000;
		lcdc.palette_size = 512;
		break;
	case OMAPFB_COLOR_RGB565:
		lcdc.bpp = 16;
		lcdc.palette_code = 0x4000;
		lcdc.palette_size = 32;
		break;
	case OMAPFB_COLOR_RGB444:
		lcdc.bpp = 16;
		lcdc.palette_code = 0x4000;
		lcdc.palette_size = 32;
		break;
	case OMAPFB_COLOR_YUV420:
		/* YUV only valid with an external controller */
		if (lcdc.ext_mode) {
			lcdc.bpp = 12;
			break;
		}
		/* fallthrough */
	case OMAPFB_COLOR_YUV422:
		if (lcdc.ext_mode) {
			lcdc.bpp = 16;
			break;
		}
		/* fallthrough */
	default:
		/* FIXME: other BPPs.
		 * bpp1: code  0,     size 256
		 * bpp2: code  0x1000 size 256
		 * bpp4: code  0x2000 size 256
		 * bpp12: code 0x4000 size 32
		 */
		dev_dbg(lcdc.fbdev->dev, "invalid color mode %d\n", color_mode);
		BUG();
		return -1;
	}

	if (lcdc.ext_mode) {
		setup_lcd_dma();
		return 0;
	}

	/* internal mode: reprogram the DMA only while the LCD is stopped */
	if (lcdc.update_mode == OMAPFB_AUTO_UPDATE) {
		disable_controller();
		omap_stop_lcd_dma();
		setup_lcd_dma();
		enable_controller();
	}

	return 0;
}
405
/*
 * Only the GFX plane exists on the internal controller, and it is
 * always on; anything else is rejected.
 */
static int omap_lcdc_enable_plane(int plane, int enable)
{
	dev_dbg(lcdc.fbdev->dev,
		"plane %d enable %d update_mode %d ext_mode %d\n",
		plane, enable, lcdc.update_mode, lcdc.ext_mode);
	if (plane != OMAPFB_PLANE_GFX)
		return -EINVAL;

	return 0;
}
416
417/*
418 * Configure the LCD DMA for a palette load operation and do the palette
419 * downloading synchronously. We don't use the frame+palette load mode of
420 * the controller, since the palette can always be downloaded seperately.
421 */
422static void load_palette(void)
423{
424 u16 *palette;
425
426 palette = (u16 *)lcdc.palette_virt;
427
428 *(u16 *)palette &= 0x0fff;
429 *(u16 *)palette |= lcdc.palette_code;
430
431 omap_set_lcd_dma_b1(lcdc.palette_phys,
432 lcdc.palette_size / 4 + 1, 1, OMAP_DMA_DATA_TYPE_S32);
433
434 omap_set_lcd_dma_single_transfer(1);
435 omap_setup_lcd_dma();
436
437 init_completion(&lcdc.palette_load_complete);
438 enable_irqs(OMAP_LCDC_IRQ_LOADED_PALETTE);
439 set_load_mode(OMAP_LCDC_LOAD_PALETTE);
440 enable_controller();
441 if (!wait_for_completion_timeout(&lcdc.palette_load_complete,
442 msecs_to_jiffies(500)))
443 dev_err(lcdc.fbdev->dev, "timeout waiting for FRAME DONE\n");
444 /* The controller gets disabled in the irq handler */
445 disable_irqs(OMAP_LCDC_IRQ_LOADED_PALETTE);
446 omap_stop_lcd_dma();
447
448 omap_set_lcd_dma_single_transfer(lcdc.ext_mode);
449}
450
451/* Used only in internal controller mode */
/*
 * Set one 4:4:4 palette entry (8bpp CLUT mode only).  If @update_hw_pal
 * is set, the whole palette is re-downloaded and the frame restarted.
 * Used only in internal controller mode.
 */
static int omap_lcdc_setcolreg(u_int regno, u16 red, u16 green, u16 blue,
			       u16 transp, int update_hw_pal)
{
	u16 *palette;

	if (lcdc.color_mode != OMAPFB_COLOR_CLUT_8BPP || regno > 255)
		return -EINVAL;

	palette = (u16 *)lcdc.palette_virt;

	/* keep the format-code bits, replace the 12-bit RGB value */
	palette[regno] &= ~0x0fff;
	palette[regno] |= ((red >> 12) << 8) | ((green >> 12) << 4 ) |
			   (blue >> 12);

	if (update_hw_pal) {
		disable_controller();
		omap_stop_lcd_dma();
		load_palette();
		setup_lcd_dma();
		set_load_mode(OMAP_LCDC_LOAD_FRAME);
		enable_controller();
	}

	return 0;
}
477
/*
 * Compute the pixel clock divider for the requested pixel clock @pck
 * (in Hz), rounding up.  TFT panels need a divider of at least 2,
 * passive panels at least 3; the 8-bit register field caps it at 255.
 */
static void calc_ck_div(int is_tft, int pck, int *pck_div)
{
	unsigned long lck;

	pck = max(1, pck);
	lck = clk_get_rate(lcdc.lcd_ck);
	*pck_div = (lck + pck - 1) / pck;
	if (is_tft)
		*pck_div = max(2, *pck_div);
	else
		*pck_div = max(3, *pck_div);
	if (*pck_div > 255) {
		/* FIXME: try to adjust logic clock divider as well */
		*pck_div = 255;
		dev_warn(lcdc.fbdev->dev, "pixclock %d kHz too low.\n",
			 pck / 1000);
	}
}
496
497static void inline setup_regs(void)
498{
499 u32 l;
500 struct lcd_panel *panel = lcdc.fbdev->panel;
501 int is_tft = panel->config & OMAP_LCDC_PANEL_TFT;
502 unsigned long lck;
503 int pcd;
504
505 l = omap_readl(OMAP_LCDC_CONTROL);
506 l &= ~OMAP_LCDC_CTRL_LCD_TFT;
507 l |= is_tft ? OMAP_LCDC_CTRL_LCD_TFT : 0;
508#ifdef CONFIG_MACH_OMAP_PALMTE
509/* FIXME:if (machine_is_omap_palmte()) { */
510 /* PalmTE uses alternate TFT setting in 8BPP mode */
511 l |= (is_tft && panel->bpp == 8) ? 0x810000 : 0;
512/* } */
513#endif
514 omap_writel(l, OMAP_LCDC_CONTROL);
515
516 l = omap_readl(OMAP_LCDC_TIMING2);
517 l &= ~(((1 << 6) - 1) << 20);
518 l |= (panel->config & OMAP_LCDC_SIGNAL_MASK) << 20;
519 omap_writel(l, OMAP_LCDC_TIMING2);
520
521 l = panel->x_res - 1;
522 l |= (panel->hsw - 1) << 10;
523 l |= (panel->hfp - 1) << 16;
524 l |= (panel->hbp - 1) << 24;
525 omap_writel(l, OMAP_LCDC_TIMING0);
526
527 l = panel->y_res - 1;
528 l |= (panel->vsw - 1) << 10;
529 l |= panel->vfp << 16;
530 l |= panel->vbp << 24;
531 omap_writel(l, OMAP_LCDC_TIMING1);
532
533 l = omap_readl(OMAP_LCDC_TIMING2);
534 l &= ~0xff;
535
536 lck = clk_get_rate(lcdc.lcd_ck);
537
538 if (!panel->pcd)
539 calc_ck_div(is_tft, panel->pixel_clock * 1000, &pcd);
540 else {
541 dev_warn(lcdc.fbdev->dev,
542 "Pixel clock divider value is obsolete.\n"
543 "Try to set pixel_clock to %lu and pcd to 0 "
544 "in drivers/video/omap/lcd_%s.c and submit a patch.\n",
545 lck / panel->pcd / 1000, panel->name);
546
547 pcd = panel->pcd;
548 }
549 l |= pcd & 0xff;
550 l |= panel->acb << 8;
551 omap_writel(l, OMAP_LCDC_TIMING2);
552
553 /* update panel info with the exact clock */
554 panel->pixel_clock = lck / pcd / 1000;
555}
556
557/*
558 * Configure the LCD controller, download the color palette and start a looped
559 * DMA transfer of the frame image data. Called only in internal
560 * controller mode.
561 */
static int omap_lcdc_set_update_mode(enum omapfb_update_mode mode)
{
	int r = 0;

	if (mode != lcdc.update_mode) {
		switch (mode) {
		case OMAPFB_AUTO_UPDATE:
			/* program registers, load palette, start looped DMA */
			setup_regs();
			load_palette();

			/* Setup and start LCD DMA */
			setup_lcd_dma();

			set_load_mode(OMAP_LCDC_LOAD_FRAME);
			enable_irqs(OMAP_LCDC_IRQ_DONE);
			/* This will start the actual DMA transfer */
			enable_controller();
			lcdc.update_mode = mode;
			break;
		case OMAPFB_UPDATE_DISABLED:
			disable_controller();
			omap_stop_lcd_dma();
			lcdc.update_mode = mode;
			break;
		default:
			/* manual update is not supported internally */
			r = -EINVAL;
		}
	}

	return r;
}
593
/* Report the currently active update mode. */
static enum omapfb_update_mode omap_lcdc_get_update_mode(void)
{
	return lcdc.update_mode;
}
598
599/* PM code called only in internal controller mode */
/* PM code called only in internal controller mode */
static void omap_lcdc_suspend(void)
{
	/* stop the controller and the frame DMA if they are running */
	if (lcdc.update_mode == OMAPFB_AUTO_UPDATE) {
		disable_controller();
		omap_stop_lcd_dma();
	}
}
607
/* Reverse of omap_lcdc_suspend(): reprogram everything and restart. */
static void omap_lcdc_resume(void)
{
	if (lcdc.update_mode == OMAPFB_AUTO_UPDATE) {
		setup_regs();
		load_palette();
		setup_lcd_dma();
		set_load_mode(OMAP_LCDC_LOAD_FRAME);
		enable_irqs(OMAP_LCDC_IRQ_DONE);
		enable_controller();
	}
}
619
/*
 * The internal controller adds no extra capabilities to report.
 * (Dropped the redundant trailing "return;" in a void function.)
 */
static void omap_lcdc_get_caps(int plane, struct omapfb_caps *caps)
{
}
624
625int omap_lcdc_set_dma_callback(void (*callback)(void *data), void *data)
626{
627 BUG_ON(callback == NULL);
628
629 if (lcdc.dma_callback)
630 return -EBUSY;
631 else {
632 lcdc.dma_callback = callback;
633 lcdc.dma_callback_data = data;
634 }
635 return 0;
636}
637EXPORT_SYMBOL(omap_lcdc_set_dma_callback);
638
/* Remove the LCD DMA callback installed by omap_lcdc_set_dma_callback(). */
void omap_lcdc_free_dma_callback(void)
{
	lcdc.dma_callback = NULL;
}
EXPORT_SYMBOL(omap_lcdc_free_dma_callback);
644
/* LCD DMA interrupt: forward to the registered client callback, if any. */
static void lcdc_dma_handler(u16 status, void *data)
{
	if (lcdc.dma_callback)
		lcdc.dma_callback(lcdc.dma_callback_data);
}
650
/*
 * Map an externally provided frame buffer (lcdc.vram_phys) into kernel
 * virtual space with a write-combining mapping; the result is stored
 * in lcdc.vram_virt and released later by unmap_kern().
 * NOTE(review): the vm area obtained from get_vm_area() is not released
 * on the io_remap_pfn_range() failure path -- looks like a leak; confirm.
 */
static int mmap_kern(void)
{
	struct vm_struct *kvma;
	struct vm_area_struct vma;
	pgprot_t pgprot;
	unsigned long vaddr;

	kvma = get_vm_area(lcdc.vram_size, VM_IOREMAP);
	if (kvma == NULL) {
		dev_err(lcdc.fbdev->dev, "can't get kernel vm area\n");
		return -ENOMEM;
	}
	/* fake VMA so io_remap_pfn_range() can be reused for a kernel map */
	vma.vm_mm = &init_mm;

	vaddr = (unsigned long)kvma->addr;
	vma.vm_start = vaddr;
	vma.vm_end = vaddr + lcdc.vram_size;

	pgprot = pgprot_writecombine(pgprot_kernel);
	if (io_remap_pfn_range(&vma, vaddr,
			   lcdc.vram_phys >> PAGE_SHIFT,
			   lcdc.vram_size, pgprot) < 0) {
		dev_err(lcdc.fbdev->dev, "kernel mmap for FB memory failed\n");
		return -EAGAIN;
	}

	lcdc.vram_virt = (void *)vaddr;

	return 0;
}
681
/* Tear down the kernel mapping created by mmap_kern(). */
static void unmap_kern(void)
{
	vunmap(lcdc.vram_virt);
}
686
/* Allocate and zero the DMA-coherent palette buffer (internal mode only). */
static int alloc_palette_ram(void)
{
	lcdc.palette_virt = dma_alloc_writecombine(lcdc.fbdev->dev,
		MAX_PALETTE_SIZE, &lcdc.palette_phys, GFP_KERNEL);
	if (lcdc.palette_virt == NULL) {
		dev_err(lcdc.fbdev->dev, "failed to alloc palette memory\n");
		return -ENOMEM;
	}
	memset(lcdc.palette_virt, 0, MAX_PALETTE_SIZE);

	return 0;
}
699
/* Release the palette buffer from alloc_palette_ram(). */
static void free_palette_ram(void)
{
	dma_free_writecombine(lcdc.fbdev->dev, MAX_PALETTE_SIZE,
			lcdc.palette_virt, lcdc.palette_phys);
}
705
/*
 * Allocate the frame buffer itself as DMA-coherent memory, sized for
 * one full frame (12bpp panels are stored as 16bpp) or the caller's
 * requested size, whichever is larger.  Fills in @region and marks it
 * as driver-allocated.
 */
static int alloc_fbmem(struct omapfb_mem_region *region)
{
	int bpp;
	int frame_size;
	struct lcd_panel *panel = lcdc.fbdev->panel;

	bpp = panel->bpp;
	if (bpp == 12)
		bpp = 16;
	frame_size = PAGE_ALIGN(panel->x_res * bpp / 8 * panel->y_res);
	if (region->size > frame_size)
		frame_size = region->size;
	lcdc.vram_size = frame_size;
	lcdc.vram_virt = dma_alloc_writecombine(lcdc.fbdev->dev,
			lcdc.vram_size, &lcdc.vram_phys, GFP_KERNEL);
	if (lcdc.vram_virt == NULL) {
		dev_err(lcdc.fbdev->dev, "unable to allocate FB DMA memory\n");
		return -ENOMEM;
	}
	region->size = frame_size;
	region->paddr = lcdc.vram_phys;
	region->vaddr = lcdc.vram_virt;
	region->alloc = 1;

	memset(lcdc.vram_virt, 0, lcdc.vram_size);

	return 0;
}
734
/* Release the frame buffer allocated by alloc_fbmem(). */
static void free_fbmem(void)
{
	dma_free_writecombine(lcdc.fbdev->dev, lcdc.vram_size,
			lcdc.vram_virt, lcdc.vram_phys);
}
740
/*
 * Set up frame buffer memory from the platform's request: allocate it
 * ourselves when no physical address was supplied, otherwise map the
 * preallocated region into kernel space.  Only one region (plane) is
 * supported; extra regions are silently dropped.
 */
static int setup_fbmem(struct omapfb_mem_desc *req_md)
{
	int r;

	if (!req_md->region_cnt) {
		dev_err(lcdc.fbdev->dev, "no memory regions defined\n");
		return -EINVAL;
	}

	if (req_md->region_cnt > 1) {
		dev_err(lcdc.fbdev->dev, "only one plane is supported\n");
		req_md->region_cnt = 1;
	}

	if (req_md->region[0].paddr == 0) {
		lcdc.fbmem_allocated = 1;
		if ((r = alloc_fbmem(&req_md->region[0])) < 0)
			return r;
		return 0;
	}

	lcdc.vram_phys = req_md->region[0].paddr;
	lcdc.vram_size = req_md->region[0].size;

	if ((r = mmap_kern()) < 0)
		return r;

	dev_dbg(lcdc.fbdev->dev, "vram at %08x size %08lx mapped to 0x%p\n",
		 lcdc.vram_phys, lcdc.vram_size, lcdc.vram_virt);

	return 0;
}
773
774static void cleanup_fbmem(void)
775{
776 if (lcdc.fbmem_allocated)
777 free_fbmem();
778 else
779 unmap_kern();
780}
781
/*
 * Bring up the internal LCD controller: clocks (with per-board rate
 * fixups), the LCDC interrupt, the LCD DMA channel, and -- in internal
 * mode -- the palette buffer, followed by frame buffer memory setup.
 * Resources are unwound in reverse order through the failN labels.
 */
static int omap_lcdc_init(struct omapfb_device *fbdev, int ext_mode,
			  struct omapfb_mem_desc *req_vram)
{
	int r;
	u32 l;
	int rate;
	struct clk *tc_ck;

	lcdc.irq_mask = 0;

	lcdc.fbdev = fbdev;
	lcdc.ext_mode = ext_mode;

	/* start from a fully disabled controller */
	l = 0;
	omap_writel(l, OMAP_LCDC_CONTROL);

	/* FIXME:
	 * According to errata some platforms have a clock rate limitiation
	 */
	lcdc.lcd_ck = clk_get(NULL, "lcd_ck");
	if (IS_ERR(lcdc.lcd_ck)) {
		dev_err(fbdev->dev, "unable to access LCD clock\n");
		r = PTR_ERR(lcdc.lcd_ck);
		goto fail0;
	}

	tc_ck = clk_get(NULL, "tc_ck");
	if (IS_ERR(tc_ck)) {
		dev_err(fbdev->dev, "unable to access TC clock\n");
		r = PTR_ERR(tc_ck);
		goto fail1;
	}

	rate = clk_get_rate(tc_ck);
	clk_put(tc_ck);

	/* per-board LCD clock rate limits */
	if (machine_is_ams_delta())
		rate /= 4;
	if (machine_is_omap_h3())
		rate /= 3;
	r = clk_set_rate(lcdc.lcd_ck, rate);
	if (r) {
		dev_err(fbdev->dev, "failed to adjust LCD rate\n");
		goto fail1;
	}
	clk_enable(lcdc.lcd_ck);

	r = request_irq(OMAP_LCDC_IRQ, lcdc_irq_handler, 0, MODULE_NAME, fbdev);
	if (r) {
		dev_err(fbdev->dev, "unable to get IRQ\n");
		goto fail2;
	}

	r = omap_request_lcd_dma(lcdc_dma_handler, NULL);
	if (r) {
		dev_err(fbdev->dev, "unable to get LCD DMA\n");
		goto fail3;
	}

	omap_set_lcd_dma_single_transfer(ext_mode);
	omap_set_lcd_dma_ext_controller(ext_mode);

	/* a palette is only needed for the internal controller path */
	if (!ext_mode)
		if ((r = alloc_palette_ram()) < 0)
			goto fail4;

	if ((r = setup_fbmem(req_vram)) < 0)
		goto fail5;

	pr_info("omapfb: LCDC initialized\n");

	return 0;
fail5:
	if (!ext_mode)
		free_palette_ram();
fail4:
	omap_free_lcd_dma();
fail3:
	free_irq(OMAP_LCDC_IRQ, lcdc.fbdev);
fail2:
	clk_disable(lcdc.lcd_ck);
fail1:
	clk_put(lcdc.lcd_ck);
fail0:
	return r;
}
868
/* Undo omap_lcdc_init() in reverse order. */
static void omap_lcdc_cleanup(void)
{
	if (!lcdc.ext_mode)
		free_palette_ram();
	cleanup_fbmem();
	omap_free_lcd_dma();
	free_irq(OMAP_LCDC_IRQ, lcdc.fbdev);
	clk_disable(lcdc.lcd_ck);
	clk_put(lcdc.lcd_ck);
}
879
/*
 * Operations table for the OMAP1 internal LCD controller, consumed by
 * the omapfb core.  update_window is NULL: partial updates are only
 * meaningful with an external controller.
 */
const struct lcd_ctrl omap1_int_ctrl = {
	.name			= "internal",
	.init			= omap_lcdc_init,
	.cleanup		= omap_lcdc_cleanup,
	.get_caps		= omap_lcdc_get_caps,
	.set_update_mode	= omap_lcdc_set_update_mode,
	.get_update_mode	= omap_lcdc_get_update_mode,
	.update_window		= NULL,
	.suspend		= omap_lcdc_suspend,
	.resume			= omap_lcdc_resume,
	.setup_plane		= omap_lcdc_setup_plane,
	.enable_plane		= omap_lcdc_enable_plane,
	.setcolreg		= omap_lcdc_setcolreg,
};
diff --git a/drivers/video/omap/lcdc.h b/drivers/video/omap/lcdc.h
new file mode 100644
index 000000000000..adb731e5314a
--- /dev/null
+++ b/drivers/video/omap/lcdc.h
@@ -0,0 +1,7 @@
1#ifndef LCDC_H
2#define LCDC_H
3
4int omap_lcdc_set_dma_callback(void (*callback)(void *data), void *data);
5void omap_lcdc_free_dma_callback(void);
6
7#endif
diff --git a/drivers/video/omap/omapfb_main.c b/drivers/video/omap/omapfb_main.c
new file mode 100644
index 000000000000..14d0f7a11145
--- /dev/null
+++ b/drivers/video/omap/omapfb_main.c
@@ -0,0 +1,1941 @@
1/*
2 * Framebuffer driver for TI OMAP boards
3 *
4 * Copyright (C) 2004 Nokia Corporation
5 * Author: Imre Deak <imre.deak@nokia.com>
6 *
7 * Acknowledgements:
8 * Alex McMains <aam@ridgerun.com> - Original driver
9 * Juha Yrjola <juha.yrjola@nokia.com> - Original driver and improvements
10 * Dirk Behme <dirk.behme@de.bosch.com> - changes for 2.6 kernel API
11 * Texas Instruments - H3 support
12 *
13 * This program is free software; you can redistribute it and/or modify it
14 * under the terms of the GNU General Public License as published by the
15 * Free Software Foundation; either version 2 of the License, or (at your
16 * option) any later version.
17 *
18 * This program is distributed in the hope that it will be useful, but
19 * WITHOUT ANY WARRANTY; without even the implied warranty of
20 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
21 * General Public License for more details.
22 *
23 * You should have received a copy of the GNU General Public License along
24 * with this program; if not, write to the Free Software Foundation, Inc.,
25 * 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
26 */
27#include <linux/platform_device.h>
28#include <linux/uaccess.h>
29
30#include <asm/mach-types.h>
31#include <asm/arch/dma.h>
32#include <asm/arch/omapfb.h>
33
34#define MODULE_NAME "omapfb"
35
36static unsigned int def_accel;
37static unsigned long def_vram[OMAPFB_PLANE_NUM];
38static int def_vram_cnt;
39static unsigned long def_vxres;
40static unsigned long def_vyres;
41static unsigned int def_rotate;
42static unsigned int def_mirror;
43
44#ifdef CONFIG_FB_OMAP_MANUAL_UPDATE
45static int manual_update = 1;
46#else
47static int manual_update;
48#endif
49
50static struct platform_device *fbdev_pdev;
51static struct lcd_panel *fbdev_panel;
52static struct omapfb_device *omapfb_dev;
53
/* Maps a capability bit flag to a human-readable name (for reporting). */
54struct caps_table_struct {
55        unsigned long flag;
56        const char *name;
57};
58
/* Controller capability bits and their display names. */
59static struct caps_table_struct ctrl_caps[] = {
60	{ OMAPFB_CAPS_MANUAL_UPDATE,  "manual update" },
61	{ OMAPFB_CAPS_TEARSYNC,       "tearing synchronization" },
62	{ OMAPFB_CAPS_PLANE_RELOCATE_MEM, "relocate plane memory" },
63	{ OMAPFB_CAPS_PLANE_SCALE,    "scale plane" },
64	{ OMAPFB_CAPS_WINDOW_PIXEL_DOUBLE, "pixel double window" },
65	{ OMAPFB_CAPS_WINDOW_SCALE,   "scale window" },
66	{ OMAPFB_CAPS_WINDOW_OVERLAY, "overlay window" },
67	{ OMAPFB_CAPS_SET_BACKLIGHT,  "backlight setting" },
68};
69
/* Supported pixel-format bits (1 << OMAPFB_COLOR_*) and their names. */
70static struct caps_table_struct color_caps[] = {
71	{ 1 << OMAPFB_COLOR_RGB565,	"RGB565", },
72	{ 1 << OMAPFB_COLOR_YUV422,	"YUV422", },
73	{ 1 << OMAPFB_COLOR_YUV420,	"YUV420", },
74	{ 1 << OMAPFB_COLOR_CLUT_8BPP,	"CLUT8", },
75	{ 1 << OMAPFB_COLOR_CLUT_4BPP,	"CLUT4", },
76	{ 1 << OMAPFB_COLOR_CLUT_2BPP,	"CLUT2", },
77	{ 1 << OMAPFB_COLOR_CLUT_1BPP,	"CLUT1", },
78	{ 1 << OMAPFB_COLOR_RGB444,	"RGB444", },
79	{ 1 << OMAPFB_COLOR_YUY422,	"YUY422", },
80};
81
82/*
83 * ---------------------------------------------------------------------------
84 * LCD panel
85 * ---------------------------------------------------------------------------
86 */
/* Controller op-tables defined in the per-controller source files. */
87extern struct lcd_ctrl omap1_int_ctrl;
88extern struct lcd_ctrl omap2_int_ctrl;
89extern struct lcd_ctrl hwa742_ctrl;
90extern struct lcd_ctrl blizzard_ctrl;
91
/*
 * Candidate LCD controllers, selected by name at probe time.  Exactly one
 * internal controller (OMAP1 or OMAP2) is built in; external controllers
 * are optional per Kconfig.
 */
92static struct lcd_ctrl *ctrls[] = {
93#ifdef CONFIG_ARCH_OMAP1
94	&omap1_int_ctrl,
95#else
96	&omap2_int_ctrl,
97#endif
98
99#ifdef CONFIG_FB_OMAP_LCDC_HWA742
100	&hwa742_ctrl,
101#endif
102#ifdef CONFIG_FB_OMAP_LCDC_BLIZZARD
103	&blizzard_ctrl,
104#endif
105};
106
107#ifdef CONFIG_FB_OMAP_LCDC_EXTERNAL
108#ifdef CONFIG_ARCH_OMAP1
109extern struct lcd_ctrl_extif omap1_ext_if;
110#else
111extern struct lcd_ctrl_extif omap2_ext_if;
112#endif
113#endif
114
/*
 * Serialize access to the controller request queue; every path that talks
 * to fbdev->ctrl takes this mutex first.
 */
115static void omapfb_rqueue_lock(struct omapfb_device *fbdev)
116{
117	mutex_lock(&fbdev->rqueue_mutex);
118}
119
/* Release the request-queue mutex taken by omapfb_rqueue_lock(). */
120static void omapfb_rqueue_unlock(struct omapfb_device *fbdev)
121{
122	mutex_unlock(&fbdev->rqueue_mutex);
123}
124
125/*
126 * ---------------------------------------------------------------------------
127 * LCD controller and LCD DMA
128 * ---------------------------------------------------------------------------
129 */
130/* Lookup table to map elem size to elem type. */
/* Indexed by element size in bytes (1, 2, 4); unused sizes map to 0. */
131static const int dma_elem_type[] = {
132	0,
133	OMAP_DMA_DATA_TYPE_S8,
134	OMAP_DMA_DATA_TYPE_S16,
135	0,
136	OMAP_DMA_DATA_TYPE_S32,
137};
138
139/*
140 * Allocate resources needed for LCD controller and LCD DMA operations. Video
141 * memory is allocated from system memory according to the virtual display
142 * size, except if a bigger memory size is specified explicitly as a kernel
143 * parameter.
144 */
/*
 * Determine the video memory layout (module params > board platform data >
 * default sized from def_vxres/def_vyres and panel bpp), then hand it to
 * the controller's init hook.  Returns 0 or a negative errno from init.
 */
145static int ctrl_init(struct omapfb_device *fbdev)
146{
147	int r;
148	int i;
149
150	/* kernel/module vram parameters override boot tags/board config */
151	if (def_vram_cnt) {
152		for (i = 0; i < def_vram_cnt; i++)
153			fbdev->mem_desc.region[i].size =
154				PAGE_ALIGN(def_vram[i]);
155		fbdev->mem_desc.region_cnt = i;
156	} else {
157		struct omapfb_platform_data *conf;
158
159		conf = fbdev->dev->platform_data;
160		fbdev->mem_desc = conf->mem_desc;
161	}
162
163	if (!fbdev->mem_desc.region_cnt) {
		/* No config at all: one region sized for the default mode. */
164		struct lcd_panel *panel = fbdev->panel;
165		int def_size;
166		int bpp = panel->bpp;
167
168		/* 12 bpp is packed in 16 bits */
169		if (bpp == 12)
170			bpp = 16;
171		def_size = def_vxres * def_vyres * bpp / 8;
172		fbdev->mem_desc.region_cnt = 1;
173		fbdev->mem_desc.region[0].size = PAGE_ALIGN(def_size);
174	}
175	r = fbdev->ctrl->init(fbdev, 0, &fbdev->mem_desc);
176	if (r < 0) {
177		dev_err(fbdev->dev, "controller initialization failed (%d)\n",
178			r);
179		return r;
180	}
181
182#ifdef DEBUG
183	for (i = 0; i < fbdev->mem_desc.region_cnt; i++) {
184		dev_dbg(fbdev->dev, "region%d phys %08x virt %p size=%lu\n",
185			 i,
186			 fbdev->mem_desc.region[i].paddr,
187			 fbdev->mem_desc.region[i].vaddr,
188			 fbdev->mem_desc.region[i].size);
189	}
190#endif
191	return 0;
192}
193
/* Undo ctrl_init() by delegating to the controller's cleanup hook. */
194static void ctrl_cleanup(struct omapfb_device *fbdev)
195{
196	fbdev->ctrl->cleanup();
197}
198
199/* Must be called with fbdev->rqueue_mutex held. */
/*
 * Push the current fb_info var/plane settings down to the controller:
 * recompute the framebuffer offset from the pan position, sync any pending
 * controller work, then reprogram the plane (and scaler, if the controller
 * has one).  Returns the controller's status code.
 */
200static int ctrl_change_mode(struct fb_info *fbi)
201{
202	int r;
203	unsigned long offset;
204	struct omapfb_plane_struct *plane = fbi->par;
205	struct omapfb_device *fbdev = plane->fbdev;
206	struct fb_var_screeninfo *var = &fbi->var;
207
	/* Byte offset of the visible area inside the virtual framebuffer. */
208	offset = var->yoffset * fbi->fix.line_length +
209		 var->xoffset * var->bits_per_pixel / 8;
210
211	if (fbdev->ctrl->sync)
212		fbdev->ctrl->sync();
213	r = fbdev->ctrl->setup_plane(plane->idx, plane->info.channel_out,
214				 offset, var->xres_virtual,
215				 plane->info.pos_x, plane->info.pos_y,
216				 var->xres, var->yres, plane->color_mode);
	/* NOTE(review): set_scale's result overwrites setup_plane's r. */
217	if (fbdev->ctrl->set_scale != NULL)
218		r = fbdev->ctrl->set_scale(plane->idx,
219					   var->xres, var->yres,
220					   plane->info.out_width,
221					   plane->info.out_height);
222
223	return r;
224}
225
226/*
227 * ---------------------------------------------------------------------------
228 * fbdev framework callbacks and the ioctl interface
229 * ---------------------------------------------------------------------------
230 */
231/* Called each time the omapfb device is opened */
/* No per-open state is needed; always succeeds. */
232static int omapfb_open(struct fb_info *info, int user)
233{
234	return 0;
235}
236
237static void omapfb_sync(struct fb_info *info);
238
239/* Called when the omapfb device is closed. We make sure that any pending
240 * gfx DMA operations are ended, before we return. */
241static int omapfb_release(struct fb_info *info, int user)
242{
243	omapfb_sync(info);
244	return 0;
245}
246
247/* Store a single color palette entry into a pseudo palette or the hardware
248 * palette if one is available. For now we support only 16bpp and thus store
249 * the entry only to the pseudo palette.
250 */
/*
 * @update_hw_pal: forward the entry to the controller's hardware palette
 * as well (CLUT modes only).  Returns 0 or -EINVAL for unsupported modes.
 */
251static int _setcolreg(struct fb_info *info, u_int regno, u_int red, u_int green,
252		      u_int blue, u_int transp, int update_hw_pal)
253{
254	struct omapfb_plane_struct *plane = info->par;
255	struct omapfb_device *fbdev = plane->fbdev;
256	struct fb_var_screeninfo *var = &info->var;
257	int r = 0;
258
259	switch (plane->color_mode) {
260	case OMAPFB_COLOR_YUV422:
261	case OMAPFB_COLOR_YUV420:
262	case OMAPFB_COLOR_YUY422:
		/* YUV modes have no palette. */
263		r = -EINVAL;
264		break;
265	case OMAPFB_COLOR_CLUT_8BPP:
266	case OMAPFB_COLOR_CLUT_4BPP:
267	case OMAPFB_COLOR_CLUT_2BPP:
268	case OMAPFB_COLOR_CLUT_1BPP:
269		if (fbdev->ctrl->setcolreg)
270			r = fbdev->ctrl->setcolreg(regno, red, green, blue,
271						   transp, update_hw_pal);
272		/* Fallthrough */
273	case OMAPFB_COLOR_RGB565:
274	case OMAPFB_COLOR_RGB444:
275		if (r != 0)
276			break;
277
		/* NOTE(review): regno is u_int, so this check is always
		 * false (dead code); a negative caller value wraps. */
278		if (regno < 0) {
279			r = -EINVAL;
280			break;
281		}
282
283		if (regno < 16) {
			/* Pack 16-bit channel values into the pixel layout
			 * described by var->{red,green,blue}. */
284			u16 pal;
285			pal = ((red >> (16 - var->red.length)) <<
286					var->red.offset) |
287			      ((green >> (16 - var->green.length)) <<
288					var->green.offset) |
289			      (blue >> (16 - var->blue.length));
290			((u32 *)(info->pseudo_palette))[regno] = pal;
291		}
292		break;
293	default:
294		BUG();
295	}
296	return r;
297}
298
/* fb_ops.fb_setcolreg: single-entry variant, always updates the HW palette. */
299static int omapfb_setcolreg(u_int regno, u_int red, u_int green, u_int blue,
300			    u_int transp, struct fb_info *info)
301{
302	return _setcolreg(info, regno, red, green, blue, transp, 1);
303}
304
/*
 * fb_ops.fb_setcmap: load a whole color map.  The HW palette is flushed
 * only on the last entry (update_hw_pal argument of _setcolreg); stops and
 * returns the first error encountered.
 */
305static int omapfb_setcmap(struct fb_cmap *cmap, struct fb_info *info)
306{
307	int count, index, r;
308	u16 *red, *green, *blue, *transp;
309	u16 trans = 0xffff;
310
311	red     = cmap->red;
312	green   = cmap->green;
313	blue    = cmap->blue;
314	transp  = cmap->transp;
315	index   = cmap->start;
316
317	for (count = 0; count < cmap->len; count++) {
318		if (transp)
319			trans = *transp++;
320		r = _setcolreg(info, index++, *red++, *green++, *blue++, trans,
321				count == cmap->len - 1);
322		if (r != 0)
323			return r;
324	}
325
326	return 0;
327}
328
329static int omapfb_update_full_screen(struct fb_info *fbi);
330
/*
 * fb_ops.fb_blank: only full power-down and full unblank are supported.
 * On unblank from suspend, a manual-update display needs an explicit
 * full-screen refresh, done after dropping the rqueue lock (the update
 * path takes it itself).
 */
331static int omapfb_blank(int blank, struct fb_info *fbi)
332{
333	struct omapfb_plane_struct *plane = fbi->par;
334	struct omapfb_device *fbdev = plane->fbdev;
335	int do_update = 0;
336	int r = 0;
337
338	omapfb_rqueue_lock(fbdev);
339	switch (blank) {
340	case VESA_NO_BLANKING:
341		if (fbdev->state == OMAPFB_SUSPENDED) {
342			if (fbdev->ctrl->resume)
343				fbdev->ctrl->resume();
344			fbdev->panel->enable(fbdev->panel);
345			fbdev->state = OMAPFB_ACTIVE;
346			if (fbdev->ctrl->get_update_mode() ==
347					OMAPFB_MANUAL_UPDATE)
348				do_update = 1;
349		}
350		break;
351	case VESA_POWERDOWN:
352		if (fbdev->state == OMAPFB_ACTIVE) {
			/* Panel off first, then controller. */
353			fbdev->panel->disable(fbdev->panel);
354			if (fbdev->ctrl->suspend)
355				fbdev->ctrl->suspend();
356			fbdev->state = OMAPFB_SUSPENDED;
357		}
358		break;
359	default:
360		r = -EINVAL;
361	}
362	omapfb_rqueue_unlock(fbdev);
363
364	if (r == 0 && do_update)
365		r = omapfb_update_full_screen(fbi);
366
367	return r;
368}
369
/* Wait (under the rqueue lock) until the controller has no pending work. */
370static void omapfb_sync(struct fb_info *fbi)
371{
372	struct omapfb_plane_struct *plane = fbi->par;
373	struct omapfb_device *fbdev = plane->fbdev;
374
375	omapfb_rqueue_lock(fbdev);
376	if (fbdev->ctrl->sync)
377		fbdev->ctrl->sync();
378	omapfb_rqueue_unlock(fbdev);
379}
380
381/*
382 * Set fb_info.fix fields and also updates fbdev.
383 * When calling this fb_info.var must be set up already.
384 */
385static void set_fb_fix(struct fb_info *fbi)
386{
387	struct fb_fix_screeninfo *fix = &fbi->fix;
388	struct fb_var_screeninfo *var = &fbi->var;
389	struct omapfb_plane_struct *plane = fbi->par;
390	struct omapfb_mem_region *rg;
391	int bpp;
392
	/* Point the fix/screen_base fields at this plane's memory region. */
393	rg = &plane->fbdev->mem_desc.region[plane->idx];
394	fbi->screen_base	= (char __iomem *)rg->vaddr;
395	fix->smem_start		= rg->paddr;
396	fix->smem_len		= rg->size;
397
398	fix->type = FB_TYPE_PACKED_PIXELS;
399	bpp = var->bits_per_pixel;
	/* nonstd != 0 means a YUV mode (see set_color_mode). */
400	if (var->nonstd)
401		fix->visual = FB_VISUAL_PSEUDOCOLOR;
402	else switch (var->bits_per_pixel) {
403	case 16:
404	case 12:
405		fix->visual = FB_VISUAL_TRUECOLOR;
406		/* 12bpp is stored in 16 bits */
407		bpp = 16;
408		break;
409	case 1:
410	case 2:
411	case 4:
412	case 8:
413		fix->visual = FB_VISUAL_PSEUDOCOLOR;
414		break;
415	}
416	fix->accel		= FB_ACCEL_OMAP1610;
417	fix->line_length	= var->xres_virtual * bpp / 8;
418}
419
/*
 * Derive plane->color_mode from var->nonstd (YUV modes) or, when nonstd
 * is 0, from var->bits_per_pixel (CLUT/RGB modes).  May adjust
 * var->bits_per_pixel as a side effect.  Returns 0 or -EINVAL.
 */
420static int set_color_mode(struct omapfb_plane_struct *plane,
421			  struct fb_var_screeninfo *var)
422{
423	switch (var->nonstd) {
424	case 0:
		/* Standard mode: decide by bpp below. */
425		break;
426	case OMAPFB_COLOR_YUV422:
427		var->bits_per_pixel = 16;
428		plane->color_mode = var->nonstd;
429		return 0;
430	case OMAPFB_COLOR_YUV420:
431		var->bits_per_pixel = 12;
432		plane->color_mode = var->nonstd;
433		return 0;
434	case OMAPFB_COLOR_YUY422:
435		var->bits_per_pixel = 16;
436		plane->color_mode = var->nonstd;
437		return 0;
438	default:
439		return -EINVAL;
440	}
441
442	switch (var->bits_per_pixel) {
443	case 1:
444		plane->color_mode = OMAPFB_COLOR_CLUT_1BPP;
445		return 0;
446	case 2:
447		plane->color_mode = OMAPFB_COLOR_CLUT_2BPP;
448		return 0;
449	case 4:
450		plane->color_mode = OMAPFB_COLOR_CLUT_4BPP;
451		return 0;
452	case 8:
453		plane->color_mode = OMAPFB_COLOR_CLUT_8BPP;
454		return 0;
455	case 12:
		/* RGB444 is stored in 16-bit words. */
456		var->bits_per_pixel = 16;
457		plane->color_mode = OMAPFB_COLOR_RGB444;
458		return 0;
459	case 16:
460		plane->color_mode = OMAPFB_COLOR_RGB565;
461		return 0;
462	default:
463		return -EINVAL;
464	}
465}
466
467/*
468 * Check the values in var against our capabilities and in case of out of
469 * bound values try to adjust them.
470 */
/*
 * Validates color mode, rotation, resolution and virtual-resolution fields,
 * clamping where possible and fixing up the RGB bitfield layout and panel
 * timing fields.  Returns 0 on success, -EINVAL if the request cannot be
 * made to fit.  Pure fbdev check_var semantics: only *var is modified.
 */
471static int set_fb_var(struct fb_info *fbi,
472		      struct fb_var_screeninfo *var)
473{
474	int bpp;
475	unsigned long max_frame_size;
476	unsigned long line_size;
477	int xres_min, xres_max;
478	int yres_min, yres_max;
479	struct omapfb_plane_struct *plane = fbi->par;
480	struct omapfb_device *fbdev = plane->fbdev;
481	struct lcd_panel *panel = fbdev->panel;
482
483	if (set_color_mode(plane, var) < 0)
484		return -EINVAL;
485
486	bpp = var->bits_per_pixel;
487	if (plane->color_mode == OMAPFB_COLOR_RGB444)
488		bpp = 16;
489
	/* Rotation swaps the x/y limits; OMAP15xx can only display the
	 * full panel resolution, so force it there. */
490	switch (var->rotate) {
491	case 0:
492	case 180:
493		xres_min = OMAPFB_PLANE_XRES_MIN;
494		xres_max = panel->x_res;
495		yres_min = OMAPFB_PLANE_YRES_MIN;
496		yres_max = panel->y_res;
497		if (cpu_is_omap15xx()) {
498			var->xres = panel->x_res;
499			var->yres = panel->y_res;
500		}
501		break;
502	case 90:
503	case 270:
504		xres_min = OMAPFB_PLANE_YRES_MIN;
505		xres_max = panel->y_res;
506		yres_min = OMAPFB_PLANE_XRES_MIN;
507		yres_max = panel->x_res;
508		if (cpu_is_omap15xx()) {
509			var->xres = panel->y_res;
510			var->yres = panel->x_res;
511		}
512		break;
513	default:
514		return -EINVAL;
515	}
516
517	if (var->xres < xres_min)
518		var->xres = xres_min;
519	if (var->yres < yres_min)
520		var->yres = yres_min;
521	if (var->xres > xres_max)
522		var->xres = xres_max;
523	if (var->yres > yres_max)
524		var->yres = yres_max;
525
526	if (var->xres_virtual < var->xres)
527		var->xres_virtual = var->xres;
528	if (var->yres_virtual < var->yres)
529		var->yres_virtual = var->yres;
530	max_frame_size = fbdev->mem_desc.region[plane->idx].size;
531	line_size = var->xres_virtual * bpp / 8;
	/* Shrink the virtual size until the frame fits in plane memory. */
532	if (line_size * var->yres_virtual > max_frame_size) {
533		/* Try to keep yres_virtual first */
534		line_size = max_frame_size / var->yres_virtual;
535		var->xres_virtual = line_size * 8 / bpp;
536		if (var->xres_virtual < var->xres) {
537			/* Still doesn't fit. Shrink yres_virtual too */
538			var->xres_virtual = var->xres;
539			line_size = var->xres * bpp / 8;
540			var->yres_virtual = max_frame_size / line_size;
541		}
542		/* Recheck this, as the virtual size changed. */
543		if (var->xres_virtual < var->xres)
544			var->xres = var->xres_virtual;
545		if (var->yres_virtual < var->yres)
546			var->yres = var->yres_virtual;
547		if (var->xres < xres_min || var->yres < yres_min)
548			return -EINVAL;
549	}
550	if (var->xres + var->xoffset > var->xres_virtual)
551		var->xoffset = var->xres_virtual - var->xres;
552	if (var->yres + var->yoffset > var->yres_virtual)
553		var->yoffset = var->yres_virtual - var->yres;
554	line_size = var->xres * bpp / 8;
555
	/* Fixed channel layouts: RGB444 (4/4/4) or RGB565 (5/6/5). */
556	if (plane->color_mode == OMAPFB_COLOR_RGB444) {
557		var->red.offset	  = 8; var->red.length	 = 4;
558			var->red.msb_right   = 0;
559		var->green.offset = 4; var->green.length = 4;
560			var->green.msb_right = 0;
561		var->blue.offset  = 0; var->blue.length  = 4;
562			var->blue.msb_right  = 0;
563	} else {
564		var->red.offset	 = 11; var->red.length	 = 5;
565			var->red.msb_right   = 0;
566		var->green.offset = 5;	var->green.length = 6;
567			var->green.msb_right = 0;
568		var->blue.offset = 0;	var->blue.length  = 5;
569			var->blue.msb_right  = 0;
570	}
571
572	var->height		= -1;
573	var->width		= -1;
574	var->grayscale		= 0;
575
576	/* pixclock in ps, the rest in pixclock */
577	var->pixclock		= 10000000 / (panel->pixel_clock / 100);
578	var->left_margin	= panel->hfp;
579	var->right_margin	= panel->hbp;
580	var->upper_margin	= panel->vfp;
581	var->lower_margin	= panel->vbp;
582	var->hsync_len		= panel->hsw;
583	var->vsync_len		= panel->vsw;
584
585	/* TODO: get these from panel->config */
586	var->vmode		= FB_VMODE_NONINTERLACED;
587	var->sync		= 0;
588
589	return 0;
590}
591
592
593/* Set rotation (0, 90, 180, 270 degree), and switch to the new mode. */
/* Only supported on OMAP15xx; a no-op elsewhere or when rotate is unchanged. */
594static void omapfb_rotate(struct fb_info *fbi, int rotate)
595{
596	struct omapfb_plane_struct *plane = fbi->par;
597	struct omapfb_device *fbdev = plane->fbdev;
598
599	omapfb_rqueue_lock(fbdev);
600	if (cpu_is_omap15xx() && rotate != fbi->var.rotate) {
601		struct fb_var_screeninfo *new_var = &fbdev->new_var;
602
		/* Validate the rotated mode before committing it. */
603		memcpy(new_var, &fbi->var, sizeof(*new_var));
604		new_var->rotate = rotate;
605		if (set_fb_var(fbi, new_var) == 0 &&
606		    memcmp(new_var, &fbi->var, sizeof(*new_var))) {
607			memcpy(&fbi->var, new_var, sizeof(*new_var));
608			ctrl_change_mode(fbi);
609		}
610	}
611	omapfb_rqueue_unlock(fbdev);
612}
613
614/*
615 * Set new x,y offsets in the virtual display for the visible area and switch
616 * to the new mode.
617 */
618static int omapfb_pan_display(struct fb_var_screeninfo *var,
619			       struct fb_info *fbi)
620{
621	struct omapfb_plane_struct *plane = fbi->par;
622	struct omapfb_device *fbdev = plane->fbdev;
623	int r = 0;
624
625	omapfb_rqueue_lock(fbdev);
626	if (var->xoffset != fbi->var.xoffset ||
627	    var->yoffset != fbi->var.yoffset) {
628		struct fb_var_screeninfo *new_var = &fbdev->new_var;
629
		/* Re-validate with the new offsets before committing. */
630		memcpy(new_var, &fbi->var, sizeof(*new_var));
631		new_var->xoffset = var->xoffset;
632		new_var->yoffset = var->yoffset;
633		if (set_fb_var(fbi, new_var))
634			r = -EINVAL;
635		else {
636			memcpy(&fbi->var, new_var, sizeof(*new_var));
637			ctrl_change_mode(fbi);
638		}
639	}
640	omapfb_rqueue_unlock(fbdev);
641
642	return r;
643}
644
645/* Set mirror to vertical axis and switch to the new mode. */
/* Not available on OMAP15xx (-EINVAL); mirror is normalized to 0/1. */
646static int omapfb_mirror(struct fb_info *fbi, int mirror)
647{
648	struct omapfb_plane_struct *plane = fbi->par;
649	struct omapfb_device *fbdev = plane->fbdev;
650	int r = 0;
651
652	omapfb_rqueue_lock(fbdev);
653	mirror = mirror ? 1 : 0;
654	if (cpu_is_omap15xx())
655		r = -EINVAL;
656	else if (mirror != plane->info.mirror) {
657		plane->info.mirror = mirror;
658		r = ctrl_change_mode(fbi);
659	}
660	omapfb_rqueue_unlock(fbdev);
661
662	return r;
663}
664
665/*
666 * Check values in var, try to adjust them in case of out of bound values if
667 * possible, or return error.
668 */
/* fb_ops.fb_check_var: syncs the controller, then defers to set_fb_var(). */
669static int omapfb_check_var(struct fb_var_screeninfo *var, struct fb_info *fbi)
670{
671	struct omapfb_plane_struct *plane = fbi->par;
672	struct omapfb_device *fbdev = plane->fbdev;
673	int r;
674
675	omapfb_rqueue_lock(fbdev);
676	if (fbdev->ctrl->sync != NULL)
677		fbdev->ctrl->sync();
678	r = set_fb_var(fbi, var);
679	omapfb_rqueue_unlock(fbdev);
680
681	return r;
682}
683
684/*
685 * Switch to a new mode. The parameters for it has been check already by
686 * omapfb_check_var.
687 */
688static int omapfb_set_par(struct fb_info *fbi)
689{
690	struct omapfb_plane_struct *plane = fbi->par;
691	struct omapfb_device *fbdev = plane->fbdev;
692	int r = 0;
693
694	omapfb_rqueue_lock(fbdev);
695	set_fb_fix(fbi);
696	r = ctrl_change_mode(fbi);
697	omapfb_rqueue_unlock(fbdev);
698
699	return r;
700}
701
/*
 * Queue an asynchronous display update of the given window; callback (may
 * be NULL) fires when the transfer completes.  Only meaningful in manual
 * update mode with a controller providing update_window.  Window and
 * output coordinates are clipped to the visible area / panel size; a
 * fully-clipped window succeeds as a no-op.  Exported for panel drivers.
 */
702int omapfb_update_window_async(struct fb_info *fbi,
703				struct omapfb_update_window *win,
704				void (*callback)(void *),
705				void *callback_data)
706{
707	struct omapfb_plane_struct *plane = fbi->par;
708	struct omapfb_device *fbdev = plane->fbdev;
709	struct fb_var_screeninfo *var;
710
711	var = &fbi->var;
	/* NOTE(review): out_x uses '>' while the others use '>=', and the
	 * out coordinates are checked against var rather than the panel
	 * size used for clipping below — looks inconsistent; verify. */
712	if (win->x >= var->xres || win->y >= var->yres ||
713	    win->out_x > var->xres || win->out_y >= var->yres)
714		return -EINVAL;
715
716	if (!fbdev->ctrl->update_window ||
717	    fbdev->ctrl->get_update_mode() != OMAPFB_MANUAL_UPDATE)
718		return -ENODEV;
719
720	if (win->x + win->width >= var->xres)
721		win->width = var->xres - win->x;
722	if (win->y + win->height >= var->yres)
723		win->height = var->yres - win->y;
724	/* The out sizes should be cropped to the LCD size */
725	if (win->out_x + win->out_width > fbdev->panel->x_res)
726		win->out_width = fbdev->panel->x_res - win->out_x;
727	if (win->out_y + win->out_height > fbdev->panel->y_res)
728		win->out_height = fbdev->panel->y_res - win->out_y;
729	if (!win->width || !win->height || !win->out_width || !win->out_height)
730		return 0;
731
732	return fbdev->ctrl->update_window(fbi, win, callback, callback_data);
733}
734EXPORT_SYMBOL(omapfb_update_window_async);
735
/* Synchronous wrapper: run an async window update under the rqueue lock. */
736static int omapfb_update_win(struct fb_info *fbi,
737				struct omapfb_update_window *win)
738{
739	struct omapfb_plane_struct *plane = fbi->par;
740	int ret;
741
742	omapfb_rqueue_lock(plane->fbdev);
743	ret = omapfb_update_window_async(fbi, win, NULL, 0);
744	omapfb_rqueue_unlock(plane->fbdev);
745
746	return ret;
747}
748
/*
 * Refresh the whole visible area (manual-update mode only).  Builds a
 * full-screen window and pushes it straight to the controller under the
 * rqueue lock.  Returns -ENODEV when manual update is unavailable.
 */
749static int omapfb_update_full_screen(struct fb_info *fbi)
750{
751	struct omapfb_plane_struct *plane = fbi->par;
752	struct omapfb_device *fbdev = plane->fbdev;
753	struct omapfb_update_window win;
754	int r;
755
756	if (!fbdev->ctrl->update_window ||
757	    fbdev->ctrl->get_update_mode() != OMAPFB_MANUAL_UPDATE)
758		return -ENODEV;
759
760	win.x = 0;
761	win.y = 0;
762	win.width = fbi->var.xres;
763	win.height = fbi->var.yres;
764	win.out_x = 0;
765	win.out_y = 0;
766	win.out_width = fbi->var.xres;
767	win.out_height = fbi->var.yres;
768	win.format = 0;
769
770	omapfb_rqueue_lock(fbdev);
771	r = fbdev->ctrl->update_window(fbi, &win, NULL, 0);
772	omapfb_rqueue_unlock(fbdev);
773
774	return r;
775}
776
/*
 * OMAPFB_SETUP_PLANE ioctl backend: apply new plane position/size/enable
 * state.  Rejects windows outside the panel and enabling a plane whose
 * memory has been freed.  On any controller failure the previous plane
 * info is restored.
 */
777static int omapfb_setup_plane(struct fb_info *fbi, struct omapfb_plane_info *pi)
778{
779	struct omapfb_plane_struct *plane = fbi->par;
780	struct omapfb_device *fbdev = plane->fbdev;
781	struct lcd_panel *panel = fbdev->panel;
782	struct omapfb_plane_info old_info;
783	int r = 0;
784
785	if (pi->pos_x + pi->out_width > panel->x_res ||
786	    pi->pos_y + pi->out_height > panel->y_res)
787		return -EINVAL;
788
789	omapfb_rqueue_lock(fbdev);
790	if (pi->enabled && !fbdev->mem_desc.region[plane->idx].size) {
791		/*
792		 * This plane's memory was freed, can't enable it
793		 * until it's reallocated.
794		 */
795		r = -EINVAL;
796		goto out;
797	}
798	old_info = plane->info;
799	plane->info = *pi;
800	if (pi->enabled) {
801		r = ctrl_change_mode(fbi);
802		if (r < 0) {
803			plane->info = old_info;
804			goto out;
805		}
806	}
807	r = fbdev->ctrl->enable_plane(plane->idx, pi->enabled);
808	if (r < 0) {
809		plane->info = old_info;
810		goto out;
811	}
812out:
813	omapfb_rqueue_unlock(fbdev);
814	return r;
815}
816
/* OMAPFB_QUERY_PLANE ioctl backend: copy out the cached plane info. */
817static int omapfb_query_plane(struct fb_info *fbi, struct omapfb_plane_info *pi)
818{
819	struct omapfb_plane_struct *plane = fbi->par;
820
821	*pi = plane->info;
822	return 0;
823}
824
/*
 * OMAPFB_SETUP_MEM ioctl backend: resize/retype a plane's video memory via
 * the controller's setup_mem hook.  The plane must be disabled.  On
 * controller failure the region descriptor is rolled back; on success the
 * fb var/fix are refreshed (or invalidated when size becomes 0).
 */
825static int omapfb_setup_mem(struct fb_info *fbi, struct omapfb_mem_info *mi)
826{
827	struct omapfb_plane_struct *plane = fbi->par;
828	struct omapfb_device *fbdev = plane->fbdev;
829	struct omapfb_mem_region *rg = &fbdev->mem_desc.region[plane->idx];
830	size_t size;
831	int r = 0;
832
833	if (fbdev->ctrl->setup_mem == NULL)
834		return -ENODEV;
835	if (mi->type > OMAPFB_MEMTYPE_MAX)
836		return -EINVAL;
837
838	size = PAGE_ALIGN(mi->size);
839	omapfb_rqueue_lock(fbdev);
840	if (plane->info.enabled) {
841		r = -EBUSY;
842		goto out;
843	}
844	if (rg->size != size || rg->type != mi->type) {
845		struct fb_var_screeninfo *new_var = &fbdev->new_var;
846		unsigned long old_size = rg->size;
847		u8 old_type = rg->type;
848		unsigned long paddr;
849
850		rg->size = size;
851		rg->type = mi->type;
852		/*
853		 * size == 0 is a special case, for which we
854		 * don't check / adjust the screen parameters.
855		 * This isn't a problem since the plane can't
856		 * be reenabled unless its size is > 0.
857		 */
		/* NOTE(review): the inner 'if (size)' is redundant — the
		 * outer condition already requires size != 0. */
858		if (old_size != size && size) {
859			if (size) {
860				memcpy(new_var, &fbi->var, sizeof(*new_var));
861				r = set_fb_var(fbi, new_var);
862				if (r < 0)
863					goto out;
864			}
865		}
866
867		if (fbdev->ctrl->sync)
868			fbdev->ctrl->sync();
869		r = fbdev->ctrl->setup_mem(plane->idx, size, mi->type, &paddr);
870		if (r < 0) {
871			/* Revert changes. */
872			rg->size = old_size;
873			rg->type = old_type;
874			goto out;
875		}
876		rg->paddr = paddr;
877
878		if (old_size != size) {
879			if (size) {
880				memcpy(&fbi->var, new_var, sizeof(fbi->var));
881				set_fb_fix(fbi);
882			} else {
883				/*
884				 * Set these explicitly to indicate that the
885				 * plane memory is dealloce'd, the other
886				 * screen parameters in var / fix are invalid.
887				 */
888				fbi->fix.smem_start = 0;
889				fbi->fix.smem_len = 0;
890			}
891		}
892	}
893out:
894	omapfb_rqueue_unlock(fbdev);
895
896	return r;
897}
898
/* OMAPFB_QUERY_MEM ioctl backend: report the plane's memory size and type. */
899static int omapfb_query_mem(struct fb_info *fbi, struct omapfb_mem_info *mi)
900{
901	struct omapfb_plane_struct *plane = fbi->par;
902	struct omapfb_device *fbdev = plane->fbdev;
903	struct omapfb_mem_region *rg;
904
905	rg = &fbdev->mem_desc.region[plane->idx];
906	memset(mi, 0, sizeof(*mi));
907	mi->size = rg->size;
908	mi->type = rg->type;
909
910	return 0;
911}
912
/*
 * Program the controller's color-key registers under the rqueue lock;
 * -ENODEV when the controller lacks the hook.
 */
913static int omapfb_set_color_key(struct omapfb_device *fbdev,
914				struct omapfb_color_key *ck)
915{
916	int r;
917
918	if (!fbdev->ctrl->set_color_key)
919		return -ENODEV;
920
921	omapfb_rqueue_lock(fbdev);
922	r = fbdev->ctrl->set_color_key(ck);
923	omapfb_rqueue_unlock(fbdev);
924
925	return r;
926}
927
/* Counterpart of omapfb_set_color_key(): read back the current key. */
928static int omapfb_get_color_key(struct omapfb_device *fbdev,
929				struct omapfb_color_key *ck)
930{
931	int r;
932
933	if (!fbdev->ctrl->get_color_key)
934		return -ENODEV;
935
936	omapfb_rqueue_lock(fbdev);
937	r = fbdev->ctrl->get_color_key(ck);
938	omapfb_rqueue_unlock(fbdev);
939
940	return r;
941}
942
/* One notifier chain per plane; lazily initialized on first registration. */
943static struct blocking_notifier_head omapfb_client_list[OMAPFB_PLANE_NUM];
944static int notifier_inited;
945
/* Initialize every per-plane notifier chain head exactly once. */
946static void omapfb_init_notifier(void)
947{
948	int i;
949
950	for (i = 0; i < OMAPFB_PLANE_NUM; i++)
951		BLOCKING_INIT_NOTIFIER_HEAD(&omapfb_client_list[i]);
952}
953
/*
 * Register a panel-driver client for events on one plane.  The callback is
 * stored as the notifier_call of the embedded notifier_block and fired via
 * the per-plane blocking notifier chain; if a controller with a
 * bind_client hook is already probed, the client is bound immediately.
 * Returns 0 or a negative errno from chain registration.
 *
 * NOTE(review): the lazy notifier_inited init is unsynchronized — racy if
 * two clients register concurrently; confirm callers serialize this.
 */
954int omapfb_register_client(struct omapfb_notifier_block *omapfb_nb,
955			    omapfb_notifier_callback_t callback,
956			    void *callback_data)
957{
958	int r;
959
	/* Valid indices are 0..OMAPFB_PLANE_NUM-1: use >=, since
	 * plane_idx == OMAPFB_PLANE_NUM would index one past the end of
	 * omapfb_client_list[]. */
960	if ((unsigned)omapfb_nb->plane_idx >= OMAPFB_PLANE_NUM)
961		return -EINVAL;
962
963	if (!notifier_inited) {
964		omapfb_init_notifier();
965		notifier_inited = 1;
966	}
967
968	omapfb_nb->nb.notifier_call = (int (*)(struct notifier_block *,
969					unsigned long, void *))callback;
970	omapfb_nb->data = callback_data;
971	r = blocking_notifier_chain_register(
972				&omapfb_client_list[omapfb_nb->plane_idx],
973				&omapfb_nb->nb);
974	if (r)
975		return r;
976	if (omapfb_dev != NULL &&
977	    omapfb_dev->ctrl && omapfb_dev->ctrl->bind_client) {
978		omapfb_dev->ctrl->bind_client(omapfb_nb);
979	}
980
981	return 0;
982}
983EXPORT_SYMBOL(omapfb_register_client);
984
/* Remove a client from its plane's notifier chain (see register above). */
985int omapfb_unregister_client(struct omapfb_notifier_block *omapfb_nb)
986{
987	return blocking_notifier_chain_unregister(
988		&omapfb_client_list[omapfb_nb->plane_idx], &omapfb_nb->nb);
989}
990EXPORT_SYMBOL(omapfb_unregister_client);
991
/*
 * Broadcast an event to every registered client on every plane; the
 * plane's fb_info is passed as the notifier data pointer.  A no-op before
 * any client has registered.
 */
992void omapfb_notify_clients(struct omapfb_device *fbdev, unsigned long event)
993{
994	int i;
995
996	if (!notifier_inited)
997		/* no client registered yet */
998		return;
999
1000	for (i = 0; i < OMAPFB_PLANE_NUM; i++)
1001		blocking_notifier_call_chain(&omapfb_client_list[i], event,
1002				    fbdev->fb_info[i]);
1003}
1004EXPORT_SYMBOL(omapfb_notify_clients);
1005
/* Switch auto/manual/disabled update mode via the controller, locked. */
1006static int omapfb_set_update_mode(struct omapfb_device *fbdev,
1007				   enum omapfb_update_mode mode)
1008{
1009	int r;
1010
1011	omapfb_rqueue_lock(fbdev);
1012	r = fbdev->ctrl->set_update_mode(mode);
1013	omapfb_rqueue_unlock(fbdev);
1014
1015	return r;
1016}
1017
/* Query the controller's current update mode, locked. */
1018static enum omapfb_update_mode omapfb_get_update_mode(struct omapfb_device *fbdev)
1019{
1020	int r;
1021
1022	omapfb_rqueue_lock(fbdev);
1023	r = fbdev->ctrl->get_update_mode();
1024	omapfb_rqueue_unlock(fbdev);
1025
1026	return r;
1027}
1028
/* Combine controller capabilities for a plane with the panel's ctrl caps. */
1029static void omapfb_get_caps(struct omapfb_device *fbdev, int plane,
1030		     struct omapfb_caps *caps)
1031{
1032	memset(caps, 0, sizeof(*caps));
1033	fbdev->ctrl->get_caps(plane, caps);
1034	caps->ctrl |= fbdev->panel->get_caps(fbdev->panel);
1035}
1036
1037/* For lcd testing */
/*
 * Write one 16-bit pixel value at the top-left of plane 0's framebuffer;
 * in manual-update mode a minimal 2x2 window update is pushed so the pixel
 * actually reaches the display.  Exported for LCD test code.
 */
1038void omapfb_write_first_pixel(struct omapfb_device *fbdev, u16 pixval)
1039{
1040	omapfb_rqueue_lock(fbdev);
1041	*(u16 *)fbdev->mem_desc.region[0].vaddr = pixval;
1042	if (fbdev->ctrl->get_update_mode() == OMAPFB_MANUAL_UPDATE) {
1043		struct omapfb_update_window win;
1044
1045		memset(&win, 0, sizeof(win));
1046		win.width = 2;
1047		win.height = 2;
1048		win.out_width = 2;
1049		win.out_height = 2;
1050		fbdev->ctrl->update_window(fbdev->fb_info[0], &win, NULL, 0);
1051	}
1052	omapfb_rqueue_unlock(fbdev);
1053}
1055
1056/*
1057 * Ioctl interface. Part of the kernel mode frame buffer API is duplicated
1058 * here to be accessible by user mode code.
1059 */
/*
 * Dispatches every OMAPFB_* ioctl.  All user argument marshalling happens
 * here (one union sized for the largest argument); the real work is done
 * by the omapfb_* helpers above.  Returns 0, -EFAULT on copy failures, or
 * the helper's status.
 */
1060static int omapfb_ioctl(struct fb_info *fbi, unsigned int cmd,
1061			unsigned long arg)
1062{
1063	struct omapfb_plane_struct *plane = fbi->par;
1064	struct omapfb_device *fbdev = plane->fbdev;
1065	struct fb_ops *ops = fbi->fbops;
1066	union {
1067		struct omapfb_update_window	update_window;
1068		struct omapfb_plane_info	plane_info;
1069		struct omapfb_mem_info		mem_info;
1070		struct omapfb_color_key		color_key;
1071		enum omapfb_update_mode		update_mode;
1072		struct omapfb_caps		caps;
1073		unsigned int			mirror;
1074		int				plane_out;
1075		int				enable_plane;
1076	} p;
1077	int r = 0;
1078
1079	BUG_ON(!ops);
1080	switch (cmd) {
1081	case OMAPFB_MIRROR:
1082		if (get_user(p.mirror, (int __user *)arg))
1083			r = -EFAULT;
1084		else
			/* NOTE(review): omapfb_mirror()'s return value is
			 * discarded here — failures are silently dropped. */
1085			omapfb_mirror(fbi, p.mirror);
1086		break;
1087	case OMAPFB_SYNC_GFX:
1088		omapfb_sync(fbi);
1089		break;
1090	case OMAPFB_VSYNC:
1091		break;
1092	case OMAPFB_SET_UPDATE_MODE:
1093		if (get_user(p.update_mode, (int __user *)arg))
1094			r = -EFAULT;
1095		else
1096			r = omapfb_set_update_mode(fbdev, p.update_mode);
1097		break;
1098	case OMAPFB_GET_UPDATE_MODE:
1099		p.update_mode = omapfb_get_update_mode(fbdev);
1100		if (put_user(p.update_mode,
1101					(enum omapfb_update_mode __user *)arg))
1102			r = -EFAULT;
1103		break;
1104	case OMAPFB_UPDATE_WINDOW_OLD:
		/* Legacy layout: no out_* fields; mirror in from x/y/w/h. */
1105		if (copy_from_user(&p.update_window, (void __user *)arg,
1106				   sizeof(struct omapfb_update_window_old)))
1107			r = -EFAULT;
1108		else {
1109			struct omapfb_update_window *u = &p.update_window;
1110			u->out_x = u->x;
1111			u->out_y = u->y;
1112			u->out_width = u->width;
1113			u->out_height = u->height;
1114			memset(u->reserved, 0, sizeof(u->reserved));
1115			r = omapfb_update_win(fbi, u);
1116		}
1117		break;
1118	case OMAPFB_UPDATE_WINDOW:
1119		if (copy_from_user(&p.update_window, (void __user *)arg,
1120				   sizeof(p.update_window)))
1121			r = -EFAULT;
1122		else
1123			r = omapfb_update_win(fbi, &p.update_window);
1124		break;
1125	case OMAPFB_SETUP_PLANE:
1126		if (copy_from_user(&p.plane_info, (void __user *)arg,
1127				   sizeof(p.plane_info)))
1128			r = -EFAULT;
1129		else
1130			r = omapfb_setup_plane(fbi, &p.plane_info);
1131		break;
1132	case OMAPFB_QUERY_PLANE:
1133		if ((r = omapfb_query_plane(fbi, &p.plane_info)) < 0)
1134			break;
1135		if (copy_to_user((void __user *)arg, &p.plane_info,
1136					sizeof(p.plane_info)))
1137			r = -EFAULT;
1138		break;
1139	case OMAPFB_SETUP_MEM:
1140		if (copy_from_user(&p.mem_info, (void __user *)arg,
1141					sizeof(p.mem_info)))
1142			r = -EFAULT;
1143		else
1144			r = omapfb_setup_mem(fbi, &p.mem_info);
1145		break;
1146	case OMAPFB_QUERY_MEM:
1147		if ((r = omapfb_query_mem(fbi, &p.mem_info)) < 0)
1148			break;
1149		if (copy_to_user((void __user *)arg, &p.mem_info,
1150					sizeof(p.mem_info)))
1151			r = -EFAULT;
1152		break;
1153	case OMAPFB_SET_COLOR_KEY:
1154		if (copy_from_user(&p.color_key, (void __user *)arg,
1155				   sizeof(p.color_key)))
1156			r = -EFAULT;
1157		else
1158			r = omapfb_set_color_key(fbdev, &p.color_key);
1159		break;
1160	case OMAPFB_GET_COLOR_KEY:
1161		if ((r = omapfb_get_color_key(fbdev, &p.color_key)) < 0)
1162			break;
1163		if (copy_to_user((void __user *)arg, &p.color_key,
1164				 sizeof(p.color_key)))
1165			r = -EFAULT;
1166		break;
1167	case OMAPFB_GET_CAPS:
1168		omapfb_get_caps(fbdev, plane->idx, &p.caps);
1169		if (copy_to_user((void __user *)arg, &p.caps, sizeof(p.caps)))
1170			r = -EFAULT;
1171		break;
1172	case OMAPFB_LCD_TEST:
1173		{
1174			int test_num;
1175
1176			if (get_user(test_num, (int __user *)arg)) {
1177				r = -EFAULT;
1178				break;
1179			}
1180			if (!fbdev->panel->run_test) {
1181				r = -EINVAL;
1182				break;
1183			}
1184			r = fbdev->panel->run_test(fbdev->panel, test_num);
1185			break;
1186		}
1187	case OMAPFB_CTRL_TEST:
1188		{
1189			int test_num;
1190
1191			if (get_user(test_num, (int __user *)arg)) {
1192				r = -EFAULT;
1193				break;
1194			}
1195			if (!fbdev->ctrl->run_test) {
1196				r = -EINVAL;
1197				break;
1198			}
1199			r = fbdev->ctrl->run_test(test_num);
1200			break;
1201		}
1202	default:
1203		r = -EINVAL;
1204	}
1205
1206	return r;
1207}
1208
1209static int omapfb_mmap(struct fb_info *info, struct vm_area_struct *vma)
1210{
1211 struct omapfb_plane_struct *plane = info->par;
1212 struct omapfb_device *fbdev = plane->fbdev;
1213 int r;
1214
1215 omapfb_rqueue_lock(fbdev);
1216 r = fbdev->ctrl->mmap(info, vma);
1217 omapfb_rqueue_unlock(fbdev);
1218
1219 return r;
1220}
1221
1222/*
1223 * Callback table for the frame buffer framework. Some of these pointers
1224 * will be changed according to the current setting of fb_info->accel_flags.
1225 */
static struct fb_ops omapfb_ops = {
	.owner = THIS_MODULE,
	.fb_open = omapfb_open,
	.fb_release = omapfb_release,
	.fb_setcolreg = omapfb_setcolreg,
	.fb_setcmap = omapfb_setcmap,
	/* Unaccelerated cfb_* helpers; may be overridden at runtime
	 * depending on fb_info->accel_flags (see comment above). */
	.fb_fillrect = cfb_fillrect,
	.fb_copyarea = cfb_copyarea,
	.fb_imageblit = cfb_imageblit,
	.fb_blank = omapfb_blank,
	.fb_ioctl = omapfb_ioctl,
	.fb_check_var = omapfb_check_var,
	.fb_set_par = omapfb_set_par,
	.fb_rotate = omapfb_rotate,
	.fb_pan_display = omapfb_pan_display,
	/* .fb_mmap is filled in by omapfb_do_probe() when the LCD
	 * controller supplies an mmap hook. */
};
1242
1243/*
1244 * ---------------------------------------------------------------------------
1245 * Sysfs interface
1246 * ---------------------------------------------------------------------------
1247 */
1248/* omapfbX sysfs entries */
1249static ssize_t omapfb_show_caps_num(struct device *dev,
1250 struct device_attribute *attr, char *buf)
1251{
1252 struct omapfb_device *fbdev = (struct omapfb_device *)dev->driver_data;
1253 int plane;
1254 size_t size;
1255 struct omapfb_caps caps;
1256
1257 plane = 0;
1258 size = 0;
1259 while (size < PAGE_SIZE && plane < OMAPFB_PLANE_NUM) {
1260 omapfb_get_caps(fbdev, plane, &caps);
1261 size += snprintf(&buf[size], PAGE_SIZE - size,
1262 "plane#%d %#010x %#010x %#010x\n",
1263 plane, caps.ctrl, caps.plane_color, caps.wnd_color);
1264 plane++;
1265 }
1266 return size;
1267}
1268
1269static ssize_t omapfb_show_caps_text(struct device *dev,
1270 struct device_attribute *attr, char *buf)
1271{
1272 struct omapfb_device *fbdev = (struct omapfb_device *)dev->driver_data;
1273 int i;
1274 struct omapfb_caps caps;
1275 int plane;
1276 size_t size;
1277
1278 plane = 0;
1279 size = 0;
1280 while (size < PAGE_SIZE && plane < OMAPFB_PLANE_NUM) {
1281 omapfb_get_caps(fbdev, plane, &caps);
1282 size += snprintf(&buf[size], PAGE_SIZE - size,
1283 "plane#%d:\n", plane);
1284 for (i = 0; i < ARRAY_SIZE(ctrl_caps) &&
1285 size < PAGE_SIZE; i++) {
1286 if (ctrl_caps[i].flag & caps.ctrl)
1287 size += snprintf(&buf[size], PAGE_SIZE - size,
1288 " %s\n", ctrl_caps[i].name);
1289 }
1290 size += snprintf(&buf[size], PAGE_SIZE - size,
1291 " plane colors:\n");
1292 for (i = 0; i < ARRAY_SIZE(color_caps) &&
1293 size < PAGE_SIZE; i++) {
1294 if (color_caps[i].flag & caps.plane_color)
1295 size += snprintf(&buf[size], PAGE_SIZE - size,
1296 " %s\n", color_caps[i].name);
1297 }
1298 size += snprintf(&buf[size], PAGE_SIZE - size,
1299 " window colors:\n");
1300 for (i = 0; i < ARRAY_SIZE(color_caps) &&
1301 size < PAGE_SIZE; i++) {
1302 if (color_caps[i].flag & caps.wnd_color)
1303 size += snprintf(&buf[size], PAGE_SIZE - size,
1304 " %s\n", color_caps[i].name);
1305 }
1306
1307 plane++;
1308 }
1309 return size;
1310}
1311
1312static DEVICE_ATTR(caps_num, 0444, omapfb_show_caps_num, NULL);
1313static DEVICE_ATTR(caps_text, 0444, omapfb_show_caps_text, NULL);
1314
1315/* panel sysfs entries */
1316static ssize_t omapfb_show_panel_name(struct device *dev,
1317 struct device_attribute *attr, char *buf)
1318{
1319 struct omapfb_device *fbdev = (struct omapfb_device *)dev->driver_data;
1320
1321 return snprintf(buf, PAGE_SIZE, "%s\n", fbdev->panel->name);
1322}
1323
1324static ssize_t omapfb_show_bklight_level(struct device *dev,
1325 struct device_attribute *attr,
1326 char *buf)
1327{
1328 struct omapfb_device *fbdev = (struct omapfb_device *)dev->driver_data;
1329 int r;
1330
1331 if (fbdev->panel->get_bklight_level) {
1332 r = snprintf(buf, PAGE_SIZE, "%d\n",
1333 fbdev->panel->get_bklight_level(fbdev->panel));
1334 } else
1335 r = -ENODEV;
1336 return r;
1337}
1338
1339static ssize_t omapfb_store_bklight_level(struct device *dev,
1340 struct device_attribute *attr,
1341 const char *buf, size_t size)
1342{
1343 struct omapfb_device *fbdev = (struct omapfb_device *)dev->driver_data;
1344 int r;
1345
1346 if (fbdev->panel->set_bklight_level) {
1347 unsigned int level;
1348
1349 if (sscanf(buf, "%10d", &level) == 1) {
1350 r = fbdev->panel->set_bklight_level(fbdev->panel,
1351 level);
1352 } else
1353 r = -EINVAL;
1354 } else
1355 r = -ENODEV;
1356 return r ? r : size;
1357}
1358
1359static ssize_t omapfb_show_bklight_max(struct device *dev,
1360 struct device_attribute *attr, char *buf)
1361{
1362 struct omapfb_device *fbdev = (struct omapfb_device *)dev->driver_data;
1363 int r;
1364
1365 if (fbdev->panel->get_bklight_level) {
1366 r = snprintf(buf, PAGE_SIZE, "%d\n",
1367 fbdev->panel->get_bklight_max(fbdev->panel));
1368 } else
1369 r = -ENODEV;
1370 return r;
1371}
1372
/* "panel" attribute group: name (read-only), backlight level
 * (read-write) and backlight maximum (read-only).  __ATTR is used for
 * "name" to avoid a dev_attr_name identifier clash with the ctrl
 * group's attribute below. */
static struct device_attribute dev_attr_panel_name =
	__ATTR(name, 0444, omapfb_show_panel_name, NULL);
static DEVICE_ATTR(backlight_level, 0664,
		 omapfb_show_bklight_level, omapfb_store_bklight_level);
static DEVICE_ATTR(backlight_max, 0444, omapfb_show_bklight_max, NULL);

static struct attribute *panel_attrs[] = {
	&dev_attr_panel_name.attr,
	&dev_attr_backlight_level.attr,
	&dev_attr_backlight_max.attr,
	NULL,
};

/* Registered under the "panel" subdirectory of the device. */
static struct attribute_group panel_attr_grp = {
	.name = "panel",
	.attrs = panel_attrs,
};
1390
1391/* ctrl sysfs entries */
1392static ssize_t omapfb_show_ctrl_name(struct device *dev,
1393 struct device_attribute *attr, char *buf)
1394{
1395 struct omapfb_device *fbdev = (struct omapfb_device *)dev->driver_data;
1396
1397 return snprintf(buf, PAGE_SIZE, "%s\n", fbdev->ctrl->name);
1398}
1399
/* "ctrl" attribute group: just the controller name (read-only). */
static struct device_attribute dev_attr_ctrl_name =
	__ATTR(name, 0444, omapfb_show_ctrl_name, NULL);

static struct attribute *ctrl_attrs[] = {
	&dev_attr_ctrl_name.attr,
	NULL,
};

/* Registered under the "ctrl" subdirectory of the device. */
static struct attribute_group ctrl_attr_grp = {
	.name = "ctrl",
	.attrs = ctrl_attrs,
};
1412
1413static int omapfb_register_sysfs(struct omapfb_device *fbdev)
1414{
1415 int r;
1416
1417 if ((r = device_create_file(fbdev->dev, &dev_attr_caps_num)))
1418 goto fail0;
1419
1420 if ((r = device_create_file(fbdev->dev, &dev_attr_caps_text)))
1421 goto fail1;
1422
1423 if ((r = sysfs_create_group(&fbdev->dev->kobj, &panel_attr_grp)))
1424 goto fail2;
1425
1426 if ((r = sysfs_create_group(&fbdev->dev->kobj, &ctrl_attr_grp)))
1427 goto fail3;
1428
1429 return 0;
1430fail3:
1431 sysfs_remove_group(&fbdev->dev->kobj, &panel_attr_grp);
1432fail2:
1433 device_remove_file(fbdev->dev, &dev_attr_caps_text);
1434fail1:
1435 device_remove_file(fbdev->dev, &dev_attr_caps_num);
1436fail0:
1437 dev_err(fbdev->dev, "unable to register sysfs interface\n");
1438 return r;
1439}
1440
1441static void omapfb_unregister_sysfs(struct omapfb_device *fbdev)
1442{
1443 sysfs_remove_group(&fbdev->dev->kobj, &ctrl_attr_grp);
1444 sysfs_remove_group(&fbdev->dev->kobj, &panel_attr_grp);
1445 device_remove_file(fbdev->dev, &dev_attr_caps_num);
1446 device_remove_file(fbdev->dev, &dev_attr_caps_text);
1447}
1448
1449/*
1450 * ---------------------------------------------------------------------------
1451 * LDM callbacks
1452 * ---------------------------------------------------------------------------
1453 */
1454/* Initialize system fb_info object and set the default video mode.
1455 * The frame buffer memory already allocated by lcddma_init
1456 */
1457static int fbinfo_init(struct omapfb_device *fbdev, struct fb_info *info)
1458{
1459 struct fb_var_screeninfo *var = &info->var;
1460 struct fb_fix_screeninfo *fix = &info->fix;
1461 int r = 0;
1462
1463 info->fbops = &omapfb_ops;
1464 info->flags = FBINFO_FLAG_DEFAULT;
1465
1466 strncpy(fix->id, MODULE_NAME, sizeof(fix->id));
1467
1468 info->pseudo_palette = fbdev->pseudo_palette;
1469
1470 var->accel_flags = def_accel ? FB_ACCELF_TEXT : 0;
1471 var->xres = def_vxres;
1472 var->yres = def_vyres;
1473 var->xres_virtual = def_vxres;
1474 var->yres_virtual = def_vyres;
1475 var->rotate = def_rotate;
1476 var->bits_per_pixel = fbdev->panel->bpp;
1477
1478 set_fb_var(info, var);
1479 set_fb_fix(info);
1480
1481 r = fb_alloc_cmap(&info->cmap, 16, 0);
1482 if (r != 0)
1483 dev_err(fbdev->dev, "unable to allocate color map memory\n");
1484
1485 return r;
1486}
1487
/* Undo fbinfo_init(): free the color map allocated for this fb_info.
 * The fb_info object itself is released by the caller. */
static void fbinfo_cleanup(struct omapfb_device *fbdev, struct fb_info *fbi)
{
	fb_dealloc_cmap(&fbi->cmap);
}
1493
1494static void planes_cleanup(struct omapfb_device *fbdev)
1495{
1496 int i;
1497
1498 for (i = 0; i < fbdev->mem_desc.region_cnt; i++) {
1499 if (fbdev->fb_info[i] == NULL)
1500 break;
1501 fbinfo_cleanup(fbdev, fbdev->fb_info[i]);
1502 framebuffer_release(fbdev->fb_info[i]);
1503 }
1504}
1505
1506static int planes_init(struct omapfb_device *fbdev)
1507{
1508 struct fb_info *fbi;
1509 int i;
1510 int r;
1511
1512 for (i = 0; i < fbdev->mem_desc.region_cnt; i++) {
1513 struct omapfb_plane_struct *plane;
1514 fbi = framebuffer_alloc(sizeof(struct omapfb_plane_struct),
1515 fbdev->dev);
1516 if (fbi == NULL) {
1517 dev_err(fbdev->dev,
1518 "unable to allocate memory for plane info\n");
1519 planes_cleanup(fbdev);
1520 return -ENOMEM;
1521 }
1522 plane = fbi->par;
1523 plane->idx = i;
1524 plane->fbdev = fbdev;
1525 plane->info.mirror = def_mirror;
1526 fbdev->fb_info[i] = fbi;
1527
1528 if ((r = fbinfo_init(fbdev, fbi)) < 0) {
1529 framebuffer_release(fbi);
1530 planes_cleanup(fbdev);
1531 return r;
1532 }
1533 plane->info.out_width = fbi->var.xres;
1534 plane->info.out_height = fbi->var.yres;
1535 }
1536 return 0;
1537}
1538
1539/*
1540 * Free driver resources. Can be called to rollback an aborted initialization
1541 * sequence.
1542 */
/* Tear down everything initialized up to @state.  @state mirrors the
 * init_state counter in omapfb_do_probe(): each case undoes one init
 * step and deliberately falls through to the previous one, so a
 * partially-completed probe is unwound exactly. */
static void omapfb_free_resources(struct omapfb_device *fbdev, int state)
{
	int i;

	switch (state) {
	case OMAPFB_ACTIVE:
		for (i = 0; i < fbdev->mem_desc.region_cnt; i++)
			unregister_framebuffer(fbdev->fb_info[i]);
		/* fall through */
	case 7:
		omapfb_unregister_sysfs(fbdev);
		/* fall through */
	case 6:
		fbdev->panel->disable(fbdev->panel);
		/* fall through */
	case 5:
		omapfb_set_update_mode(fbdev, OMAPFB_UPDATE_DISABLED);
		/* fall through */
	case 4:
		planes_cleanup(fbdev);
		/* fall through */
	case 3:
		ctrl_cleanup(fbdev);
		/* fall through */
	case 2:
		fbdev->panel->cleanup(fbdev->panel);
		/* fall through */
	case 1:
		dev_set_drvdata(fbdev->dev, NULL);
		kfree(fbdev);
		/* fall through */
	case 0:
		/* nothing to free */
		break;
	default:
		BUG();
	}
}
1573
1574static int omapfb_find_ctrl(struct omapfb_device *fbdev)
1575{
1576 struct omapfb_platform_data *conf;
1577 char name[17];
1578 int i;
1579
1580 conf = fbdev->dev->platform_data;
1581
1582 fbdev->ctrl = NULL;
1583
1584 strncpy(name, conf->lcd.ctrl_name, sizeof(name) - 1);
1585 name[sizeof(name) - 1] = '\0';
1586
1587 if (strcmp(name, "internal") == 0) {
1588 fbdev->ctrl = fbdev->int_ctrl;
1589 return 0;
1590 }
1591
1592 for (i = 0; i < ARRAY_SIZE(ctrls); i++) {
1593 dev_dbg(fbdev->dev, "ctrl %s\n", ctrls[i]->name);
1594 if (strcmp(ctrls[i]->name, name) == 0) {
1595 fbdev->ctrl = ctrls[i];
1596 break;
1597 }
1598 }
1599
1600 if (fbdev->ctrl == NULL) {
1601 dev_dbg(fbdev->dev, "ctrl %s not supported\n", name);
1602 return -1;
1603 }
1604
1605 return 0;
1606}
1607
/* Sanity check that the selected controller and panel provide every
 * callback the driver depends on; BUG out early instead of crashing on
 * a NULL call later.  _C/_P are shorthand for "controller/panel hook
 * is non-NULL". */
static void check_required_callbacks(struct omapfb_device *fbdev)
{
#define _C(x) (fbdev->ctrl->x != NULL)
#define _P(x) (fbdev->panel->x != NULL)
	BUG_ON(fbdev->ctrl == NULL || fbdev->panel == NULL);
	BUG_ON(!(_C(init) && _C(cleanup) && _C(get_caps) &&
		 _C(set_update_mode) && _C(setup_plane) && _C(enable_plane) &&
		 _P(init) && _P(cleanup) && _P(enable) && _P(disable) &&
		 _P(get_caps)));
#undef _P
#undef _C
}
1620
1621/*
1622 * Called by LDM binding to probe and attach a new device.
1623 * Initialization sequence:
1624 * 1. allocate system omapfb_device structure
1625 * 2. select controller type according to platform configuration
1626 * init LCD panel
1627 * 3. init LCD controller and LCD DMA
1628 * 4. init system fb_info structure for all planes
1629 * 5. setup video mode for first plane and enable it
1630 * 6. enable LCD panel
1631 * 7. register sysfs attributes
1632 * OMAPFB_ACTIVE: register system fb_info structure for all planes
1633 */
static int omapfb_do_probe(struct platform_device *pdev,
				struct lcd_panel *panel)
{
	struct omapfb_device *fbdev = NULL;
	/* Counts completed init steps; on error it tells
	 * omapfb_free_resources() exactly how much to unwind. */
	int init_state;
	unsigned long phz, hhz, vhz;
	unsigned long vram;
	int i;
	int r = 0;

	init_state = 0;

	/* This driver expects no MMIO/IRQ resources of its own. */
	if (pdev->num_resources != 0) {
		dev_err(&pdev->dev, "probed for an unknown device\n");
		r = -ENODEV;
		goto cleanup;
	}

	if (pdev->dev.platform_data == NULL) {
		dev_err(&pdev->dev, "missing platform data\n");
		r = -ENOENT;
		goto cleanup;
	}

	fbdev = kzalloc(sizeof(struct omapfb_device), GFP_KERNEL);
	if (fbdev == NULL) {
		dev_err(&pdev->dev,
			"unable to allocate memory for device info\n");
		r = -ENOMEM;
		goto cleanup;
	}
	init_state++;	/* state 1: fbdev allocated */

	fbdev->dev = &pdev->dev;
	fbdev->panel = panel;
	platform_set_drvdata(pdev, fbdev);

	mutex_init(&fbdev->rqueue_mutex);

	/* Pick the internal controller / external interface matching the
	 * SoC generation this kernel is built for. */
#ifdef CONFIG_ARCH_OMAP1
	fbdev->int_ctrl = &omap1_int_ctrl;
#ifdef CONFIG_FB_OMAP_LCDC_EXTERNAL
	fbdev->ext_if = &omap1_ext_if;
#endif
#else	/* OMAP2 */
	fbdev->int_ctrl = &omap2_int_ctrl;
#ifdef CONFIG_FB_OMAP_LCDC_EXTERNAL
	fbdev->ext_if = &omap2_ext_if;
#endif
#endif
	if (omapfb_find_ctrl(fbdev) < 0) {
		dev_err(fbdev->dev,
			"LCD controller not found, board not supported\n");
		r = -ENODEV;
		goto cleanup;
	}

	r = fbdev->panel->init(fbdev->panel, fbdev);
	if (r)
		goto cleanup;

	pr_info("omapfb: configured for panel %s\n", fbdev->panel->name);

	/* Default virtual resolution falls back to the panel size. */
	def_vxres = def_vxres ? : fbdev->panel->x_res;
	def_vyres = def_vyres ? : fbdev->panel->y_res;

	init_state++;	/* state 2: panel initialized */

	r = ctrl_init(fbdev);
	if (r)
		goto cleanup;
	/* Only install the mmap hook if the controller supports it. */
	if (fbdev->ctrl->mmap != NULL)
		omapfb_ops.fb_mmap = omapfb_mmap;
	init_state++;	/* state 3: controller initialized */

	check_required_callbacks(fbdev);

	r = planes_init(fbdev);
	if (r)
		goto cleanup;
	init_state++;	/* state 4: plane fb_infos created */

#ifdef CONFIG_FB_OMAP_DMA_TUNE
	/* Set DMA priority for EMIFF access to highest */
	if (cpu_class_is_omap1())
		omap_set_dma_priority(0, OMAP_DMA_PORT_EMIFF, 15);
#endif

	r = ctrl_change_mode(fbdev->fb_info[0]);
	if (r) {
		dev_err(fbdev->dev, "mode setting failed\n");
		goto cleanup;
	}

	/* GFX plane is enabled by default */
	r = fbdev->ctrl->enable_plane(OMAPFB_PLANE_GFX, 1);
	if (r)
		goto cleanup;

	omapfb_set_update_mode(fbdev, manual_update ?
				   OMAPFB_MANUAL_UPDATE : OMAPFB_AUTO_UPDATE);
	init_state++;	/* state 5: update mode set */

	r = fbdev->panel->enable(fbdev->panel);
	if (r)
		goto cleanup;
	init_state++;	/* state 6: panel enabled */

	r = omapfb_register_sysfs(fbdev);
	if (r)
		goto cleanup;
	init_state++;	/* state 7: sysfs registered */

	vram = 0;
	for (i = 0; i < fbdev->mem_desc.region_cnt; i++) {
		r = register_framebuffer(fbdev->fb_info[i]);
		if (r != 0) {
			dev_err(fbdev->dev,
				"registering framebuffer %d failed\n", i);
			goto cleanup;
		}
		vram += fbdev->mem_desc.region[i].size;
	}

	fbdev->state = OMAPFB_ACTIVE;

	/* Derive pixel/horizontal/vertical frequencies from the panel
	 * timings for the boot banner below. */
	panel = fbdev->panel;
	phz = panel->pixel_clock * 1000;
	hhz = phz * 10 / (panel->hfp + panel->x_res + panel->hbp + panel->hsw);
	vhz = hhz / (panel->vfp + panel->y_res + panel->vbp + panel->vsw);

	omapfb_dev = fbdev;

	pr_info("omapfb: Framebuffer initialized. Total vram %lu planes %d\n",
			vram, fbdev->mem_desc.region_cnt);
	pr_info("omapfb: Pixclock %lu kHz hfreq %lu.%lu kHz "
			"vfreq %lu.%lu Hz\n",
			phz / 1000, hhz / 10000, hhz % 10, vhz / 10, vhz % 10);

	return 0;

cleanup:
	omapfb_free_resources(fbdev, init_state);

	return r;
}
1780
1781static int omapfb_probe(struct platform_device *pdev)
1782{
1783 BUG_ON(fbdev_pdev != NULL);
1784
1785 /* Delay actual initialization until the LCD is registered */
1786 fbdev_pdev = pdev;
1787 if (fbdev_panel != NULL)
1788 omapfb_do_probe(fbdev_pdev, fbdev_panel);
1789 return 0;
1790}
1791
1792void omapfb_register_panel(struct lcd_panel *panel)
1793{
1794 BUG_ON(fbdev_panel != NULL);
1795
1796 fbdev_panel = panel;
1797 if (fbdev_pdev != NULL)
1798 omapfb_do_probe(fbdev_pdev, fbdev_panel);
1799}
1800
/* Called when the device is being detached from the driver */
static int omapfb_remove(struct platform_device *pdev)
{
	struct omapfb_device *fbdev = platform_get_drvdata(pdev);
	/* Capture how far init got before flipping the state, so
	 * omapfb_free_resources() unwinds exactly the completed steps. */
	enum omapfb_state saved_state = fbdev->state;

	/* FIXME: wait till completion of pending events */

	fbdev->state = OMAPFB_DISABLED;
	omapfb_free_resources(fbdev, saved_state);

	return 0;
}
1814
1815/* PM suspend */
1816static int omapfb_suspend(struct platform_device *pdev, pm_message_t mesg)
1817{
1818 struct omapfb_device *fbdev = platform_get_drvdata(pdev);
1819
1820 omapfb_blank(VESA_POWERDOWN, fbdev->fb_info[0]);
1821
1822 return 0;
1823}
1824
1825/* PM resume */
1826static int omapfb_resume(struct platform_device *pdev)
1827{
1828 struct omapfb_device *fbdev = platform_get_drvdata(pdev);
1829
1830 omapfb_blank(VESA_NO_BLANKING, fbdev->fb_info[0]);
1831 return 0;
1832}
1833
/* Platform driver glue; bound by name to the board's omapfb device. */
static struct platform_driver omapfb_driver = {
	.probe = omapfb_probe,
	.remove = omapfb_remove,
	.suspend = omapfb_suspend,
	.resume = omapfb_resume,
	.driver = {
		.name = MODULE_NAME,
		.owner = THIS_MODULE,
	},
};
1844
1845#ifndef MODULE
1846
1847/* Process kernel command line parameters */
1848static int __init omapfb_setup(char *options)
1849{
1850 char *this_opt = NULL;
1851 int r = 0;
1852
1853 pr_debug("omapfb: options %s\n", options);
1854
1855 if (!options || !*options)
1856 return 0;
1857
1858 while (!r && (this_opt = strsep(&options, ",")) != NULL) {
1859 if (!strncmp(this_opt, "accel", 5))
1860 def_accel = 1;
1861 else if (!strncmp(this_opt, "vram:", 5)) {
1862 char *suffix;
1863 unsigned long vram;
1864 vram = (simple_strtoul(this_opt + 5, &suffix, 0));
1865 switch (suffix[0]) {
1866 case '\0':
1867 break;
1868 case 'm':
1869 case 'M':
1870 vram *= 1024;
1871 /* Fall through */
1872 case 'k':
1873 case 'K':
1874 vram *= 1024;
1875 break;
1876 default:
1877 pr_debug("omapfb: invalid vram suffix %c\n",
1878 suffix[0]);
1879 r = -1;
1880 }
1881 def_vram[def_vram_cnt++] = vram;
1882 }
1883 else if (!strncmp(this_opt, "vxres:", 6))
1884 def_vxres = simple_strtoul(this_opt + 6, NULL, 0);
1885 else if (!strncmp(this_opt, "vyres:", 6))
1886 def_vyres = simple_strtoul(this_opt + 6, NULL, 0);
1887 else if (!strncmp(this_opt, "rotate:", 7))
1888 def_rotate = (simple_strtoul(this_opt + 7, NULL, 0));
1889 else if (!strncmp(this_opt, "mirror:", 7))
1890 def_mirror = (simple_strtoul(this_opt + 7, NULL, 0));
1891 else if (!strncmp(this_opt, "manual_update", 13))
1892 manual_update = 1;
1893 else {
1894 pr_debug("omapfb: invalid option\n");
1895 r = -1;
1896 }
1897 }
1898
1899 return r;
1900}
1901
1902#endif
1903
1904/* Register both the driver and the device */
1905static int __init omapfb_init(void)
1906{
1907#ifndef MODULE
1908 char *option;
1909
1910 if (fb_get_options("omapfb", &option))
1911 return -ENODEV;
1912 omapfb_setup(option);
1913#endif
1914 /* Register the driver with LDM */
1915 if (platform_driver_register(&omapfb_driver)) {
1916 pr_debug("failed to register omapfb driver\n");
1917 return -ENODEV;
1918 }
1919
1920 return 0;
1921}
1922
/* Module exit point: detach the driver from the platform bus. */
static void __exit omapfb_cleanup(void)
{
	platform_driver_unregister(&omapfb_driver);
}
1927
1928module_param_named(accel, def_accel, uint, 0664);
1929module_param_array_named(vram, def_vram, ulong, &def_vram_cnt, 0664);
1930module_param_named(vxres, def_vxres, long, 0664);
1931module_param_named(vyres, def_vyres, long, 0664);
1932module_param_named(rotate, def_rotate, uint, 0664);
1933module_param_named(mirror, def_mirror, uint, 0664);
1934module_param_named(manual_update, manual_update, bool, 0664);
1935
1936module_init(omapfb_init);
1937module_exit(omapfb_cleanup);
1938
1939MODULE_DESCRIPTION("TI OMAP framebuffer driver");
1940MODULE_AUTHOR("Imre Deak <imre.deak@nokia.com>");
1941MODULE_LICENSE("GPL");
diff --git a/drivers/video/omap/rfbi.c b/drivers/video/omap/rfbi.c
new file mode 100644
index 000000000000..2b4269813b22
--- /dev/null
+++ b/drivers/video/omap/rfbi.c
@@ -0,0 +1,588 @@
1/*
2 * OMAP2 Remote Frame Buffer Interface support
3 *
4 * Copyright (C) 2005 Nokia Corporation
5 * Author: Juha Yrjölä <juha.yrjola@nokia.com>
6 * Imre Deak <imre.deak@nokia.com>
7 *
8 * This program is free software; you can redistribute it and/or modify it
9 * under the terms of the GNU General Public License as published by the
10 * Free Software Foundation; either version 2 of the License, or (at your
11 * option) any later version.
12 *
13 * This program is distributed in the hope that it will be useful, but
14 * WITHOUT ANY WARRANTY; without even the implied warranty of
15 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
16 * General Public License for more details.
17 *
18 * You should have received a copy of the GNU General Public License along
19 * with this program; if not, write to the Free Software Foundation, Inc.,
20 * 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
21 */
22#include <linux/module.h>
23#include <linux/delay.h>
24#include <linux/i2c.h>
25#include <linux/err.h>
26#include <linux/interrupt.h>
27#include <linux/clk.h>
28#include <linux/io.h>
29
30#include <asm/arch/omapfb.h>
31
32#include "dispc.h"
33
34/* To work around an RFBI transfer rate limitation */
35#define OMAP_RFBI_RATE_LIMIT 1
36
37#define RFBI_BASE 0x48050800
38#define RFBI_REVISION 0x0000
39#define RFBI_SYSCONFIG 0x0010
40#define RFBI_SYSSTATUS 0x0014
41#define RFBI_CONTROL 0x0040
42#define RFBI_PIXEL_CNT 0x0044
43#define RFBI_LINE_NUMBER 0x0048
44#define RFBI_CMD 0x004c
45#define RFBI_PARAM 0x0050
46#define RFBI_DATA 0x0054
47#define RFBI_READ 0x0058
48#define RFBI_STATUS 0x005c
49#define RFBI_CONFIG0 0x0060
50#define RFBI_ONOFF_TIME0 0x0064
51#define RFBI_CYCLE_TIME0 0x0068
52#define RFBI_DATA_CYCLE1_0 0x006c
53#define RFBI_DATA_CYCLE2_0 0x0070
54#define RFBI_DATA_CYCLE3_0 0x0074
55#define RFBI_VSYNC_WIDTH 0x0090
56#define RFBI_HSYNC_WIDTH 0x0094
57
58#define DISPC_BASE 0x48050400
59#define DISPC_CONTROL 0x0040
60
/* Single-instance RFBI module state. */
static struct {
	u32 base;			/* register base, used by the reg accessors */
	void (*lcdc_callback)(void *data);	/* completion callback for transfers */
	void *lcdc_callback_data;
	unsigned long l4_khz;		/* L4 interface clock in kHz (timing base) */
	int bits_per_cycle;		/* external bus width per cycle: 8 or 16 */
	struct omapfb_device *fbdev;
	struct clk *dss_ick;		/* DSS interface clock */
	struct clk *dss1_fck;		/* DSS1 functional clock */
	unsigned tearsync_pin_cnt;	/* 1 or 2, set by rfbi_setup_tearsync() */
	unsigned tearsync_mode;		/* 0 = off, else the active pin count */
} rfbi;
73
/* Write @val to the RFBI register at offset @idx from rfbi.base. */
static inline void rfbi_write_reg(int idx, u32 val)
{
	__raw_writel(val, rfbi.base + idx);
}
78
/* Read the RFBI register at offset @idx from rfbi.base. */
static inline u32 rfbi_read_reg(int idx)
{
	return __raw_readl(rfbi.base + idx);
}
83
84static int rfbi_get_clocks(void)
85{
86 if (IS_ERR((rfbi.dss_ick = clk_get(rfbi.fbdev->dev, "dss_ick")))) {
87 dev_err(rfbi.fbdev->dev, "can't get dss_ick");
88 return PTR_ERR(rfbi.dss_ick);
89 }
90
91 if (IS_ERR((rfbi.dss1_fck = clk_get(rfbi.fbdev->dev, "dss1_fck")))) {
92 dev_err(rfbi.fbdev->dev, "can't get dss1_fck");
93 clk_put(rfbi.dss_ick);
94 return PTR_ERR(rfbi.dss1_fck);
95 }
96
97 return 0;
98}
99
/* Release the clocks acquired by rfbi_get_clocks(), in reverse order. */
static void rfbi_put_clocks(void)
{
	clk_put(rfbi.dss1_fck);
	clk_put(rfbi.dss_ick);
}
105
106static void rfbi_enable_clocks(int enable)
107{
108 if (enable) {
109 clk_enable(rfbi.dss_ick);
110 clk_enable(rfbi.dss1_fck);
111 } else {
112 clk_disable(rfbi.dss1_fck);
113 clk_disable(rfbi.dss_ick);
114 }
115}
116
117
#ifdef VERBOSE
/* Debug helper: decode and dump the current RFBI timing registers. */
static void rfbi_print_timings(void)
{
	u32 l;
	u32 time;

	l = rfbi_read_reg(RFBI_CONFIG0);
	/* One extif tick in picoseconds; CONFIG0 bit 4 selects the
	 * divide-by-two clock, doubling the tick time. */
	time = 1000000000 / rfbi.l4_khz;
	if (l & (1 << 4))
		time *= 2;

	dev_dbg(rfbi.fbdev->dev, "Tick time %u ps\n", time);
	l = rfbi_read_reg(RFBI_ONOFF_TIME0);
	dev_dbg(rfbi.fbdev->dev,
		"CSONTIME %d, CSOFFTIME %d, WEONTIME %d, WEOFFTIME %d, "
		"REONTIME %d, REOFFTIME %d\n",
		l & 0x0f, (l >> 4) & 0x3f, (l >> 10) & 0x0f, (l >> 14) & 0x3f,
		(l >> 20) & 0x0f, (l >> 24) & 0x3f);

	l = rfbi_read_reg(RFBI_CYCLE_TIME0);
	dev_dbg(rfbi.fbdev->dev,
		"WECYCLETIME %d, RECYCLETIME %d, CSPULSEWIDTH %d, "
		"ACCESSTIME %d\n",
		(l & 0x3f), (l >> 6) & 0x3f, (l >> 12) & 0x3f,
		(l >> 22) & 0x3f);
}
#else
static void rfbi_print_timings(void) {}
#endif
147
/* Program the pre-packed timing words into the RFBI registers.  The
 * timings must first have been converted by rfbi_convert_timings()
 * (tim[0] -> ONOFF_TIME0, tim[1] -> CYCLE_TIME0, tim[2] -> the
 * clock-divider select in CONFIG0 bit 4). */
static void rfbi_set_timings(const struct extif_timings *t)
{
	u32 l;

	BUG_ON(!t->converted);

	rfbi_enable_clocks(1);
	rfbi_write_reg(RFBI_ONOFF_TIME0, t->tim[0]);
	rfbi_write_reg(RFBI_CYCLE_TIME0, t->tim[1]);

	l = rfbi_read_reg(RFBI_CONFIG0);
	l &= ~(1 << 4);
	l |= (t->tim[2] ? 1 : 0) << 4;
	rfbi_write_reg(RFBI_CONFIG0, l);

	rfbi_print_timings();
	rfbi_enable_clocks(0);
}
166
/* Report the base clock period in picoseconds and the maximum clock
 * divider supported by this interface. */
static void rfbi_get_clk_info(u32 *clk_period, u32 *max_clk_div)
{
	*clk_period = 1000000000 / rfbi.l4_khz;
	*max_clk_div = 2;
}
172
173static int ps_to_rfbi_ticks(int time, int div)
174{
175 unsigned long tick_ps;
176 int ret;
177
178 /* Calculate in picosecs to yield more exact results */
179 tick_ps = 1000000000 / (rfbi.l4_khz) * div;
180
181 ret = (time + tick_ps - 1) / tick_ps;
182
183 return ret;
184}
185
186#ifdef OMAP_RFBI_RATE_LIMIT
187static unsigned long rfbi_get_max_tx_rate(void)
188{
189 unsigned long l4_rate, dss1_rate;
190 int min_l4_ticks = 0;
191 int i;
192
193 /* According to TI this can't be calculated so make the
194 * adjustments for a couple of known frequencies and warn for
195 * others.
196 */
197 static const struct {
198 unsigned long l4_clk; /* HZ */
199 unsigned long dss1_clk; /* HZ */
200 unsigned long min_l4_ticks;
201 } ftab[] = {
202 { 55, 132, 7, }, /* 7.86 MPix/s */
203 { 110, 110, 12, }, /* 9.16 MPix/s */
204 { 110, 132, 10, }, /* 11 Mpix/s */
205 { 120, 120, 10, }, /* 12 Mpix/s */
206 { 133, 133, 10, }, /* 13.3 Mpix/s */
207 };
208
209 l4_rate = rfbi.l4_khz / 1000;
210 dss1_rate = clk_get_rate(rfbi.dss1_fck) / 1000000;
211
212 for (i = 0; i < ARRAY_SIZE(ftab); i++) {
213 /* Use a window instead of an exact match, to account
214 * for different DPLL multiplier / divider pairs.
215 */
216 if (abs(ftab[i].l4_clk - l4_rate) < 3 &&
217 abs(ftab[i].dss1_clk - dss1_rate) < 3) {
218 min_l4_ticks = ftab[i].min_l4_ticks;
219 break;
220 }
221 }
222 if (i == ARRAY_SIZE(ftab)) {
223 /* Can't be sure, return anyway the maximum not
224 * rate-limited. This might cause a problem only for the
225 * tearing synchronisation.
226 */
227 dev_err(rfbi.fbdev->dev,
228 "can't determine maximum RFBI transfer rate\n");
229 return rfbi.l4_khz * 1000;
230 }
231 return rfbi.l4_khz * 1000 / min_l4_ticks;
232}
233#else
234static int rfbi_get_max_tx_rate(void)
235{
236 return rfbi.l4_khz * 1000;
237}
238#endif
239
240
/* Convert the picosecond timings in @t into the packed register words
 * tim[0] (ONOFF_TIME0 layout), tim[1] (CYCLE_TIME0 layout) and tim[2]
 * (clock-divider select), clamping values to satisfy the hardware
 * ordering constraints and rejecting any field that overflows its
 * register width.  Returns 0 on success, -1 on an invalid divider or
 * field overflow.  On success t->converted is set, which
 * rfbi_set_timings() asserts. */
static int rfbi_convert_timings(struct extif_timings *t)
{
	u32 l;
	int reon, reoff, weon, weoff, cson, csoff, cs_pulse;
	int actim, recyc, wecyc;
	int div = t->clk_div;

	/* Only dividers 1 and 2 are supported (see rfbi_get_clk_info). */
	if (div <= 0 || div > 2)
		return -1;

	/* Make sure that after conversion it still holds that:
	 * weoff > weon, reoff > reon, recyc >= reoff, wecyc >= weoff,
	 * csoff > cson, csoff >= max(weoff, reoff), actim > reon
	 */
	weon = ps_to_rfbi_ticks(t->we_on_time, div);
	weoff = ps_to_rfbi_ticks(t->we_off_time, div);
	if (weoff <= weon)
		weoff = weon + 1;
	if (weon > 0x0f)
		return -1;
	if (weoff > 0x3f)
		return -1;

	reon = ps_to_rfbi_ticks(t->re_on_time, div);
	reoff = ps_to_rfbi_ticks(t->re_off_time, div);
	if (reoff <= reon)
		reoff = reon + 1;
	if (reon > 0x0f)
		return -1;
	if (reoff > 0x3f)
		return -1;

	cson = ps_to_rfbi_ticks(t->cs_on_time, div);
	csoff = ps_to_rfbi_ticks(t->cs_off_time, div);
	if (csoff <= cson)
		csoff = cson + 1;
	if (csoff < max(weoff, reoff))
		csoff = max(weoff, reoff);
	if (cson > 0x0f)
		return -1;
	if (csoff > 0x3f)
		return -1;

	/* Pack the on/off times; field positions match the decoding in
	 * rfbi_print_timings(). */
	l = cson;
	l |= csoff << 4;
	l |= weon << 10;
	l |= weoff << 14;
	l |= reon << 20;
	l |= reoff << 24;

	t->tim[0] = l;

	actim = ps_to_rfbi_ticks(t->access_time, div);
	if (actim <= reon)
		actim = reon + 1;
	if (actim > 0x3f)
		return -1;

	wecyc = ps_to_rfbi_ticks(t->we_cycle_time, div);
	if (wecyc < weoff)
		wecyc = weoff;
	if (wecyc > 0x3f)
		return -1;

	recyc = ps_to_rfbi_ticks(t->re_cycle_time, div);
	if (recyc < reoff)
		recyc = reoff;
	if (recyc > 0x3f)
		return -1;

	cs_pulse = ps_to_rfbi_ticks(t->cs_pulse_width, div);
	if (cs_pulse > 0x3f)
		return -1;

	/* Pack the cycle times. */
	l = wecyc;
	l |= recyc << 6;
	l |= cs_pulse << 12;
	l |= actim << 22;

	t->tim[1] = l;

	/* 0 = divide-by-1, 1 = divide-by-2 (CONFIG0 bit 4). */
	t->tim[2] = div - 1;

	t->converted = 1;

	return 0;
}
328
329static int rfbi_setup_tearsync(unsigned pin_cnt,
330 unsigned hs_pulse_time, unsigned vs_pulse_time,
331 int hs_pol_inv, int vs_pol_inv, int extif_div)
332{
333 int hs, vs;
334 int min;
335 u32 l;
336
337 if (pin_cnt != 1 && pin_cnt != 2)
338 return -EINVAL;
339
340 hs = ps_to_rfbi_ticks(hs_pulse_time, 1);
341 vs = ps_to_rfbi_ticks(vs_pulse_time, 1);
342 if (hs < 2)
343 return -EDOM;
344 if (pin_cnt == 2)
345 min = 2;
346 else
347 min = 4;
348 if (vs < min)
349 return -EDOM;
350 if (vs == hs)
351 return -EINVAL;
352 rfbi.tearsync_pin_cnt = pin_cnt;
353 dev_dbg(rfbi.fbdev->dev,
354 "setup_tearsync: pins %d hs %d vs %d hs_inv %d vs_inv %d\n",
355 pin_cnt, hs, vs, hs_pol_inv, vs_pol_inv);
356
357 rfbi_enable_clocks(1);
358 rfbi_write_reg(RFBI_HSYNC_WIDTH, hs);
359 rfbi_write_reg(RFBI_VSYNC_WIDTH, vs);
360
361 l = rfbi_read_reg(RFBI_CONFIG0);
362 if (hs_pol_inv)
363 l &= ~(1 << 21);
364 else
365 l |= 1 << 21;
366 if (vs_pol_inv)
367 l &= ~(1 << 20);
368 else
369 l |= 1 << 20;
370 rfbi_enable_clocks(0);
371
372 return 0;
373}
374
375static int rfbi_enable_tearsync(int enable, unsigned line)
376{
377 u32 l;
378
379 dev_dbg(rfbi.fbdev->dev, "tearsync %d line %d mode %d\n",
380 enable, line, rfbi.tearsync_mode);
381 if (line > (1 << 11) - 1)
382 return -EINVAL;
383
384 rfbi_enable_clocks(1);
385 l = rfbi_read_reg(RFBI_CONFIG0);
386 l &= ~(0x3 << 2);
387 if (enable) {
388 rfbi.tearsync_mode = rfbi.tearsync_pin_cnt;
389 l |= rfbi.tearsync_mode << 2;
390 } else
391 rfbi.tearsync_mode = 0;
392 rfbi_write_reg(RFBI_CONFIG0, l);
393 rfbi_write_reg(RFBI_LINE_NUMBER, line);
394 rfbi_enable_clocks(0);
395
396 return 0;
397}
398
399static void rfbi_write_command(const void *buf, unsigned int len)
400{
401 rfbi_enable_clocks(1);
402 if (rfbi.bits_per_cycle == 16) {
403 const u16 *w = buf;
404 BUG_ON(len & 1);
405 for (; len; len -= 2)
406 rfbi_write_reg(RFBI_CMD, *w++);
407 } else {
408 const u8 *b = buf;
409 BUG_ON(rfbi.bits_per_cycle != 8);
410 for (; len; len--)
411 rfbi_write_reg(RFBI_CMD, *b++);
412 }
413 rfbi_enable_clocks(0);
414}
415
416static void rfbi_read_data(void *buf, unsigned int len)
417{
418 rfbi_enable_clocks(1);
419 if (rfbi.bits_per_cycle == 16) {
420 u16 *w = buf;
421 BUG_ON(len & ~1);
422 for (; len; len -= 2) {
423 rfbi_write_reg(RFBI_READ, 0);
424 *w++ = rfbi_read_reg(RFBI_READ);
425 }
426 } else {
427 u8 *b = buf;
428 BUG_ON(rfbi.bits_per_cycle != 8);
429 for (; len; len--) {
430 rfbi_write_reg(RFBI_READ, 0);
431 *b++ = rfbi_read_reg(RFBI_READ);
432 }
433 }
434 rfbi_enable_clocks(0);
435}
436
437static void rfbi_write_data(const void *buf, unsigned int len)
438{
439 rfbi_enable_clocks(1);
440 if (rfbi.bits_per_cycle == 16) {
441 const u16 *w = buf;
442 BUG_ON(len & 1);
443 for (; len; len -= 2)
444 rfbi_write_reg(RFBI_PARAM, *w++);
445 } else {
446 const u8 *b = buf;
447 BUG_ON(rfbi.bits_per_cycle != 8);
448 for (; len; len--)
449 rfbi_write_reg(RFBI_PARAM, *b++);
450 }
451 rfbi_enable_clocks(0);
452}
453
454static void rfbi_transfer_area(int width, int height,
455 void (callback)(void * data), void *data)
456{
457 u32 w;
458
459 BUG_ON(callback == NULL);
460
461 rfbi_enable_clocks(1);
462 omap_dispc_set_lcd_size(width, height);
463
464 rfbi.lcdc_callback = callback;
465 rfbi.lcdc_callback_data = data;
466
467 rfbi_write_reg(RFBI_PIXEL_CNT, width * height);
468
469 w = rfbi_read_reg(RFBI_CONTROL);
470 w |= 1; /* enable */
471 if (!rfbi.tearsync_mode)
472 w |= 1 << 4; /* internal trigger, reset by HW */
473 rfbi_write_reg(RFBI_CONTROL, w);
474
475 omap_dispc_enable_lcd_out(1);
476}
477
478static inline void _stop_transfer(void)
479{
480 u32 w;
481
482 w = rfbi_read_reg(RFBI_CONTROL);
483 rfbi_write_reg(RFBI_CONTROL, w & ~(1 << 0));
484 rfbi_enable_clocks(0);
485}
486
/*
 * DISPC DMA completion handler: stops the RFBI transfer and hands
 * control back to the client via the callback registered in
 * rfbi_transfer_area().
 */
487static void rfbi_dma_callback(void *data)
488{
489	_stop_transfer();
490	rfbi.lcdc_callback(rfbi.lcdc_callback_data);
491}
492
493static void rfbi_set_bits_per_cycle(int bpc)
494{
495 u32 l;
496
497 rfbi_enable_clocks(1);
498 l = rfbi_read_reg(RFBI_CONFIG0);
499 l &= ~(0x03 << 0);
500
501 switch (bpc) {
502 case 8:
503 break;
504 case 16:
505 l |= 3;
506 break;
507 default:
508 BUG();
509 }
510 rfbi_write_reg(RFBI_CONFIG0, l);
511 rfbi.bits_per_cycle = bpc;
512 rfbi_enable_clocks(0);
513}
514
/*
 * Probe-time initialization of the OMAP2 RFBI module: map the register
 * base, acquire clocks, soft-reset the block, program the default
 * 16-bit / ITE-trigger configuration and hook up the DISPC IRQ.
 * Returns 0 on success or a negative error code.
 */
515static int rfbi_init(struct omapfb_device *fbdev)
516{
517	u32 l;
518	int r;
519
520	rfbi.fbdev = fbdev;
521	rfbi.base = io_p2v(RFBI_BASE);
522
523	if ((r = rfbi_get_clocks()) < 0)
524		return r;
525	rfbi_enable_clocks(1);
526
527	rfbi.l4_khz = clk_get_rate(rfbi.dss_ick) / 1000;
528
529	/* Reset */
530	rfbi_write_reg(RFBI_SYSCONFIG, 1 << 1);
	/* NOTE(review): busy-wait for reset-done with no timeout */
531	while (!(rfbi_read_reg(RFBI_SYSSTATUS) & (1 << 0)));
532
533	l = rfbi_read_reg(RFBI_SYSCONFIG);
534	/* Enable autoidle and smart-idle */
535	l |= (1 << 0) | (2 << 3);
536	rfbi_write_reg(RFBI_SYSCONFIG, l);
537
538	/* 16-bit interface, ITE trigger mode, 16-bit data */
539	l = (0x03 << 0) | (0x00 << 2) | (0x01 << 5) | (0x02 << 7);
540	l |= (0 << 9) | (1 << 20) | (1 << 21);
541	rfbi_write_reg(RFBI_CONFIG0, l);
542
543	rfbi_write_reg(RFBI_DATA_CYCLE1_0, 0x00000010);
544
	/* NOTE(review): the value read here is immediately overwritten
	 * below, so the read appears redundant — confirm intent. */
545	l = rfbi_read_reg(RFBI_CONTROL);
546	/* Select CS0, clear bypass mode */
547	l = (0x01 << 2);
548	rfbi_write_reg(RFBI_CONTROL, l);
549
550	if ((r = omap_dispc_request_irq(rfbi_dma_callback, NULL)) < 0) {
551		dev_err(fbdev->dev, "can't get DISPC irq\n");
552		rfbi_enable_clocks(0);
553		return r;
554	}
555
556	l = rfbi_read_reg(RFBI_REVISION);
557	pr_info("omapfb: RFBI version %d.%d initialized\n",
558		(l >> 4) & 0x0f, l & 0x0f);
559
560	rfbi_enable_clocks(0);
561
562	return 0;
563}
564
/* Undo rfbi_init(): release the DISPC IRQ and the clock references. */
565static void rfbi_cleanup(void)
566{
567	omap_dispc_free_irq();
568	rfbi_put_clocks();
569}
570
/*
 * External LCD controller interface operations for OMAP2, implemented
 * on top of the RFBI module.  Consumed by the omapfb core.
 */
571const struct lcd_ctrl_extif omap2_ext_if = {
572	.init			= rfbi_init,
573	.cleanup		= rfbi_cleanup,
574	.get_clk_info		= rfbi_get_clk_info,
575	.get_max_tx_rate	= rfbi_get_max_tx_rate,
576	.set_bits_per_cycle	= rfbi_set_bits_per_cycle,
577	.convert_timings	= rfbi_convert_timings,
578	.set_timings		= rfbi_set_timings,
579	.write_command		= rfbi_write_command,
580	.read_data		= rfbi_read_data,
581	.write_data		= rfbi_write_data,
582	.transfer_area		= rfbi_transfer_area,
583	.setup_tearsync		= rfbi_setup_tearsync,
584	.enable_tearsync	= rfbi_enable_tearsync,
585
	/* RFBI imposes no transfer size limit */
586	.max_transmit_size	= (u32) ~0,
587};
588
diff --git a/drivers/video/omap/sossi.c b/drivers/video/omap/sossi.c
new file mode 100644
index 000000000000..81dbcf53cf0e
--- /dev/null
+++ b/drivers/video/omap/sossi.c
@@ -0,0 +1,686 @@
1/*
2 * OMAP1 Special OptimiSed Screen Interface support
3 *
4 * Copyright (C) 2004-2005 Nokia Corporation
5 * Author: Juha Yrjölä <juha.yrjola@nokia.com>
6 *
7 * This program is free software; you can redistribute it and/or modify it
8 * under the terms of the GNU General Public License as published by the
9 * Free Software Foundation; either version 2 of the License, or (at your
10 * option) any later version.
11 *
12 * This program is distributed in the hope that it will be useful, but
13 * WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
15 * General Public License for more details.
16 *
17 * You should have received a copy of the GNU General Public License along
18 * with this program; if not, write to the Free Software Foundation, Inc.,
19 * 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
20 */
21#include <linux/module.h>
22#include <linux/mm.h>
23#include <linux/clk.h>
24#include <linux/irq.h>
25#include <linux/io.h>
26
27#include <asm/arch/dma.h>
28#include <asm/arch/omapfb.h>
29
30#include "lcdc.h"
31
32#define MODULE_NAME "omapfb-sossi"
33
34#define OMAP_SOSSI_BASE 0xfffbac00
35#define SOSSI_ID_REG 0x00
36#define SOSSI_INIT1_REG 0x04
37#define SOSSI_INIT2_REG 0x08
38#define SOSSI_INIT3_REG 0x0c
39#define SOSSI_FIFO_REG 0x10
40#define SOSSI_REOTABLE_REG 0x14
41#define SOSSI_TEARING_REG 0x18
42#define SOSSI_INIT1B_REG 0x1c
43#define SOSSI_FIFOB_REG 0x20
44
45#define DMA_GSCR 0xfffedc04
46#define DMA_LCD_CCR 0xfffee3c2
47#define DMA_LCD_CTRL 0xfffee3c4
48#define DMA_LCD_LCH_CTRL 0xfffee3ea
49
50#define CONF_SOSSI_RESET_R (1 << 23)
51
52#define RD_ACCESS 0
53#define WR_ACCESS 1
54
55#define SOSSI_MAX_XMIT_BYTES (512 * 1024)
56
/* Driver-global SoSSI state, filled in by sossi_init(). */
57static struct {
	/* virtual base of the SoSSI register window */
58	void __iomem	*base;
	/* functional clock and its undivided parent rate in Hz */
59	struct clk	*fck;
60	unsigned long	fck_hz;
	/* protects vsync_dma_pending against the match IRQ */
61	spinlock_t	lock;
62	int		bus_pick_count;
63	int		bus_pick_width;
64	int		tearsync_mode;
65	int		tearsync_line;
	/* completion callback for an area transfer and its argument */
66	void		(*lcdc_callback)(void *data);
67	void		*lcdc_callback_data;
	/* number of transfers armed and waiting for the sync IRQ */
68	int		vsync_dma_pending;
69	/* timing for read and write access */
70	int		clk_div;
71	u8		clk_tw0[2];
72	u8		clk_tw1[2];
73	/*
74	 * if last_access is the same as current we don't have to change
75	 * the timings
76	 */
77	int		last_access;
78
79	struct omapfb_device	*fbdev;
80} sossi;
81
/* 32/16/8-bit accessors for SoSSI registers, plus read-modify-write
 * helpers to set or clear individual bits. */
82static inline u32 sossi_read_reg(int reg)
83{
84	return readl(sossi.base + reg);
85}
86
87static inline u16 sossi_read_reg16(int reg)
88{
89	return readw(sossi.base + reg);
90}
91
92static inline u8 sossi_read_reg8(int reg)
93{
94	return readb(sossi.base + reg);
95}
96
97static inline void sossi_write_reg(int reg, u32 value)
98{
99	writel(value, sossi.base + reg);
100}
101
102static inline void sossi_write_reg16(int reg, u16 value)
103{
104	writew(value, sossi.base + reg);
105}
106
107static inline void sossi_write_reg8(int reg, u8 value)
108{
109	writeb(value, sossi.base + reg);
110}
111
112static void sossi_set_bits(int reg, u32 bits)
113{
114	sossi_write_reg(reg, sossi_read_reg(reg) | bits);
115}
116
117static void sossi_clear_bits(int reg, u32 bits)
118{
119	sossi_write_reg(reg, sossi_read_reg(reg) & ~bits);
120}
121
/* Convert a clock rate in Hz to the clock period in picoseconds.
 * The argument is fully parenthesized so that compound expressions
 * (e.g. HZ_TO_PS(a + b)) expand correctly. */
#define HZ_TO_PS(x)	(1000000000 / ((x) / 1000))
123
/*
 * Convert a duration in picoseconds to SoSSI clock ticks at divider
 * @div, rounding up so the resulting time is never shorter than @ps.
 */
124static u32 ps_to_sossi_ticks(u32 ps, int div)
125{
126	u32 clk_period = HZ_TO_PS(sossi.fck_hz) * div;
127	return (clk_period + ps - 1) / clk_period;
128}
129
/*
 * Convert the generic read timings in @t into SoSSI TW0/TW1 register
 * values (stored in t->tim[0] and t->tim[1] as cycle counts minus one).
 * Returns 0 on success, -1 if the requested timings cannot be met.
 */
130static int calc_rd_timings(struct extif_timings *t)
131{
132	u32 tw0, tw1;
133	int reon, reoff, recyc, actim;
134	int div = t->clk_div;
135
136	/*
137	 * Make sure that after conversion it still holds that:
138	 * reoff > reon, recyc >= reoff, actim > reon
139	 */
140	reon = ps_to_sossi_ticks(t->re_on_time, div);
141	/* reon will be exactly one sossi tick */
142	if (reon > 1)
143		return -1;
144
145	reoff = ps_to_sossi_ticks(t->re_off_time, div);
146
147	if (reoff <= reon)
148		reoff = reon + 1;
149
	/* TW0 = read strobe low time, at most 16 ticks */
150	tw0 = reoff - reon;
151	if (tw0 > 0x10)
152		return -1;
153
154	recyc = ps_to_sossi_ticks(t->re_cycle_time, div);
155	if (recyc <= reoff)
156		recyc = reoff + 1;
157
	/* TW1 = remainder of the read cycle, at most 64 ticks */
158	tw1 = recyc - tw0;
159	/* values less then 3 result in the SOSSI block resetting itself */
160	if (tw1 < 3)
161		tw1 = 3;
162	if (tw1 > 0x40)
163		return -1;
164
165	actim = ps_to_sossi_ticks(t->access_time, div);
166	if (actim < reoff)
167		actim++;
168	/*
169	 * access time (data hold time) will be exactly one sossi
170	 * tick
171	 */
172	if (actim - reoff > 1)
173		return -1;
174
175	t->tim[0] = tw0 - 1;
176	t->tim[1] = tw1 - 1;
177
178	return 0;
179}
180
/*
 * Convert the generic write timings in @t into SoSSI TW0/TW1 register
 * values (stored in t->tim[2] and t->tim[3] as cycle counts minus one).
 * Returns 0 on success, -1 if the requested timings cannot be met.
 */
181static int calc_wr_timings(struct extif_timings *t)
182{
183	u32 tw0, tw1;
184	int weon, weoff, wecyc;
185	int div = t->clk_div;
186
187	/*
188	 * Make sure that after conversion it still holds that:
189	 * weoff > weon, wecyc >= weoff
190	 */
191	weon = ps_to_sossi_ticks(t->we_on_time, div);
192	/* weon will be exactly one sossi tick */
193	if (weon > 1)
194		return -1;
195
196	weoff = ps_to_sossi_ticks(t->we_off_time, div);
197	if (weoff <= weon)
198		weoff = weon + 1;
	/* TW0 = write strobe low time, at most 16 ticks */
199	tw0 = weoff - weon;
200	if (tw0 > 0x10)
201		return -1;
202
203	wecyc = ps_to_sossi_ticks(t->we_cycle_time, div);
204	if (wecyc <= weoff)
205		wecyc = weoff + 1;
206
	/* TW1 = remainder of the write cycle, at most 64 ticks */
207	tw1 = wecyc - tw0;
208	/* values less then 3 result in the SOSSI block resetting itself */
209	if (tw1 < 3)
210		tw1 = 3;
211	if (tw1 > 0x40)
212		return -1;
213
214	t->tim[2] = tw0 - 1;
215	t->tim[3] = tw1 - 1;
216
217	return 0;
218}
219
220static void _set_timing(int div, int tw0, int tw1)
221{
222 u32 l;
223
224#ifdef VERBOSE
225 dev_dbg(sossi.fbdev->dev, "Using TW0 = %d, TW1 = %d, div = %d\n",
226 tw0 + 1, tw1 + 1, div);
227#endif
228
229 clk_set_rate(sossi.fck, sossi.fck_hz / div);
230 clk_enable(sossi.fck);
231 l = sossi_read_reg(SOSSI_INIT1_REG);
232 l &= ~((0x0f << 20) | (0x3f << 24));
233 l |= (tw0 << 20) | (tw1 << 24);
234 sossi_write_reg(SOSSI_INIT1_REG, l);
235 clk_disable(sossi.fck);
236}
237
238static void _set_bits_per_cycle(int bus_pick_count, int bus_pick_width)
239{
240 u32 l;
241
242 l = sossi_read_reg(SOSSI_INIT3_REG);
243 l &= ~0x3ff;
244 l |= ((bus_pick_count - 1) << 5) | ((bus_pick_width - 1) & 0x1f);
245 sossi_write_reg(SOSSI_INIT3_REG, l);
246}
247
248static void _set_tearsync_mode(int mode, unsigned line)
249{
250 u32 l;
251
252 l = sossi_read_reg(SOSSI_TEARING_REG);
253 l &= ~(((1 << 11) - 1) << 15);
254 l |= line << 15;
255 l &= ~(0x3 << 26);
256 l |= mode << 26;
257 sossi_write_reg(SOSSI_TEARING_REG, l);
258 if (mode)
259 sossi_set_bits(SOSSI_INIT2_REG, 1 << 6); /* TE logic */
260 else
261 sossi_clear_bits(SOSSI_INIT2_REG, 1 << 6);
262}
263
/*
 * Apply the read or write access timings (RD_ACCESS / WR_ACCESS),
 * skipping the register writes if they are already in effect.
 */
264static inline void set_timing(int access)
265{
266	if (access != sossi.last_access) {
267		sossi.last_access = access;
268		_set_timing(sossi.clk_div,
269			    sossi.clk_tw0[access], sossi.clk_tw1[access]);
270	}
271}
272
/* Begin a bus transaction: assert WE and the (active-low) chip select. */
273static void sossi_start_transfer(void)
274{
275	/* WE */
276	sossi_clear_bits(SOSSI_INIT2_REG, 1 << 4);
277	/* CS active low */
278	sossi_clear_bits(SOSSI_INIT1_REG, 1 << 30);
279}
280
/* End a bus transaction: deassert WE and the (active-low) chip select. */
281static void sossi_stop_transfer(void)
282{
283	/* WE */
284	sossi_set_bits(SOSSI_INIT2_REG, 1 << 4);
285	/* CS active low */
286	sossi_set_bits(SOSSI_INIT1_REG, 1 << 30);
287}
288
/*
 * Block until any outstanding write has drained.
 * NOTE(review): busy-waits with no timeout; INIT2 bit 3 presumably
 * signals "write complete / FIFO empty" — confirm against the TRM.
 */
289static void wait_end_of_write(void)
290{
291	/* Before reading we must check if some writings are going on */
292	while (!(sossi_read_reg(SOSSI_INIT2_REG) & (1 << 3)));
293}
294
295static void send_data(const void *data, unsigned int len)
296{
297 while (len >= 4) {
298 sossi_write_reg(SOSSI_FIFO_REG, *(const u32 *) data);
299 len -= 4;
300 data += 4;
301 }
302 while (len >= 2) {
303 sossi_write_reg16(SOSSI_FIFO_REG, *(const u16 *) data);
304 len -= 2;
305 data += 2;
306 }
307 while (len) {
308 sossi_write_reg8(SOSSI_FIFO_REG, *(const u8 *) data);
309 len--;
310 data++;
311 }
312}
313
/*
 * Program the transfer length in bus cycles (stored as count - 1 in
 * the low 18 bits of INIT1).  @len is in bytes and must correspond to
 * a cycle count that fits in 18 bits.
 */
314static void set_cycles(unsigned int len)
315{
316	unsigned long nr_cycles = len / (sossi.bus_pick_width / 8);
317
317	BUG_ON((nr_cycles - 1) & ~0x3ffff);
319
320	sossi_clear_bits(SOSSI_INIT1_REG, 0x3ffff);
321	sossi_set_bits(SOSSI_INIT1_REG, (nr_cycles - 1) & 0x3ffff);
322}
323
324static int sossi_convert_timings(struct extif_timings *t)
325{
326 int r = 0;
327 int div = t->clk_div;
328
329 t->converted = 0;
330
331 if (div <= 0 || div > 8)
332 return -1;
333
334 /* no CS on SOSSI, so ignore cson, csoff, cs_pulsewidth */
335 if ((r = calc_rd_timings(t)) < 0)
336 return r;
337
338 if ((r = calc_wr_timings(t)) < 0)
339 return r;
340
341 t->tim[4] = div;
342
343 t->converted = 1;
344
345 return 0;
346}
347
/*
 * Store previously converted timings (see sossi_convert_timings) in
 * the driver state; they are applied lazily by set_timing().
 */
348static void sossi_set_timings(const struct extif_timings *t)
349{
350	BUG_ON(!t->converted);
351
352	sossi.clk_tw0[RD_ACCESS] = t->tim[0];
353	sossi.clk_tw1[RD_ACCESS] = t->tim[1];
354
355	sossi.clk_tw0[WR_ACCESS] = t->tim[2];
356	sossi.clk_tw1[WR_ACCESS] = t->tim[3];
357
358	sossi.clk_div = t->tim[4];
359}
360
/* Report the base clock period (ps) and the maximum supported divider. */
361static void sossi_get_clk_info(u32 *clk_period, u32 *max_clk_div)
362{
363	*clk_period = HZ_TO_PS(sossi.fck_hz);
364	*max_clk_div = 8;
365}
366
367static void sossi_set_bits_per_cycle(int bpc)
368{
369 int bus_pick_count, bus_pick_width;
370
371 /*
372 * We set explicitly the the bus_pick_count as well, although
373 * with remapping/reordering disabled it will be calculated by HW
374 * as (32 / bus_pick_width).
375 */
376 switch (bpc) {
377 case 8:
378 bus_pick_count = 4;
379 bus_pick_width = 8;
380 break;
381 case 16:
382 bus_pick_count = 2;
383 bus_pick_width = 16;
384 break;
385 default:
386 BUG();
387 return;
388 }
389 sossi.bus_pick_width = bus_pick_width;
390 sossi.bus_pick_count = bus_pick_count;
391}
392
/*
 * Configure the tearing sync pulse widths and polarities.  SoSSI
 * supports a single sync pin; pulse times are given in picoseconds.
 * Returns 0 on success, -EINVAL/-EDOM on invalid arguments.
 */
393static int sossi_setup_tearsync(unsigned pin_cnt,
394				unsigned hs_pulse_time, unsigned vs_pulse_time,
395				int hs_pol_inv, int vs_pol_inv, int div)
396{
397	int hs, vs;
398	u32 l;
399
400	if (pin_cnt != 1 || div < 1 || div > 8)
401		return -EINVAL;
402
403	hs = ps_to_sossi_ticks(hs_pulse_time, div);
404	vs = ps_to_sossi_ticks(vs_pulse_time, div);
405	if (vs < 8 || vs <= hs || vs >= (1 << 12))
406		return -EDOM;
	/* VS is stored in units of 8 ticks, biased by -1 */
407	vs /= 8;
408	vs--;
409	if (hs > 8)
410		hs = 8;
411	if (hs)
412		hs--;
413
414	dev_dbg(sossi.fbdev->dev,
415		"setup_tearsync: hs %d vs %d hs_inv %d vs_inv %d\n",
416		hs, vs, hs_pol_inv, vs_pol_inv);
417
418	clk_enable(sossi.fck);
419	l = sossi_read_reg(SOSSI_TEARING_REG);
420	l &= ~((1 << 15) - 1);
421	l |= vs << 3;
422	l |= hs;
423	if (hs_pol_inv)
424		l |= 1 << 29;
425	else
426		l &= ~(1 << 29);
427	if (vs_pol_inv)
428		l |= 1 << 28;
429	else
430		l &= ~(1 << 28);
431	sossi_write_reg(SOSSI_TEARING_REG, l);
432	clk_disable(sossi.fck);
433
434	return 0;
435}
436
437static int sossi_enable_tearsync(int enable, unsigned line)
438{
439 int mode;
440
441 dev_dbg(sossi.fbdev->dev, "tearsync %d line %d\n", enable, line);
442 if (line >= 1 << 11)
443 return -EINVAL;
444 if (enable) {
445 if (line)
446 mode = 2; /* HS or VS */
447 else
448 mode = 3; /* VS only */
449 } else
450 mode = 0;
451 sossi.tearsync_line = line;
452 sossi.tearsync_mode = mode;
453
454 return 0;
455}
456
/*
 * Send a command sequence: apply write timings, clear the CMD#/DATA
 * line to mark the bytes as command, and stream them through the FIFO.
 */
457static void sossi_write_command(const void *data, unsigned int len)
458{
459	clk_enable(sossi.fck);
460	set_timing(WR_ACCESS);
461	_set_bits_per_cycle(sossi.bus_pick_count, sossi.bus_pick_width);
462	/* CMD#/DATA */
463	sossi_clear_bits(SOSSI_INIT1_REG, 1 << 18);
464	set_cycles(len);
465	sossi_start_transfer();
466	send_data(data, len);
467	sossi_stop_transfer();
468	wait_end_of_write();
469	clk_disable(sossi.fck);
470}
471
/*
 * Send parameter/pixel data: same as sossi_write_command() but with
 * the CMD#/DATA line set to mark the bytes as data.
 */
472static void sossi_write_data(const void *data, unsigned int len)
473{
474	clk_enable(sossi.fck);
475	set_timing(WR_ACCESS);
476	_set_bits_per_cycle(sossi.bus_pick_count, sossi.bus_pick_width);
477	/* CMD#/DATA */
478	sossi_set_bits(SOSSI_INIT1_REG, 1 << 18);
479	set_cycles(len);
480	sossi_start_transfer();
481	send_data(data, len);
482	sossi_stop_transfer();
483	wait_end_of_write();
484	clk_disable(sossi.fck);
485}
486
/*
 * Start a DMA transfer of a width x height pixel area.  With tearing
 * sync enabled the LCD DMA is started from the match IRQ; otherwise it
 * is kicked off immediately.  @callback runs from the DMA completion
 * handler.  The clock taken here is released in sossi_dma_callback().
 */
487static void sossi_transfer_area(int width, int height,
488				void (callback)(void *data), void *data)
489{
490	BUG_ON(callback == NULL);
491
492	sossi.lcdc_callback = callback;
493	sossi.lcdc_callback_data = data;
494
495	clk_enable(sossi.fck);
496	set_timing(WR_ACCESS);
497	_set_bits_per_cycle(sossi.bus_pick_count, sossi.bus_pick_width);
498	_set_tearsync_mode(sossi.tearsync_mode, sossi.tearsync_line);
499	/* CMD#/DATA */
500	sossi_set_bits(SOSSI_INIT1_REG, 1 << 18);
501	set_cycles(width * height * sossi.bus_pick_width / 8);
502
503	sossi_start_transfer();
504	if (sossi.tearsync_mode) {
505		/*
506		 * Wait for the sync signal and start the transfer only
507		 * then. We can't seem to be able to use HW sync DMA for
508		 * this since LCD DMA shows huge latencies, as if it
509		 * would ignore some of the DMA requests from SoSSI.
510		 */
511		unsigned long flags;
512
513		spin_lock_irqsave(&sossi.lock, flags);
514		sossi.vsync_dma_pending++;
515		spin_unlock_irqrestore(&sossi.lock, flags);
516	} else
517		/* Just start the transfer right away. */
518		omap_enable_lcd_dma();
519}
520
/*
 * LCD DMA completion handler: stop the transfer, drop the clock
 * reference taken in sossi_transfer_area() and notify the client.
 */
521static void sossi_dma_callback(void *data)
522{
523	omap_stop_lcd_dma();
524	sossi_stop_transfer();
525	clk_disable(sossi.fck);
526	sossi.lcdc_callback(sossi.lcdc_callback_data);
527}
528
/*
 * Read @len bytes from the panel through the FIFO using read timings,
 * draining with the widest register access possible.
 */
529static void sossi_read_data(void *data, unsigned int len)
530{
531	clk_enable(sossi.fck);
532	set_timing(RD_ACCESS);
533	_set_bits_per_cycle(sossi.bus_pick_count, sossi.bus_pick_width);
534	/* CMD#/DATA */
535	sossi_set_bits(SOSSI_INIT1_REG, 1 << 18);
536	set_cycles(len);
537	sossi_start_transfer();
538	while (len >= 4) {
539		*(u32 *) data = sossi_read_reg(SOSSI_FIFO_REG);
540		len -= 4;
541		data += 4;
542	}
543	while (len >= 2) {
544		*(u16 *) data = sossi_read_reg16(SOSSI_FIFO_REG);
545		len -= 2;
546		data += 2;
547	}
548	while (len) {
549		*(u8 *) data = sossi_read_reg8(SOSSI_FIFO_REG);
550		len--;
551		data++;
552	}
553	sossi_stop_transfer();
554	clk_disable(sossi.fck);
555}
556
/*
 * Tearing-match interrupt: if a frame transfer was armed by
 * sossi_transfer_area(), start the LCD DMA now that the panel sync
 * has arrived.
 */
557static irqreturn_t sossi_match_irq(int irq, void *data)
558{
559	unsigned long flags;
560
561	spin_lock_irqsave(&sossi.lock, flags);
562	if (sossi.vsync_dma_pending) {
563		sossi.vsync_dma_pending--;
564		omap_enable_lcd_dma();
565	}
566	spin_unlock_irqrestore(&sossi.lock, flags);
567	return IRQ_HANDLED;
568}
569
570static int sossi_init(struct omapfb_device *fbdev)
571{
572 u32 l, k;
573 struct clk *fck;
574 struct clk *dpll1out_ck;
575 int r;
576
577 sossi.base = (void __iomem *)IO_ADDRESS(OMAP_SOSSI_BASE);
578 sossi.fbdev = fbdev;
579 spin_lock_init(&sossi.lock);
580
581 dpll1out_ck = clk_get(fbdev->dev, "ck_dpll1out");
582 if (IS_ERR(dpll1out_ck)) {
583 dev_err(fbdev->dev, "can't get DPLL1OUT clock\n");
584 return PTR_ERR(dpll1out_ck);
585 }
586 /*
587 * We need the parent clock rate, which we might divide further
588 * depending on the timing requirements of the controller. See
589 * _set_timings.
590 */
591 sossi.fck_hz = clk_get_rate(dpll1out_ck);
592 clk_put(dpll1out_ck);
593
594 fck = clk_get(fbdev->dev, "ck_sossi");
595 if (IS_ERR(fck)) {
596 dev_err(fbdev->dev, "can't get SoSSI functional clock\n");
597 return PTR_ERR(fck);
598 }
599 sossi.fck = fck;
600
601 /* Reset and enable the SoSSI module */
602 l = omap_readl(MOD_CONF_CTRL_1);
603 l |= CONF_SOSSI_RESET_R;
604 omap_writel(l, MOD_CONF_CTRL_1);
605 l &= ~CONF_SOSSI_RESET_R;
606 omap_writel(l, MOD_CONF_CTRL_1);
607
608 clk_enable(sossi.fck);
609 l = omap_readl(ARM_IDLECT2);
610 l &= ~(1 << 8); /* DMACK_REQ */
611 omap_writel(l, ARM_IDLECT2);
612
613 l = sossi_read_reg(SOSSI_INIT2_REG);
614 /* Enable and reset the SoSSI block */
615 l |= (1 << 0) | (1 << 1);
616 sossi_write_reg(SOSSI_INIT2_REG, l);
617 /* Take SoSSI out of reset */
618 l &= ~(1 << 1);
619 sossi_write_reg(SOSSI_INIT2_REG, l);
620
621 sossi_write_reg(SOSSI_ID_REG, 0);
622 l = sossi_read_reg(SOSSI_ID_REG);
623 k = sossi_read_reg(SOSSI_ID_REG);
624
625 if (l != 0x55555555 || k != 0xaaaaaaaa) {
626 dev_err(fbdev->dev,
627 "invalid SoSSI sync pattern: %08x, %08x\n", l, k);
628 r = -ENODEV;
629 goto err;
630 }
631
632 if ((r = omap_lcdc_set_dma_callback(sossi_dma_callback, NULL)) < 0) {
633 dev_err(fbdev->dev, "can't get LCDC IRQ\n");
634 r = -ENODEV;
635 goto err;
636 }
637
638 l = sossi_read_reg(SOSSI_ID_REG); /* Component code */
639 l = sossi_read_reg(SOSSI_ID_REG);
640 dev_info(fbdev->dev, "SoSSI version %d.%d initialized\n",
641 l >> 16, l & 0xffff);
642
643 l = sossi_read_reg(SOSSI_INIT1_REG);
644 l |= (1 << 19); /* DMA_MODE */
645 l &= ~(1 << 31); /* REORDERING */
646 sossi_write_reg(SOSSI_INIT1_REG, l);
647
648 if ((r = request_irq(INT_1610_SoSSI_MATCH, sossi_match_irq,
649 IRQT_FALLING,
650 "sossi_match", sossi.fbdev->dev)) < 0) {
651 dev_err(sossi.fbdev->dev, "can't get SoSSI match IRQ\n");
652 goto err;
653 }
654
655 clk_disable(sossi.fck);
656 return 0;
657
658err:
659 clk_disable(sossi.fck);
660 clk_put(sossi.fck);
661 return r;
662}
663
/* Undo sossi_init(): release the LCD DMA callback and the clock. */
664static void sossi_cleanup(void)
665{
666	omap_lcdc_free_dma_callback();
667	clk_put(sossi.fck);
668}
669
/*
 * External LCD controller interface operations for OMAP1, implemented
 * on top of the SoSSI block.  Consumed by the omapfb core.
 */
670struct lcd_ctrl_extif omap1_ext_if = {
671	.init			= sossi_init,
672	.cleanup		= sossi_cleanup,
673	.get_clk_info		= sossi_get_clk_info,
674	.convert_timings	= sossi_convert_timings,
675	.set_timings		= sossi_set_timings,
676	.set_bits_per_cycle	= sossi_set_bits_per_cycle,
677	.setup_tearsync		= sossi_setup_tearsync,
678	.enable_tearsync	= sossi_enable_tearsync,
679	.write_command		= sossi_write_command,
680	.read_data		= sossi_read_data,
681	.write_data		= sossi_write_data,
682	.transfer_area		= sossi_transfer_area,
683
684	.max_transmit_size	= SOSSI_MAX_XMIT_BYTES,
685};
686
diff --git a/drivers/video/platinumfb.c b/drivers/video/platinumfb.c
index e64f8b5d0056..8503e733a172 100644
--- a/drivers/video/platinumfb.c
+++ b/drivers/video/platinumfb.c
@@ -52,7 +52,7 @@ struct fb_info_platinum {
52 struct { 52 struct {
53 __u8 red, green, blue; 53 __u8 red, green, blue;
54 } palette[256]; 54 } palette[256];
55 u32 pseudo_palette[17]; 55 u32 pseudo_palette[16];
56 56
57 volatile struct cmap_regs __iomem *cmap_regs; 57 volatile struct cmap_regs __iomem *cmap_regs;
58 unsigned long cmap_regs_phys; 58 unsigned long cmap_regs_phys;
diff --git a/drivers/video/pm2fb.c b/drivers/video/pm2fb.c
index 0a04483aa3e0..10c0cc6e93fc 100644
--- a/drivers/video/pm2fb.c
+++ b/drivers/video/pm2fb.c
@@ -24,7 +24,7 @@
24 * License. See the file COPYING in the main directory of this archive for 24 * License. See the file COPYING in the main directory of this archive for
25 * more details. 25 * more details.
26 * 26 *
27 * 27 *
28 */ 28 */
29 29
30#include <linux/module.h> 30#include <linux/module.h>
@@ -58,7 +58,7 @@
58#endif 58#endif
59 59
60/* 60/*
61 * Driver data 61 * Driver data
62 */ 62 */
63static char *mode __devinitdata = NULL; 63static char *mode __devinitdata = NULL;
64 64
@@ -82,12 +82,12 @@ struct pm2fb_par
82{ 82{
83 pm2type_t type; /* Board type */ 83 pm2type_t type; /* Board type */
84 unsigned char __iomem *v_regs;/* virtual address of p_regs */ 84 unsigned char __iomem *v_regs;/* virtual address of p_regs */
85 u32 memclock; /* memclock */ 85 u32 memclock; /* memclock */
86 u32 video; /* video flags before blanking */ 86 u32 video; /* video flags before blanking */
87 u32 mem_config; /* MemConfig reg at probe */ 87 u32 mem_config; /* MemConfig reg at probe */
88 u32 mem_control; /* MemControl reg at probe */ 88 u32 mem_control; /* MemControl reg at probe */
89 u32 boot_address; /* BootAddress reg at probe */ 89 u32 boot_address; /* BootAddress reg at probe */
90 u32 palette[16]; 90 u32 palette[16];
91}; 91};
92 92
93/* 93/*
@@ -95,12 +95,12 @@ struct pm2fb_par
95 * if we don't use modedb. 95 * if we don't use modedb.
96 */ 96 */
97static struct fb_fix_screeninfo pm2fb_fix __devinitdata = { 97static struct fb_fix_screeninfo pm2fb_fix __devinitdata = {
98 .id = "", 98 .id = "",
99 .type = FB_TYPE_PACKED_PIXELS, 99 .type = FB_TYPE_PACKED_PIXELS,
100 .visual = FB_VISUAL_PSEUDOCOLOR, 100 .visual = FB_VISUAL_PSEUDOCOLOR,
101 .xpanstep = 1, 101 .xpanstep = 1,
102 .ypanstep = 1, 102 .ypanstep = 1,
103 .ywrapstep = 0, 103 .ywrapstep = 0,
104 .accel = FB_ACCEL_3DLABS_PERMEDIA2, 104 .accel = FB_ACCEL_3DLABS_PERMEDIA2,
105}; 105};
106 106
@@ -109,26 +109,26 @@ static struct fb_fix_screeninfo pm2fb_fix __devinitdata = {
109 */ 109 */
110static struct fb_var_screeninfo pm2fb_var __devinitdata = { 110static struct fb_var_screeninfo pm2fb_var __devinitdata = {
111 /* "640x480, 8 bpp @ 60 Hz */ 111 /* "640x480, 8 bpp @ 60 Hz */
112 .xres = 640, 112 .xres = 640,
113 .yres = 480, 113 .yres = 480,
114 .xres_virtual = 640, 114 .xres_virtual = 640,
115 .yres_virtual = 480, 115 .yres_virtual = 480,
116 .bits_per_pixel =8, 116 .bits_per_pixel = 8,
117 .red = {0, 8, 0}, 117 .red = {0, 8, 0},
118 .blue = {0, 8, 0}, 118 .blue = {0, 8, 0},
119 .green = {0, 8, 0}, 119 .green = {0, 8, 0},
120 .activate = FB_ACTIVATE_NOW, 120 .activate = FB_ACTIVATE_NOW,
121 .height = -1, 121 .height = -1,
122 .width = -1, 122 .width = -1,
123 .accel_flags = 0, 123 .accel_flags = 0,
124 .pixclock = 39721, 124 .pixclock = 39721,
125 .left_margin = 40, 125 .left_margin = 40,
126 .right_margin = 24, 126 .right_margin = 24,
127 .upper_margin = 32, 127 .upper_margin = 32,
128 .lower_margin = 11, 128 .lower_margin = 11,
129 .hsync_len = 96, 129 .hsync_len = 96,
130 .vsync_len = 2, 130 .vsync_len = 2,
131 .vmode = FB_VMODE_NONINTERLACED 131 .vmode = FB_VMODE_NONINTERLACED
132}; 132};
133 133
134/* 134/*
@@ -166,7 +166,7 @@ static inline u32 pm2_RDAC_RD(struct pm2fb_par* p, s32 idx)
166 pm2_WR(p, PM2VR_RD_INDEX_LOW, idx & 0xff); 166 pm2_WR(p, PM2VR_RD_INDEX_LOW, idx & 0xff);
167 index = PM2VR_RD_INDEXED_DATA; 167 index = PM2VR_RD_INDEXED_DATA;
168 break; 168 break;
169 } 169 }
170 mb(); 170 mb();
171 return pm2_RD(p, index); 171 return pm2_RD(p, index);
172} 172}
@@ -182,7 +182,7 @@ static inline void pm2_RDAC_WR(struct pm2fb_par* p, s32 idx, u32 v)
182 pm2_WR(p, PM2VR_RD_INDEX_LOW, idx & 0xff); 182 pm2_WR(p, PM2VR_RD_INDEX_LOW, idx & 0xff);
183 index = PM2VR_RD_INDEXED_DATA; 183 index = PM2VR_RD_INDEXED_DATA;
184 break; 184 break;
185 } 185 }
186 wmb(); 186 wmb();
187 pm2_WR(p, index, v); 187 pm2_WR(p, index, v);
188 wmb(); 188 wmb();
@@ -197,7 +197,7 @@ static inline void pm2v_RDAC_WR(struct pm2fb_par* p, s32 idx, u32 v)
197} 197}
198 198
199#ifdef CONFIG_FB_PM2_FIFO_DISCONNECT 199#ifdef CONFIG_FB_PM2_FIFO_DISCONNECT
200#define WAIT_FIFO(p,a) 200#define WAIT_FIFO(p, a)
201#else 201#else
202static inline void WAIT_FIFO(struct pm2fb_par* p, u32 a) 202static inline void WAIT_FIFO(struct pm2fb_par* p, u32 a)
203{ 203{
@@ -209,7 +209,7 @@ static inline void WAIT_FIFO(struct pm2fb_par* p, u32 a)
209/* 209/*
210 * partial products for the supported horizontal resolutions. 210 * partial products for the supported horizontal resolutions.
211 */ 211 */
212#define PACKPP(p0,p1,p2) (((p2) << 6) | ((p1) << 3) | (p0)) 212#define PACKPP(p0, p1, p2) (((p2) << 6) | ((p1) << 3) | (p0))
213static const struct { 213static const struct {
214 u16 width; 214 u16 width;
215 u16 pp; 215 u16 pp;
@@ -357,7 +357,7 @@ static void reset_card(struct pm2fb_par* p)
357static void reset_config(struct pm2fb_par* p) 357static void reset_config(struct pm2fb_par* p)
358{ 358{
359 WAIT_FIFO(p, 52); 359 WAIT_FIFO(p, 52);
360 pm2_WR(p, PM2R_CHIP_CONFIG, pm2_RD(p, PM2R_CHIP_CONFIG)& 360 pm2_WR(p, PM2R_CHIP_CONFIG, pm2_RD(p, PM2R_CHIP_CONFIG) &
361 ~(PM2F_VGA_ENABLE|PM2F_VGA_FIXED)); 361 ~(PM2F_VGA_ENABLE|PM2F_VGA_FIXED));
362 pm2_WR(p, PM2R_BYPASS_WRITE_MASK, ~(0L)); 362 pm2_WR(p, PM2R_BYPASS_WRITE_MASK, ~(0L));
363 pm2_WR(p, PM2R_FRAMEBUFFER_WRITE_MASK, ~(0L)); 363 pm2_WR(p, PM2R_FRAMEBUFFER_WRITE_MASK, ~(0L));
@@ -367,7 +367,7 @@ static void reset_config(struct pm2fb_par* p)
367 pm2_WR(p, PM2R_RASTERIZER_MODE, 0); 367 pm2_WR(p, PM2R_RASTERIZER_MODE, 0);
368 pm2_WR(p, PM2R_DELTA_MODE, PM2F_DELTA_ORDER_RGB); 368 pm2_WR(p, PM2R_DELTA_MODE, PM2F_DELTA_ORDER_RGB);
369 pm2_WR(p, PM2R_LB_READ_FORMAT, 0); 369 pm2_WR(p, PM2R_LB_READ_FORMAT, 0);
370 pm2_WR(p, PM2R_LB_WRITE_FORMAT, 0); 370 pm2_WR(p, PM2R_LB_WRITE_FORMAT, 0);
371 pm2_WR(p, PM2R_LB_READ_MODE, 0); 371 pm2_WR(p, PM2R_LB_READ_MODE, 0);
372 pm2_WR(p, PM2R_LB_SOURCE_OFFSET, 0); 372 pm2_WR(p, PM2R_LB_SOURCE_OFFSET, 0);
373 pm2_WR(p, PM2R_FB_SOURCE_OFFSET, 0); 373 pm2_WR(p, PM2R_FB_SOURCE_OFFSET, 0);
@@ -535,7 +535,7 @@ static void set_video(struct pm2fb_par* p, u32 video) {
535 vsync = video; 535 vsync = video;
536 536
537 DPRINTK("video = 0x%x\n", video); 537 DPRINTK("video = 0x%x\n", video);
538 538
539 /* 539 /*
540 * The hardware cursor needs +vsync to recognise vert retrace. 540 * The hardware cursor needs +vsync to recognise vert retrace.
541 * We may not be using the hardware cursor, but the X Glint 541 * We may not be using the hardware cursor, but the X Glint
@@ -574,9 +574,9 @@ static void set_video(struct pm2fb_par* p, u32 video) {
574 */ 574 */
575 575
576/** 576/**
577 * pm2fb_check_var - Optional function. Validates a var passed in. 577 * pm2fb_check_var - Optional function. Validates a var passed in.
578 * @var: frame buffer variable screen structure 578 * @var: frame buffer variable screen structure
579 * @info: frame buffer structure that represents a single frame buffer 579 * @info: frame buffer structure that represents a single frame buffer
580 * 580 *
581 * Checks to see if the hardware supports the state requested by 581 * Checks to see if the hardware supports the state requested by
582 * var passed in. 582 * var passed in.
@@ -615,23 +615,23 @@ static int pm2fb_check_var(struct fb_var_screeninfo *var, struct fb_info *info)
615 615
616 var->xres = (var->xres + 15) & ~15; /* could sometimes be 8 */ 616 var->xres = (var->xres + 15) & ~15; /* could sometimes be 8 */
617 lpitch = var->xres * ((var->bits_per_pixel + 7)>>3); 617 lpitch = var->xres * ((var->bits_per_pixel + 7)>>3);
618 618
619 if (var->xres < 320 || var->xres > 1600) { 619 if (var->xres < 320 || var->xres > 1600) {
620 DPRINTK("width not supported: %u\n", var->xres); 620 DPRINTK("width not supported: %u\n", var->xres);
621 return -EINVAL; 621 return -EINVAL;
622 } 622 }
623 623
624 if (var->yres < 200 || var->yres > 1200) { 624 if (var->yres < 200 || var->yres > 1200) {
625 DPRINTK("height not supported: %u\n", var->yres); 625 DPRINTK("height not supported: %u\n", var->yres);
626 return -EINVAL; 626 return -EINVAL;
627 } 627 }
628 628
629 if (lpitch * var->yres_virtual > info->fix.smem_len) { 629 if (lpitch * var->yres_virtual > info->fix.smem_len) {
630 DPRINTK("no memory for screen (%ux%ux%u)\n", 630 DPRINTK("no memory for screen (%ux%ux%u)\n",
631 var->xres, var->yres_virtual, var->bits_per_pixel); 631 var->xres, var->yres_virtual, var->bits_per_pixel);
632 return -EINVAL; 632 return -EINVAL;
633 } 633 }
634 634
635 if (PICOS2KHZ(var->pixclock) > PM2_MAX_PIXCLOCK) { 635 if (PICOS2KHZ(var->pixclock) > PM2_MAX_PIXCLOCK) {
636 DPRINTK("pixclock too high (%ldKHz)\n", PICOS2KHZ(var->pixclock)); 636 DPRINTK("pixclock too high (%ldKHz)\n", PICOS2KHZ(var->pixclock));
637 return -EINVAL; 637 return -EINVAL;
@@ -672,17 +672,17 @@ static int pm2fb_check_var(struct fb_var_screeninfo *var, struct fb_info *info)
672 break; 672 break;
673 } 673 }
674 var->height = var->width = -1; 674 var->height = var->width = -1;
675 675
676 var->accel_flags = 0; /* Can't mmap if this is on */ 676 var->accel_flags = 0; /* Can't mmap if this is on */
677 677
678 DPRINTK("Checking graphics mode at %dx%d depth %d\n", 678 DPRINTK("Checking graphics mode at %dx%d depth %d\n",
679 var->xres, var->yres, var->bits_per_pixel); 679 var->xres, var->yres, var->bits_per_pixel);
680 return 0; 680 return 0;
681} 681}
682 682
683/** 683/**
684 * pm2fb_set_par - Alters the hardware state. 684 * pm2fb_set_par - Alters the hardware state.
685 * @info: frame buffer structure that represents a single frame buffer 685 * @info: frame buffer structure that represents a single frame buffer
686 * 686 *
687 * Using the fb_var_screeninfo in fb_info we set the resolution of the 687 * Using the fb_var_screeninfo in fb_info we set the resolution of the
688 * this particular framebuffer. 688 * this particular framebuffer.
@@ -709,7 +709,7 @@ static int pm2fb_set_par(struct fb_info *info)
709 clear_palette(par); 709 clear_palette(par);
710 if ( par->memclock ) 710 if ( par->memclock )
711 set_memclock(par, par->memclock); 711 set_memclock(par, par->memclock);
712 712
713 width = (info->var.xres_virtual + 7) & ~7; 713 width = (info->var.xres_virtual + 7) & ~7;
714 height = info->var.yres_virtual; 714 height = info->var.yres_virtual;
715 depth = (info->var.bits_per_pixel + 7) & ~7; 715 depth = (info->var.bits_per_pixel + 7) & ~7;
@@ -722,7 +722,7 @@ static int pm2fb_set_par(struct fb_info *info)
722 DPRINTK("pixclock too high (%uKHz)\n", pixclock); 722 DPRINTK("pixclock too high (%uKHz)\n", pixclock);
723 return -EINVAL; 723 return -EINVAL;
724 } 724 }
725 725
726 hsstart = to3264(info->var.right_margin, depth, data64); 726 hsstart = to3264(info->var.right_margin, depth, data64);
727 hsend = hsstart + to3264(info->var.hsync_len, depth, data64); 727 hsend = hsstart + to3264(info->var.hsync_len, depth, data64);
728 hbend = hsend + to3264(info->var.left_margin, depth, data64); 728 hbend = hsend + to3264(info->var.left_margin, depth, data64);
@@ -737,7 +737,7 @@ static int pm2fb_set_par(struct fb_info *info)
737 base = to3264(info->var.yoffset * xres + info->var.xoffset, depth, 1); 737 base = to3264(info->var.yoffset * xres + info->var.xoffset, depth, 1);
738 if (data64) 738 if (data64)
739 video |= PM2F_DATA_64_ENABLE; 739 video |= PM2F_DATA_64_ENABLE;
740 740
741 if (info->var.sync & FB_SYNC_HOR_HIGH_ACT) { 741 if (info->var.sync & FB_SYNC_HOR_HIGH_ACT) {
742 if (lowhsync) { 742 if (lowhsync) {
743 DPRINTK("ignoring +hsync, using -hsync.\n"); 743 DPRINTK("ignoring +hsync, using -hsync.\n");
@@ -778,9 +778,9 @@ static int pm2fb_set_par(struct fb_info *info)
778 WAIT_FIFO(par, 1); 778 WAIT_FIFO(par, 1);
779 pm2_WR(par, PM2VR_RD_INDEX_HIGH, 0); 779 pm2_WR(par, PM2VR_RD_INDEX_HIGH, 0);
780 } 780 }
781 781
782 set_aperture(par, depth); 782 set_aperture(par, depth);
783 783
784 mb(); 784 mb();
785 WAIT_FIFO(par, 19); 785 WAIT_FIFO(par, 19);
786 pm2_RDAC_WR(par, PM2I_RD_COLOR_KEY_CONTROL, 786 pm2_RDAC_WR(par, PM2I_RD_COLOR_KEY_CONTROL,
@@ -847,22 +847,22 @@ static int pm2fb_set_par(struct fb_info *info)
847 set_pixclock(par, pixclock); 847 set_pixclock(par, pixclock);
848 DPRINTK("Setting graphics mode at %dx%d depth %d\n", 848 DPRINTK("Setting graphics mode at %dx%d depth %d\n",
849 info->var.xres, info->var.yres, info->var.bits_per_pixel); 849 info->var.xres, info->var.yres, info->var.bits_per_pixel);
850 return 0; 850 return 0;
851} 851}
852 852
853/** 853/**
854 * pm2fb_setcolreg - Sets a color register. 854 * pm2fb_setcolreg - Sets a color register.
855 * @regno: boolean, 0 copy local, 1 get_user() function 855 * @regno: boolean, 0 copy local, 1 get_user() function
856 * @red: frame buffer colormap structure 856 * @red: frame buffer colormap structure
857 * @green: The green value which can be up to 16 bits wide 857 * @green: The green value which can be up to 16 bits wide
858 * @blue: The blue value which can be up to 16 bits wide. 858 * @blue: The blue value which can be up to 16 bits wide.
859 * @transp: If supported the alpha value which can be up to 16 bits wide. 859 * @transp: If supported the alpha value which can be up to 16 bits wide.
860 * @info: frame buffer info structure 860 * @info: frame buffer info structure
861 * 861 *
862 * Set a single color register. The values supplied have a 16 bit 862 * Set a single color register. The values supplied have a 16 bit
863 * magnitude which needs to be scaled in this function for the hardware. 863 * magnitude which needs to be scaled in this function for the hardware.
864 * Pretty much a direct lift from tdfxfb.c. 864 * Pretty much a direct lift from tdfxfb.c.
865 * 865 *
866 * Returns negative errno on error, or zero on success. 866 * Returns negative errno on error, or zero on success.
867 */ 867 */
868static int pm2fb_setcolreg(unsigned regno, unsigned red, unsigned green, 868static int pm2fb_setcolreg(unsigned regno, unsigned red, unsigned green,
@@ -906,7 +906,7 @@ static int pm2fb_setcolreg(unsigned regno, unsigned red, unsigned green,
906 * (blue << blue.offset) | (transp << transp.offset) 906 * (blue << blue.offset) | (transp << transp.offset)
907 * RAMDAC does not exist 907 * RAMDAC does not exist
908 */ 908 */
909#define CNVT_TOHW(val,width) ((((val)<<(width))+0x7FFF-(val))>>16) 909#define CNVT_TOHW(val, width) ((((val) << (width)) + 0x7FFF -(val)) >> 16)
910 switch (info->fix.visual) { 910 switch (info->fix.visual) {
911 case FB_VISUAL_TRUECOLOR: 911 case FB_VISUAL_TRUECOLOR:
912 case FB_VISUAL_PSEUDOCOLOR: 912 case FB_VISUAL_PSEUDOCOLOR:
@@ -916,9 +916,9 @@ static int pm2fb_setcolreg(unsigned regno, unsigned red, unsigned green,
916 transp = CNVT_TOHW(transp, info->var.transp.length); 916 transp = CNVT_TOHW(transp, info->var.transp.length);
917 break; 917 break;
918 case FB_VISUAL_DIRECTCOLOR: 918 case FB_VISUAL_DIRECTCOLOR:
919 /* example here assumes 8 bit DAC. Might be different 919 /* example here assumes 8 bit DAC. Might be different
920 * for your hardware */ 920 * for your hardware */
921 red = CNVT_TOHW(red, 8); 921 red = CNVT_TOHW(red, 8);
922 green = CNVT_TOHW(green, 8); 922 green = CNVT_TOHW(green, 8);
923 blue = CNVT_TOHW(blue, 8); 923 blue = CNVT_TOHW(blue, 8);
924 /* hey, there is bug in transp handling... */ 924 /* hey, there is bug in transp handling... */
@@ -940,11 +940,11 @@ static int pm2fb_setcolreg(unsigned regno, unsigned red, unsigned green,
940 940
941 switch (info->var.bits_per_pixel) { 941 switch (info->var.bits_per_pixel) {
942 case 8: 942 case 8:
943 break; 943 break;
944 case 16: 944 case 16:
945 case 24: 945 case 24:
946 case 32: 946 case 32:
947 par->palette[regno] = v; 947 par->palette[regno] = v;
948 break; 948 break;
949 } 949 }
950 return 0; 950 return 0;
@@ -956,15 +956,15 @@ static int pm2fb_setcolreg(unsigned regno, unsigned red, unsigned green,
956} 956}
957 957
958/** 958/**
959 * pm2fb_pan_display - Pans the display. 959 * pm2fb_pan_display - Pans the display.
960 * @var: frame buffer variable screen structure 960 * @var: frame buffer variable screen structure
961 * @info: frame buffer structure that represents a single frame buffer 961 * @info: frame buffer structure that represents a single frame buffer
962 * 962 *
963 * Pan (or wrap, depending on the `vmode' field) the display using the 963 * Pan (or wrap, depending on the `vmode' field) the display using the
964 * `xoffset' and `yoffset' fields of the `var' structure. 964 * `xoffset' and `yoffset' fields of the `var' structure.
965 * If the values don't fit, return -EINVAL. 965 * If the values don't fit, return -EINVAL.
966 * 966 *
967 * Returns negative errno on error, or zero on success. 967 * Returns negative errno on error, or zero on success.
968 * 968 *
969 */ 969 */
970static int pm2fb_pan_display(struct fb_var_screeninfo *var, 970static int pm2fb_pan_display(struct fb_var_screeninfo *var,
@@ -980,24 +980,24 @@ static int pm2fb_pan_display(struct fb_var_screeninfo *var,
980 depth = (depth > 32) ? 32 : depth; 980 depth = (depth > 32) ? 32 : depth;
981 base = to3264(var->yoffset * xres + var->xoffset, depth, 1); 981 base = to3264(var->yoffset * xres + var->xoffset, depth, 1);
982 WAIT_FIFO(p, 1); 982 WAIT_FIFO(p, 1);
983 pm2_WR(p, PM2R_SCREEN_BASE, base); 983 pm2_WR(p, PM2R_SCREEN_BASE, base);
984 return 0; 984 return 0;
985} 985}
986 986
987/** 987/**
988 * pm2fb_blank - Blanks the display. 988 * pm2fb_blank - Blanks the display.
989 * @blank_mode: the blank mode we want. 989 * @blank_mode: the blank mode we want.
990 * @info: frame buffer structure that represents a single frame buffer 990 * @info: frame buffer structure that represents a single frame buffer
991 * 991 *
992 * Blank the screen if blank_mode != 0, else unblank. Return 0 if 992 * Blank the screen if blank_mode != 0, else unblank. Return 0 if
993 * blanking succeeded, != 0 if un-/blanking failed due to e.g. a 993 * blanking succeeded, != 0 if un-/blanking failed due to e.g. a
994 * video mode which doesn't support it. Implements VESA suspend 994 * video mode which doesn't support it. Implements VESA suspend
995 * and powerdown modes on hardware that supports disabling hsync/vsync: 995 * and powerdown modes on hardware that supports disabling hsync/vsync:
996 * blank_mode == 2: suspend vsync 996 * blank_mode == 2: suspend vsync
997 * blank_mode == 3: suspend hsync 997 * blank_mode == 3: suspend hsync
998 * blank_mode == 4: powerdown 998 * blank_mode == 4: powerdown
999 * 999 *
1000 * Returns negative errno on error, or zero on success. 1000 * Returns negative errno on error, or zero on success.
1001 * 1001 *
1002 */ 1002 */
1003static int pm2fb_blank(int blank_mode, struct fb_info *info) 1003static int pm2fb_blank(int blank_mode, struct fb_info *info)
@@ -1071,7 +1071,7 @@ static void pm2fb_block_op(struct fb_info* info, int copy,
1071 pm2_WR(par, PM2R_RECTANGLE_ORIGIN, (y << 16) | x); 1071 pm2_WR(par, PM2R_RECTANGLE_ORIGIN, (y << 16) | x);
1072 pm2_WR(par, PM2R_RECTANGLE_SIZE, (h << 16) | w); 1072 pm2_WR(par, PM2R_RECTANGLE_SIZE, (h << 16) | w);
1073 wmb(); 1073 wmb();
1074 pm2_WR(par, PM2R_RENDER,PM2F_RENDER_RECTANGLE | 1074 pm2_WR(par, PM2R_RENDER, PM2F_RENDER_RECTANGLE |
1075 (x<xsrc ? PM2F_INCREASE_X : 0) | 1075 (x<xsrc ? PM2F_INCREASE_X : 0) |
1076 (y<ysrc ? PM2F_INCREASE_Y : 0) | 1076 (y<ysrc ? PM2F_INCREASE_Y : 0) |
1077 (copy ? 0 : PM2F_RENDER_FASTFILL)); 1077 (copy ? 0 : PM2F_RENDER_FASTFILL));
@@ -1234,7 +1234,7 @@ static int __devinit pm2fb_probe(struct pci_dev *pdev,
1234 DPRINTK("Adjusting register base for big-endian.\n"); 1234 DPRINTK("Adjusting register base for big-endian.\n");
1235#endif 1235#endif
1236 DPRINTK("Register base at 0x%lx\n", pm2fb_fix.mmio_start); 1236 DPRINTK("Register base at 0x%lx\n", pm2fb_fix.mmio_start);
1237 1237
1238 /* Registers - request region and map it. */ 1238 /* Registers - request region and map it. */
1239 if ( !request_mem_region(pm2fb_fix.mmio_start, pm2fb_fix.mmio_len, 1239 if ( !request_mem_region(pm2fb_fix.mmio_start, pm2fb_fix.mmio_len,
1240 "pm2fb regbase") ) { 1240 "pm2fb regbase") ) {
@@ -1317,17 +1317,17 @@ static int __devinit pm2fb_probe(struct pci_dev *pdev,
1317 } 1317 }
1318 1318
1319 info->fbops = &pm2fb_ops; 1319 info->fbops = &pm2fb_ops;
1320 info->fix = pm2fb_fix; 1320 info->fix = pm2fb_fix;
1321 info->pseudo_palette = default_par->palette; 1321 info->pseudo_palette = default_par->palette;
1322 info->flags = FBINFO_DEFAULT | 1322 info->flags = FBINFO_DEFAULT |
1323 FBINFO_HWACCEL_YPAN | 1323 FBINFO_HWACCEL_YPAN |
1324 FBINFO_HWACCEL_COPYAREA | 1324 FBINFO_HWACCEL_COPYAREA |
1325 FBINFO_HWACCEL_FILLRECT; 1325 FBINFO_HWACCEL_FILLRECT;
1326 1326
1327 if (!mode) 1327 if (!mode)
1328 mode = "640x480@60"; 1328 mode = "640x480@60";
1329 1329
1330 err = fb_find_mode(&info->var, info, mode, NULL, 0, NULL, 8); 1330 err = fb_find_mode(&info->var, info, mode, NULL, 0, NULL, 8);
1331 if (!err || err == 4) 1331 if (!err || err == 4)
1332 info->var = pm2fb_var; 1332 info->var = pm2fb_var;
1333 1333
@@ -1348,8 +1348,8 @@ static int __devinit pm2fb_probe(struct pci_dev *pdev,
1348 return 0; 1348 return 0;
1349 1349
1350 err_exit_all: 1350 err_exit_all:
1351 fb_dealloc_cmap(&info->cmap); 1351 fb_dealloc_cmap(&info->cmap);
1352 err_exit_both: 1352 err_exit_both:
1353 iounmap(info->screen_base); 1353 iounmap(info->screen_base);
1354 release_mem_region(pm2fb_fix.smem_start, pm2fb_fix.smem_len); 1354 release_mem_region(pm2fb_fix.smem_start, pm2fb_fix.smem_len);
1355 err_exit_mmio: 1355 err_exit_mmio:
@@ -1374,7 +1374,7 @@ static void __devexit pm2fb_remove(struct pci_dev *pdev)
1374 struct pm2fb_par *par = info->par; 1374 struct pm2fb_par *par = info->par;
1375 1375
1376 unregister_framebuffer(info); 1376 unregister_framebuffer(info);
1377 1377
1378 iounmap(info->screen_base); 1378 iounmap(info->screen_base);
1379 release_mem_region(fix->smem_start, fix->smem_len); 1379 release_mem_region(fix->smem_start, fix->smem_len);
1380 iounmap(par->v_regs); 1380 iounmap(par->v_regs);
@@ -1402,9 +1402,9 @@ static struct pci_device_id pm2fb_id_table[] = {
1402 1402
1403static struct pci_driver pm2fb_driver = { 1403static struct pci_driver pm2fb_driver = {
1404 .name = "pm2fb", 1404 .name = "pm2fb",
1405 .id_table = pm2fb_id_table, 1405 .id_table = pm2fb_id_table,
1406 .probe = pm2fb_probe, 1406 .probe = pm2fb_probe,
1407 .remove = __devexit_p(pm2fb_remove), 1407 .remove = __devexit_p(pm2fb_remove),
1408}; 1408};
1409 1409
1410MODULE_DEVICE_TABLE(pci, pm2fb_id_table); 1410MODULE_DEVICE_TABLE(pci, pm2fb_id_table);
@@ -1423,7 +1423,7 @@ static int __init pm2fb_setup(char *options)
1423 if (!options || !*options) 1423 if (!options || !*options)
1424 return 0; 1424 return 0;
1425 1425
1426 while ((this_opt = strsep(&options, ",")) != NULL) { 1426 while ((this_opt = strsep(&options, ",")) != NULL) {
1427 if (!*this_opt) 1427 if (!*this_opt)
1428 continue; 1428 continue;
1429 if(!strcmp(this_opt, "lowhsync")) { 1429 if(!strcmp(this_opt, "lowhsync")) {
diff --git a/drivers/video/pm3fb.c b/drivers/video/pm3fb.c
index b52e883f0a52..5b3f54c0918e 100644
--- a/drivers/video/pm3fb.c
+++ b/drivers/video/pm3fb.c
@@ -77,7 +77,7 @@ static struct fb_fix_screeninfo pm3fb_fix __devinitdata = {
77 .xpanstep = 1, 77 .xpanstep = 1,
78 .ypanstep = 1, 78 .ypanstep = 1,
79 .ywrapstep = 0, 79 .ywrapstep = 0,
80 .accel = FB_ACCEL_NONE, 80 .accel = FB_ACCEL_3DLABS_PERMEDIA3,
81}; 81};
82 82
83/* 83/*
@@ -185,6 +185,238 @@ static inline int pm3fb_shift_bpp(unsigned bpp, int v)
185 return 0; 185 return 0;
186} 186}
187 187
188/* acceleration */
189static int pm3fb_sync(struct fb_info *info)
190{
191 struct pm3_par *par = info->par;
192
193 PM3_WAIT(par, 2);
194 PM3_WRITE_REG(par, PM3FilterMode, PM3FilterModeSync);
195 PM3_WRITE_REG(par, PM3Sync, 0);
196 mb();
197 do {
198 while ((PM3_READ_REG(par, PM3OutFIFOWords)) == 0);
199 rmb();
200 } while ((PM3_READ_REG(par, PM3OutputFifo)) != PM3Sync_Tag);
201
202 return 0;
203}
204
205static void pm3fb_init_engine(struct fb_info *info)
206{
207 struct pm3_par *par = info->par;
208 const u32 width = (info->var.xres_virtual + 7) & ~7;
209
210 PM3_WAIT(par, 50);
211 PM3_WRITE_REG(par, PM3FilterMode, PM3FilterModeSync);
212 PM3_WRITE_REG(par, PM3StatisticMode, 0x0);
213 PM3_WRITE_REG(par, PM3DeltaMode, 0x0);
214 PM3_WRITE_REG(par, PM3RasterizerMode, 0x0);
215 PM3_WRITE_REG(par, PM3ScissorMode, 0x0);
216 PM3_WRITE_REG(par, PM3LineStippleMode, 0x0);
217 PM3_WRITE_REG(par, PM3AreaStippleMode, 0x0);
218 PM3_WRITE_REG(par, PM3GIDMode, 0x0);
219 PM3_WRITE_REG(par, PM3DepthMode, 0x0);
220 PM3_WRITE_REG(par, PM3StencilMode, 0x0);
221 PM3_WRITE_REG(par, PM3StencilData, 0x0);
222 PM3_WRITE_REG(par, PM3ColorDDAMode, 0x0);
223 PM3_WRITE_REG(par, PM3TextureCoordMode, 0x0);
224 PM3_WRITE_REG(par, PM3TextureIndexMode0, 0x0);
225 PM3_WRITE_REG(par, PM3TextureIndexMode1, 0x0);
226 PM3_WRITE_REG(par, PM3TextureReadMode, 0x0);
227 PM3_WRITE_REG(par, PM3LUTMode, 0x0);
228 PM3_WRITE_REG(par, PM3TextureFilterMode, 0x0);
229 PM3_WRITE_REG(par, PM3TextureCompositeMode, 0x0);
230 PM3_WRITE_REG(par, PM3TextureApplicationMode, 0x0);
231 PM3_WRITE_REG(par, PM3TextureCompositeColorMode1, 0x0);
232 PM3_WRITE_REG(par, PM3TextureCompositeAlphaMode1, 0x0);
233 PM3_WRITE_REG(par, PM3TextureCompositeColorMode0, 0x0);
234 PM3_WRITE_REG(par, PM3TextureCompositeAlphaMode0, 0x0);
235 PM3_WRITE_REG(par, PM3FogMode, 0x0);
236 PM3_WRITE_REG(par, PM3ChromaTestMode, 0x0);
237 PM3_WRITE_REG(par, PM3AlphaTestMode, 0x0);
238 PM3_WRITE_REG(par, PM3AntialiasMode, 0x0);
239 PM3_WRITE_REG(par, PM3YUVMode, 0x0);
240 PM3_WRITE_REG(par, PM3AlphaBlendColorMode, 0x0);
241 PM3_WRITE_REG(par, PM3AlphaBlendAlphaMode, 0x0);
242 PM3_WRITE_REG(par, PM3DitherMode, 0x0);
243 PM3_WRITE_REG(par, PM3LogicalOpMode, 0x0);
244 PM3_WRITE_REG(par, PM3RouterMode, 0x0);
245 PM3_WRITE_REG(par, PM3Window, 0x0);
246
247 PM3_WRITE_REG(par, PM3Config2D, 0x0);
248
249 PM3_WRITE_REG(par, PM3SpanColorMask, 0xffffffff);
250
251 PM3_WRITE_REG(par, PM3XBias, 0x0);
252 PM3_WRITE_REG(par, PM3YBias, 0x0);
253 PM3_WRITE_REG(par, PM3DeltaControl, 0x0);
254
255 PM3_WRITE_REG(par, PM3BitMaskPattern, 0xffffffff);
256
257 PM3_WRITE_REG(par, PM3FBDestReadEnables,
258 PM3FBDestReadEnables_E(0xff) |
259 PM3FBDestReadEnables_R(0xff) |
260 PM3FBDestReadEnables_ReferenceAlpha(0xff));
261 PM3_WRITE_REG(par, PM3FBDestReadBufferAddr0, 0x0);
262 PM3_WRITE_REG(par, PM3FBDestReadBufferOffset0, 0x0);
263 PM3_WRITE_REG(par, PM3FBDestReadBufferWidth0,
264 PM3FBDestReadBufferWidth_Width(width));
265
266 PM3_WRITE_REG(par, PM3FBDestReadMode,
267 PM3FBDestReadMode_ReadEnable |
268 PM3FBDestReadMode_Enable0);
269 PM3_WRITE_REG(par, PM3FBSourceReadBufferAddr, 0x0);
270 PM3_WRITE_REG(par, PM3FBSourceReadBufferOffset, 0x0);
271 PM3_WRITE_REG(par, PM3FBSourceReadBufferWidth,
272 PM3FBSourceReadBufferWidth_Width(width));
273 PM3_WRITE_REG(par, PM3FBSourceReadMode,
274 PM3FBSourceReadMode_Blocking |
275 PM3FBSourceReadMode_ReadEnable);
276
277 PM3_WAIT(par, 2);
278 {
279 unsigned long rm = 1;
280 switch (info->var.bits_per_pixel) {
281 case 8:
282 PM3_WRITE_REG(par, PM3PixelSize,
283 PM3PixelSize_GLOBAL_8BIT);
284 break;
285 case 16:
286 PM3_WRITE_REG(par, PM3PixelSize,
287 PM3PixelSize_GLOBAL_16BIT);
288 break;
289 case 32:
290 PM3_WRITE_REG(par, PM3PixelSize,
291 PM3PixelSize_GLOBAL_32BIT);
292 break;
293 default:
294 DPRINTK(1, "Unsupported depth %d\n",
295 info->var.bits_per_pixel);
296 break;
297 }
298 PM3_WRITE_REG(par, PM3RasterizerMode, rm);
299 }
300
301 PM3_WAIT(par, 20);
302 PM3_WRITE_REG(par, PM3FBSoftwareWriteMask, 0xffffffff);
303 PM3_WRITE_REG(par, PM3FBHardwareWriteMask, 0xffffffff);
304 PM3_WRITE_REG(par, PM3FBWriteMode,
305 PM3FBWriteMode_WriteEnable |
306 PM3FBWriteMode_OpaqueSpan |
307 PM3FBWriteMode_Enable0);
308 PM3_WRITE_REG(par, PM3FBWriteBufferAddr0, 0x0);
309 PM3_WRITE_REG(par, PM3FBWriteBufferOffset0, 0x0);
310 PM3_WRITE_REG(par, PM3FBWriteBufferWidth0,
311 PM3FBWriteBufferWidth_Width(width));
312
313 PM3_WRITE_REG(par, PM3SizeOfFramebuffer, 0x0);
314 {
315 /* size in lines of FB */
316 unsigned long sofb = info->screen_size /
317 info->fix.line_length;
318 if (sofb > 4095)
319 PM3_WRITE_REG(par, PM3SizeOfFramebuffer, 4095);
320 else
321 PM3_WRITE_REG(par, PM3SizeOfFramebuffer, sofb);
322
323 switch (info->var.bits_per_pixel) {
324 case 8:
325 PM3_WRITE_REG(par, PM3DitherMode,
326 (1 << 10) | (2 << 3));
327 break;
328 case 16:
329 PM3_WRITE_REG(par, PM3DitherMode,
330 (1 << 10) | (1 << 3));
331 break;
332 case 32:
333 PM3_WRITE_REG(par, PM3DitherMode,
334 (1 << 10) | (0 << 3));
335 break;
336 default:
337 DPRINTK(1, "Unsupported depth %d\n",
338 info->current_par->depth);
339 break;
340 }
341 }
342
343 PM3_WRITE_REG(par, PM3dXDom, 0x0);
344 PM3_WRITE_REG(par, PM3dXSub, 0x0);
345 PM3_WRITE_REG(par, PM3dY, (1 << 16));
346 PM3_WRITE_REG(par, PM3StartXDom, 0x0);
347 PM3_WRITE_REG(par, PM3StartXSub, 0x0);
348 PM3_WRITE_REG(par, PM3StartY, 0x0);
349 PM3_WRITE_REG(par, PM3Count, 0x0);
350
351/* Disable LocalBuffer. better safe than sorry */
352 PM3_WRITE_REG(par, PM3LBDestReadMode, 0x0);
353 PM3_WRITE_REG(par, PM3LBDestReadEnables, 0x0);
354 PM3_WRITE_REG(par, PM3LBSourceReadMode, 0x0);
355 PM3_WRITE_REG(par, PM3LBWriteMode, 0x0);
356
357 pm3fb_sync(info);
358}
359
360static void pm3fb_fillrect (struct fb_info *info,
361 const struct fb_fillrect *region)
362{
363 struct pm3_par *par = info->par;
364 struct fb_fillrect modded;
365 int vxres, vyres;
366 u32 color = (info->fix.visual == FB_VISUAL_TRUECOLOR) ?
367 ((u32*)info->pseudo_palette)[region->color] : region->color;
368
369 if (info->state != FBINFO_STATE_RUNNING)
370 return;
371 if ((info->flags & FBINFO_HWACCEL_DISABLED) ||
372 region->rop != ROP_COPY ) {
373 cfb_fillrect(info, region);
374 return;
375 }
376
377 vxres = info->var.xres_virtual;
378 vyres = info->var.yres_virtual;
379
380 memcpy(&modded, region, sizeof(struct fb_fillrect));
381
382 if(!modded.width || !modded.height ||
383 modded.dx >= vxres || modded.dy >= vyres)
384 return;
385
386 if(modded.dx + modded.width > vxres)
387 modded.width = vxres - modded.dx;
388 if(modded.dy + modded.height > vyres)
389 modded.height = vyres - modded.dy;
390
391 if(info->var.bits_per_pixel == 8)
392 color |= color << 8;
393 if(info->var.bits_per_pixel <= 16)
394 color |= color << 16;
395
396 PM3_WAIT(par, 4);
397
398 PM3_WRITE_REG(par, PM3Config2D,
399 PM3Config2D_UseConstantSource |
400 PM3Config2D_ForegroundROPEnable |
401 (PM3Config2D_ForegroundROP(0x3)) | /* Ox3 is GXcopy */
402 PM3Config2D_FBWriteEnable);
403
404 PM3_WRITE_REG(par, PM3ForegroundColor, color);
405
406 PM3_WRITE_REG(par, PM3RectanglePosition,
407 (PM3RectanglePosition_XOffset(modded.dx)) |
408 (PM3RectanglePosition_YOffset(modded.dy)));
409
410 PM3_WRITE_REG(par, PM3Render2D,
411 PM3Render2D_XPositive |
412 PM3Render2D_YPositive |
413 PM3Render2D_Operation_Normal |
414 PM3Render2D_SpanOperation |
415 (PM3Render2D_Width(modded.width)) |
416 (PM3Render2D_Height(modded.height)));
417}
418/* end of acceleration functions */
419
188/* write the mode to registers */ 420/* write the mode to registers */
189static void pm3fb_write_mode(struct fb_info *info) 421static void pm3fb_write_mode(struct fb_info *info)
190{ 422{
@@ -380,8 +612,6 @@ static void pm3fb_write_mode(struct fb_info *info)
380/* 612/*
381 * hardware independent functions 613 * hardware independent functions
382 */ 614 */
383int pm3fb_init(void);
384
385static int pm3fb_check_var(struct fb_var_screeninfo *var, struct fb_info *info) 615static int pm3fb_check_var(struct fb_var_screeninfo *var, struct fb_info *info)
386{ 616{
387 u32 lpitch; 617 u32 lpitch;
@@ -528,6 +758,7 @@ static int pm3fb_set_par(struct fb_info *info)
528 pm3fb_clear_colormap(par, 0, 0, 0); 758 pm3fb_clear_colormap(par, 0, 0, 0);
529 PM3_WRITE_DAC_REG(par, PM3RD_CursorMode, 759 PM3_WRITE_DAC_REG(par, PM3RD_CursorMode,
530 PM3RD_CursorMode_CURSOR_DISABLE); 760 PM3RD_CursorMode_CURSOR_DISABLE);
761 pm3fb_init_engine(info);
531 pm3fb_write_mode(info); 762 pm3fb_write_mode(info);
532 return 0; 763 return 0;
533} 764}
@@ -675,10 +906,11 @@ static struct fb_ops pm3fb_ops = {
675 .fb_set_par = pm3fb_set_par, 906 .fb_set_par = pm3fb_set_par,
676 .fb_setcolreg = pm3fb_setcolreg, 907 .fb_setcolreg = pm3fb_setcolreg,
677 .fb_pan_display = pm3fb_pan_display, 908 .fb_pan_display = pm3fb_pan_display,
678 .fb_fillrect = cfb_fillrect, 909 .fb_fillrect = pm3fb_fillrect,
679 .fb_copyarea = cfb_copyarea, 910 .fb_copyarea = cfb_copyarea,
680 .fb_imageblit = cfb_imageblit, 911 .fb_imageblit = cfb_imageblit,
681 .fb_blank = pm3fb_blank, 912 .fb_blank = pm3fb_blank,
913 .fb_sync = pm3fb_sync,
682}; 914};
683 915
684/* ------------------------------------------------------------------------- */ 916/* ------------------------------------------------------------------------- */
@@ -847,7 +1079,8 @@ static int __devinit pm3fb_probe(struct pci_dev *dev,
847 1079
848 info->fix = pm3fb_fix; 1080 info->fix = pm3fb_fix;
849 info->pseudo_palette = par->palette; 1081 info->pseudo_palette = par->palette;
850 info->flags = FBINFO_DEFAULT;/* | FBINFO_HWACCEL_YPAN;*/ 1082 info->flags = FBINFO_DEFAULT |
1083 FBINFO_HWACCEL_FILLRECT;/* | FBINFO_HWACCEL_YPAN;*/
851 1084
852 /* 1085 /*
853 * This should give a reasonable default video mode. The following is 1086 * This should give a reasonable default video mode. The following is
@@ -935,35 +1168,12 @@ static struct pci_driver pm3fb_driver = {
935 1168
936MODULE_DEVICE_TABLE(pci, pm3fb_id_table); 1169MODULE_DEVICE_TABLE(pci, pm3fb_id_table);
937 1170
938#ifndef MODULE 1171static int __init pm3fb_init(void)
939 /*
940 * Setup
941 */
942
943/*
944 * Only necessary if your driver takes special options,
945 * otherwise we fall back on the generic fb_setup().
946 */
947static int __init pm3fb_setup(char *options)
948{ 1172{
949 /* Parse user speficied options (`video=pm3fb:') */
950 return 0;
951}
952#endif /* MODULE */
953
954int __init pm3fb_init(void)
955{
956 /*
957 * For kernel boot options (in 'video=pm3fb:<options>' format)
958 */
959#ifndef MODULE 1173#ifndef MODULE
960 char *option = NULL; 1174 if (fb_get_options("pm3fb", NULL))
961
962 if (fb_get_options("pm3fb", &option))
963 return -ENODEV; 1175 return -ENODEV;
964 pm3fb_setup(option);
965#endif 1176#endif
966
967 return pci_register_driver(&pm3fb_driver); 1177 return pci_register_driver(&pm3fb_driver);
968} 1178}
969 1179
diff --git a/drivers/video/ps3fb.c b/drivers/video/ps3fb.c
index 08b7ffbbbbd8..3972aa8cf859 100644
--- a/drivers/video/ps3fb.c
+++ b/drivers/video/ps3fb.c
@@ -812,6 +812,7 @@ static int ps3fb_ioctl(struct fb_info *info, unsigned int cmd,
812 812
813static int ps3fbd(void *arg) 813static int ps3fbd(void *arg)
814{ 814{
815 set_freezable();
815 while (!kthread_should_stop()) { 816 while (!kthread_should_stop()) {
816 try_to_freeze(); 817 try_to_freeze();
817 set_current_state(TASK_INTERRUPTIBLE); 818 set_current_state(TASK_INTERRUPTIBLE);
diff --git a/drivers/video/pvr2fb.c b/drivers/video/pvr2fb.c
index 2ba959a83eb0..0f88c30f94f8 100644
--- a/drivers/video/pvr2fb.c
+++ b/drivers/video/pvr2fb.c
@@ -333,24 +333,25 @@ static int pvr2fb_setcolreg(unsigned int regno, unsigned int red,
333 ((blue & 0xf800) >> 11); 333 ((blue & 0xf800) >> 11);
334 334
335 pvr2fb_set_pal_entry(par, regno, tmp); 335 pvr2fb_set_pal_entry(par, regno, tmp);
336 ((u16*)(info->pseudo_palette))[regno] = tmp;
337 break; 336 break;
338 case 24: /* RGB 888 */ 337 case 24: /* RGB 888 */
339 red >>= 8; green >>= 8; blue >>= 8; 338 red >>= 8; green >>= 8; blue >>= 8;
340 ((u32*)(info->pseudo_palette))[regno] = (red << 16) | (green << 8) | blue; 339 tmp = (red << 16) | (green << 8) | blue;
341 break; 340 break;
342 case 32: /* ARGB 8888 */ 341 case 32: /* ARGB 8888 */
343 red >>= 8; green >>= 8; blue >>= 8; 342 red >>= 8; green >>= 8; blue >>= 8;
344 tmp = (transp << 24) | (red << 16) | (green << 8) | blue; 343 tmp = (transp << 24) | (red << 16) | (green << 8) | blue;
345 344
346 pvr2fb_set_pal_entry(par, regno, tmp); 345 pvr2fb_set_pal_entry(par, regno, tmp);
347 ((u32*)(info->pseudo_palette))[regno] = tmp;
348 break; 346 break;
349 default: 347 default:
350 pr_debug("Invalid bit depth %d?!?\n", info->var.bits_per_pixel); 348 pr_debug("Invalid bit depth %d?!?\n", info->var.bits_per_pixel);
351 return 1; 349 return 1;
352 } 350 }
353 351
352 if (regno < 16)
353 ((u32*)(info->pseudo_palette))[regno] = tmp;
354
354 return 0; 355 return 0;
355} 356}
356 357
diff --git a/drivers/video/q40fb.c b/drivers/video/q40fb.c
index 48536c3e58a4..4beac1df617b 100644
--- a/drivers/video/q40fb.c
+++ b/drivers/video/q40fb.c
@@ -95,7 +95,7 @@ static int __init q40fb_probe(struct platform_device *dev)
95 /* mapped in q40/config.c */ 95 /* mapped in q40/config.c */
96 q40fb_fix.smem_start = Q40_PHYS_SCREEN_ADDR; 96 q40fb_fix.smem_start = Q40_PHYS_SCREEN_ADDR;
97 97
98 info = framebuffer_alloc(sizeof(u32) * 256, &dev->dev); 98 info = framebuffer_alloc(sizeof(u32) * 16, &dev->dev);
99 if (!info) 99 if (!info)
100 return -ENOMEM; 100 return -ENOMEM;
101 101
diff --git a/drivers/video/riva/riva_hw.c b/drivers/video/riva/riva_hw.c
index 70bfd78eca81..13307703a9f0 100644
--- a/drivers/video/riva/riva_hw.c
+++ b/drivers/video/riva/riva_hw.c
@@ -1223,6 +1223,8 @@ static int CalcVClock
1223 } 1223 }
1224 } 1224 }
1225 } 1225 }
1226
1227 /* non-zero: M/N/P/clock values assigned. zero: error (not set) */
1226 return (DeltaOld != 0xFFFFFFFF); 1228 return (DeltaOld != 0xFFFFFFFF);
1227} 1229}
1228/* 1230/*
@@ -1240,7 +1242,10 @@ int CalcStateExt
1240 int dotClock 1242 int dotClock
1241) 1243)
1242{ 1244{
1243 int pixelDepth, VClk, m, n, p; 1245 int pixelDepth;
1246 int uninitialized_var(VClk),uninitialized_var(m),
1247 uninitialized_var(n), uninitialized_var(p);
1248
1244 /* 1249 /*
1245 * Save mode parameters. 1250 * Save mode parameters.
1246 */ 1251 */
diff --git a/drivers/video/sgivwfb.c b/drivers/video/sgivwfb.c
index ebb6756aea08..4fb16240c04d 100644
--- a/drivers/video/sgivwfb.c
+++ b/drivers/video/sgivwfb.c
@@ -752,7 +752,7 @@ static int __init sgivwfb_probe(struct platform_device *dev)
752 struct fb_info *info; 752 struct fb_info *info;
753 char *monitor; 753 char *monitor;
754 754
755 info = framebuffer_alloc(sizeof(struct sgivw_par) + sizeof(u32) * 256, &dev->dev); 755 info = framebuffer_alloc(sizeof(struct sgivw_par) + sizeof(u32) * 16, &dev->dev);
756 if (!info) 756 if (!info)
757 return -ENOMEM; 757 return -ENOMEM;
758 par = info->par; 758 par = info->par;
diff --git a/drivers/video/sis/sis.h b/drivers/video/sis/sis.h
index d5e2d9c27847..d53bf6945f0c 100644
--- a/drivers/video/sis/sis.h
+++ b/drivers/video/sis/sis.h
@@ -479,7 +479,7 @@ struct sis_video_info {
479 struct fb_var_screeninfo default_var; 479 struct fb_var_screeninfo default_var;
480 480
481 struct fb_fix_screeninfo sisfb_fix; 481 struct fb_fix_screeninfo sisfb_fix;
482 u32 pseudo_palette[17]; 482 u32 pseudo_palette[16];
483 483
484 struct sisfb_monitor { 484 struct sisfb_monitor {
485 u16 hmin; 485 u16 hmin;
diff --git a/drivers/video/sis/sis_main.c b/drivers/video/sis/sis_main.c
index 93d07ef85276..e8ccace01252 100644
--- a/drivers/video/sis/sis_main.c
+++ b/drivers/video/sis/sis_main.c
@@ -1405,12 +1405,18 @@ sisfb_setcolreg(unsigned regno, unsigned red, unsigned green, unsigned blue,
1405 } 1405 }
1406 break; 1406 break;
1407 case 16: 1407 case 16:
1408 if (regno >= 16)
1409 break;
1410
1408 ((u32 *)(info->pseudo_palette))[regno] = 1411 ((u32 *)(info->pseudo_palette))[regno] =
1409 (red & 0xf800) | 1412 (red & 0xf800) |
1410 ((green & 0xfc00) >> 5) | 1413 ((green & 0xfc00) >> 5) |
1411 ((blue & 0xf800) >> 11); 1414 ((blue & 0xf800) >> 11);
1412 break; 1415 break;
1413 case 32: 1416 case 32:
1417 if (regno >= 16)
1418 break;
1419
1414 red >>= 8; 1420 red >>= 8;
1415 green >>= 8; 1421 green >>= 8;
1416 blue >>= 8; 1422 blue >>= 8;
diff --git a/drivers/video/tgafb.c b/drivers/video/tgafb.c
index 5c0dab628099..89facb73edfc 100644
--- a/drivers/video/tgafb.c
+++ b/drivers/video/tgafb.c
@@ -1634,7 +1634,7 @@ tgafb_register(struct device *dev)
1634 FBINFO_HWACCEL_IMAGEBLIT | FBINFO_HWACCEL_FILLRECT; 1634 FBINFO_HWACCEL_IMAGEBLIT | FBINFO_HWACCEL_FILLRECT;
1635 info->fbops = &tgafb_ops; 1635 info->fbops = &tgafb_ops;
1636 info->screen_base = par->tga_fb_base; 1636 info->screen_base = par->tga_fb_base;
1637 info->pseudo_palette = (void *)(par + 1); 1637 info->pseudo_palette = par->palette;
1638 1638
1639 /* This should give a reasonable default video mode. */ 1639 /* This should give a reasonable default video mode. */
1640 if (tga_bus_pci) { 1640 if (tga_bus_pci) {
diff --git a/drivers/video/tridentfb.c b/drivers/video/tridentfb.c
index 55e8aa450bfa..c699864b6f4a 100644
--- a/drivers/video/tridentfb.c
+++ b/drivers/video/tridentfb.c
@@ -976,7 +976,7 @@ static int tridentfb_setcolreg(unsigned regno, unsigned red, unsigned green,
976 return 1; 976 return 1;
977 977
978 978
979 if (bpp==8) { 979 if (bpp == 8) {
980 t_outb(0xFF,0x3C6); 980 t_outb(0xFF,0x3C6);
981 t_outb(regno,0x3C8); 981 t_outb(regno,0x3C8);
982 982
@@ -984,19 +984,21 @@ static int tridentfb_setcolreg(unsigned regno, unsigned red, unsigned green,
984 t_outb(green>>10,0x3C9); 984 t_outb(green>>10,0x3C9);
985 t_outb(blue>>10,0x3C9); 985 t_outb(blue>>10,0x3C9);
986 986
987 } else if (bpp == 16) { /* RGB 565 */ 987 } else if (regno < 16) {
988 u32 col; 988 if (bpp == 16) { /* RGB 565 */
989 989 u32 col;
990 col = (red & 0xF800) | ((green & 0xFC00) >> 5) | 990
991 ((blue & 0xF800) >> 11); 991 col = (red & 0xF800) | ((green & 0xFC00) >> 5) |
992 col |= col << 16; 992 ((blue & 0xF800) >> 11);
993 ((u32 *)(info->pseudo_palette))[regno] = col; 993 col |= col << 16;
994 } else if (bpp == 32) /* ARGB 8888 */ 994 ((u32 *)(info->pseudo_palette))[regno] = col;
995 ((u32*)info->pseudo_palette)[regno] = 995 } else if (bpp == 32) /* ARGB 8888 */
996 ((transp & 0xFF00) <<16) | 996 ((u32*)info->pseudo_palette)[regno] =
997 ((red & 0xFF00) << 8) | 997 ((transp & 0xFF00) <<16) |
998 ((green & 0xFF00)) | 998 ((red & 0xFF00) << 8) |
999 ((blue & 0xFF00)>>8); 999 ((green & 0xFF00)) |
1000 ((blue & 0xFF00)>>8);
1001 }
1000 1002
1001// debug("exit\n"); 1003// debug("exit\n");
1002 return 0; 1004 return 0;
diff --git a/drivers/video/tx3912fb.c b/drivers/video/tx3912fb.c
index 07389ba01eff..e6f7c78da68b 100644
--- a/drivers/video/tx3912fb.c
+++ b/drivers/video/tx3912fb.c
@@ -291,7 +291,7 @@ int __init tx3912fb_init(void)
291 fb_info.fbops = &tx3912fb_ops; 291 fb_info.fbops = &tx3912fb_ops;
292 fb_info.var = tx3912fb_var; 292 fb_info.var = tx3912fb_var;
293 fb_info.fix = tx3912fb_fix; 293 fb_info.fix = tx3912fb_fix;
294 fb_info.pseudo_palette = pseudo_palette; 294 fb_info.pseudo_palette = cfb8;
295 fb_info.flags = FBINFO_DEFAULT; 295 fb_info.flags = FBINFO_DEFAULT;
296 296
297 /* Clear the framebuffer */ 297 /* Clear the framebuffer */
diff --git a/drivers/video/vt8623fb.c b/drivers/video/vt8623fb.c
index 30c0b948852b..4c3a63308df1 100644
--- a/drivers/video/vt8623fb.c
+++ b/drivers/video/vt8623fb.c
@@ -68,26 +68,26 @@ static const struct svga_pll vt8623_pll = {2, 127, 2, 7, 0, 3,
68 68
69/* CRT timing register sets */ 69/* CRT timing register sets */
70 70
71struct vga_regset vt8623_h_total_regs[] = {{0x00, 0, 7}, {0x36, 3, 3}, VGA_REGSET_END}; 71static struct vga_regset vt8623_h_total_regs[] = {{0x00, 0, 7}, {0x36, 3, 3}, VGA_REGSET_END};
72struct vga_regset vt8623_h_display_regs[] = {{0x01, 0, 7}, VGA_REGSET_END}; 72static struct vga_regset vt8623_h_display_regs[] = {{0x01, 0, 7}, VGA_REGSET_END};
73struct vga_regset vt8623_h_blank_start_regs[] = {{0x02, 0, 7}, VGA_REGSET_END}; 73static struct vga_regset vt8623_h_blank_start_regs[] = {{0x02, 0, 7}, VGA_REGSET_END};
74struct vga_regset vt8623_h_blank_end_regs[] = {{0x03, 0, 4}, {0x05, 7, 7}, {0x33, 5, 5}, VGA_REGSET_END}; 74static struct vga_regset vt8623_h_blank_end_regs[] = {{0x03, 0, 4}, {0x05, 7, 7}, {0x33, 5, 5}, VGA_REGSET_END};
75struct vga_regset vt8623_h_sync_start_regs[] = {{0x04, 0, 7}, {0x33, 4, 4}, VGA_REGSET_END}; 75static struct vga_regset vt8623_h_sync_start_regs[] = {{0x04, 0, 7}, {0x33, 4, 4}, VGA_REGSET_END};
76struct vga_regset vt8623_h_sync_end_regs[] = {{0x05, 0, 4}, VGA_REGSET_END}; 76static struct vga_regset vt8623_h_sync_end_regs[] = {{0x05, 0, 4}, VGA_REGSET_END};
77 77
78struct vga_regset vt8623_v_total_regs[] = {{0x06, 0, 7}, {0x07, 0, 0}, {0x07, 5, 5}, {0x35, 0, 0}, VGA_REGSET_END}; 78static struct vga_regset vt8623_v_total_regs[] = {{0x06, 0, 7}, {0x07, 0, 0}, {0x07, 5, 5}, {0x35, 0, 0}, VGA_REGSET_END};
79struct vga_regset vt8623_v_display_regs[] = {{0x12, 0, 7}, {0x07, 1, 1}, {0x07, 6, 6}, {0x35, 2, 2}, VGA_REGSET_END}; 79static struct vga_regset vt8623_v_display_regs[] = {{0x12, 0, 7}, {0x07, 1, 1}, {0x07, 6, 6}, {0x35, 2, 2}, VGA_REGSET_END};
80struct vga_regset vt8623_v_blank_start_regs[] = {{0x15, 0, 7}, {0x07, 3, 3}, {0x09, 5, 5}, {0x35, 3, 3}, VGA_REGSET_END}; 80static struct vga_regset vt8623_v_blank_start_regs[] = {{0x15, 0, 7}, {0x07, 3, 3}, {0x09, 5, 5}, {0x35, 3, 3}, VGA_REGSET_END};
81struct vga_regset vt8623_v_blank_end_regs[] = {{0x16, 0, 7}, VGA_REGSET_END}; 81static struct vga_regset vt8623_v_blank_end_regs[] = {{0x16, 0, 7}, VGA_REGSET_END};
82struct vga_regset vt8623_v_sync_start_regs[] = {{0x10, 0, 7}, {0x07, 2, 2}, {0x07, 7, 7}, {0x35, 1, 1}, VGA_REGSET_END}; 82static struct vga_regset vt8623_v_sync_start_regs[] = {{0x10, 0, 7}, {0x07, 2, 2}, {0x07, 7, 7}, {0x35, 1, 1}, VGA_REGSET_END};
83struct vga_regset vt8623_v_sync_end_regs[] = {{0x11, 0, 3}, VGA_REGSET_END}; 83static struct vga_regset vt8623_v_sync_end_regs[] = {{0x11, 0, 3}, VGA_REGSET_END};
84 84
85struct vga_regset vt8623_offset_regs[] = {{0x13, 0, 7}, {0x35, 5, 7}, VGA_REGSET_END}; 85static struct vga_regset vt8623_offset_regs[] = {{0x13, 0, 7}, {0x35, 5, 7}, VGA_REGSET_END};
86struct vga_regset vt8623_line_compare_regs[] = {{0x18, 0, 7}, {0x07, 4, 4}, {0x09, 6, 6}, {0x33, 0, 2}, {0x35, 4, 4}, VGA_REGSET_END}; 86static struct vga_regset vt8623_line_compare_regs[] = {{0x18, 0, 7}, {0x07, 4, 4}, {0x09, 6, 6}, {0x33, 0, 2}, {0x35, 4, 4}, VGA_REGSET_END};
87struct vga_regset vt8623_fetch_count_regs[] = {{0x1C, 0, 7}, {0x1D, 0, 1}, VGA_REGSET_END}; 87static struct vga_regset vt8623_fetch_count_regs[] = {{0x1C, 0, 7}, {0x1D, 0, 1}, VGA_REGSET_END};
88struct vga_regset vt8623_start_address_regs[] = {{0x0d, 0, 7}, {0x0c, 0, 7}, {0x34, 0, 7}, {0x48, 0, 1}, VGA_REGSET_END}; 88static struct vga_regset vt8623_start_address_regs[] = {{0x0d, 0, 7}, {0x0c, 0, 7}, {0x34, 0, 7}, {0x48, 0, 1}, VGA_REGSET_END};
89 89
90struct svga_timing_regs vt8623_timing_regs = { 90static struct svga_timing_regs vt8623_timing_regs = {
91 vt8623_h_total_regs, vt8623_h_display_regs, vt8623_h_blank_start_regs, 91 vt8623_h_total_regs, vt8623_h_display_regs, vt8623_h_blank_start_regs,
92 vt8623_h_blank_end_regs, vt8623_h_sync_start_regs, vt8623_h_sync_end_regs, 92 vt8623_h_blank_end_regs, vt8623_h_sync_start_regs, vt8623_h_sync_end_regs,
93 vt8623_v_total_regs, vt8623_v_display_regs, vt8623_v_blank_start_regs, 93 vt8623_v_total_regs, vt8623_v_display_regs, vt8623_v_blank_start_regs,
@@ -903,7 +903,7 @@ static void __exit vt8623fb_cleanup(void)
903 903
904/* Driver Initialisation */ 904/* Driver Initialisation */
905 905
906int __init vt8623fb_init(void) 906static int __init vt8623fb_init(void)
907{ 907{
908 908
909#ifndef MODULE 909#ifndef MODULE
diff --git a/drivers/w1/w1.c b/drivers/w1/w1.c
index f5c5b760ed7b..c6332108f1c5 100644
--- a/drivers/w1/w1.c
+++ b/drivers/w1/w1.c
@@ -805,6 +805,7 @@ static int w1_control(void *data)
805 struct w1_master *dev, *n; 805 struct w1_master *dev, *n;
806 int have_to_wait = 0; 806 int have_to_wait = 0;
807 807
808 set_freezable();
808 while (!kthread_should_stop() || have_to_wait) { 809 while (!kthread_should_stop() || have_to_wait) {
809 have_to_wait = 0; 810 have_to_wait = 0;
810 811
diff --git a/fs/9p/v9fs.c b/fs/9p/v9fs.c
index 45c35986d49f..0a7068e30ecb 100644
--- a/fs/9p/v9fs.c
+++ b/fs/9p/v9fs.c
@@ -131,7 +131,9 @@ static void v9fs_parse_options(char *options, struct v9fs_session_info *v9ses)
131 switch (token) { 131 switch (token) {
132 case Opt_debug: 132 case Opt_debug:
133 v9ses->debug = option; 133 v9ses->debug = option;
134#ifdef CONFIG_NET_9P_DEBUG
134 p9_debug_level = option; 135 p9_debug_level = option;
136#endif
135 break; 137 break;
136 case Opt_port: 138 case Opt_port:
137 v9ses->port = option; 139 v9ses->port = option;
diff --git a/fs/Kconfig b/fs/Kconfig
index ee11f8d94085..613df554728d 100644
--- a/fs/Kconfig
+++ b/fs/Kconfig
@@ -1675,6 +1675,7 @@ config NFSD_V3_ACL
1675config NFSD_V4 1675config NFSD_V4
1676 bool "Provide NFSv4 server support (EXPERIMENTAL)" 1676 bool "Provide NFSv4 server support (EXPERIMENTAL)"
1677 depends on NFSD_V3 && EXPERIMENTAL 1677 depends on NFSD_V3 && EXPERIMENTAL
1678 select RPCSEC_GSS_KRB5
1678 help 1679 help
1679 If you would like to include the NFSv4 server as well as the NFSv2 1680 If you would like to include the NFSv4 server as well as the NFSv2
1680 and NFSv3 servers, say Y here. This feature is experimental, and 1681 and NFSv3 servers, say Y here. This feature is experimental, and
diff --git a/fs/anon_inodes.c b/fs/anon_inodes.c
index a260198306c2..b4a75880f6fd 100644
--- a/fs/anon_inodes.c
+++ b/fs/anon_inodes.c
@@ -139,6 +139,7 @@ err_put_filp:
139 put_filp(file); 139 put_filp(file);
140 return error; 140 return error;
141} 141}
142EXPORT_SYMBOL_GPL(anon_inode_getfd);
142 143
143/* 144/*
144 * A single inode exists for all anon_inode files. Contrary to pipes, 145 * A single inode exists for all anon_inode files. Contrary to pipes,
diff --git a/fs/attr.c b/fs/attr.c
index a0a0c7b07ba3..f8dfc2269d85 100644
--- a/fs/attr.c
+++ b/fs/attr.c
@@ -42,7 +42,7 @@ int inode_change_ok(struct inode *inode, struct iattr *attr)
42 42
43 /* Make sure a caller can chmod. */ 43 /* Make sure a caller can chmod. */
44 if (ia_valid & ATTR_MODE) { 44 if (ia_valid & ATTR_MODE) {
45 if ((current->fsuid != inode->i_uid) && !capable(CAP_FOWNER)) 45 if (!is_owner_or_cap(inode))
46 goto error; 46 goto error;
47 /* Also check the setgid bit! */ 47 /* Also check the setgid bit! */
48 if (!in_group_p((ia_valid & ATTR_GID) ? attr->ia_gid : 48 if (!in_group_p((ia_valid & ATTR_GID) ? attr->ia_gid :
@@ -52,7 +52,7 @@ int inode_change_ok(struct inode *inode, struct iattr *attr)
52 52
53 /* Check for setting the inode time. */ 53 /* Check for setting the inode time. */
54 if (ia_valid & (ATTR_MTIME_SET | ATTR_ATIME_SET)) { 54 if (ia_valid & (ATTR_MTIME_SET | ATTR_ATIME_SET)) {
55 if (current->fsuid != inode->i_uid && !capable(CAP_FOWNER)) 55 if (!is_owner_or_cap(inode))
56 goto error; 56 goto error;
57 } 57 }
58fine: 58fine:
diff --git a/fs/buffer.c b/fs/buffer.c
index 424165b569f8..0f9006714230 100644
--- a/fs/buffer.c
+++ b/fs/buffer.c
@@ -356,7 +356,7 @@ static void free_more_memory(void)
356 for_each_online_pgdat(pgdat) { 356 for_each_online_pgdat(pgdat) {
357 zones = pgdat->node_zonelists[gfp_zone(GFP_NOFS)].zones; 357 zones = pgdat->node_zonelists[gfp_zone(GFP_NOFS)].zones;
358 if (*zones) 358 if (*zones)
359 try_to_free_pages(zones, GFP_NOFS); 359 try_to_free_pages(zones, 0, GFP_NOFS);
360 } 360 }
361} 361}
362 362
@@ -676,6 +676,39 @@ void mark_buffer_dirty_inode(struct buffer_head *bh, struct inode *inode)
676EXPORT_SYMBOL(mark_buffer_dirty_inode); 676EXPORT_SYMBOL(mark_buffer_dirty_inode);
677 677
678/* 678/*
679 * Mark the page dirty, and set it dirty in the radix tree, and mark the inode
680 * dirty.
681 *
682 * If warn is true, then emit a warning if the page is not uptodate and has
683 * not been truncated.
684 */
685static int __set_page_dirty(struct page *page,
686 struct address_space *mapping, int warn)
687{
688 if (unlikely(!mapping))
689 return !TestSetPageDirty(page);
690
691 if (TestSetPageDirty(page))
692 return 0;
693
694 write_lock_irq(&mapping->tree_lock);
695 if (page->mapping) { /* Race with truncate? */
696 WARN_ON_ONCE(warn && !PageUptodate(page));
697
698 if (mapping_cap_account_dirty(mapping)) {
699 __inc_zone_page_state(page, NR_FILE_DIRTY);
700 task_io_account_write(PAGE_CACHE_SIZE);
701 }
702 radix_tree_tag_set(&mapping->page_tree,
703 page_index(page), PAGECACHE_TAG_DIRTY);
704 }
705 write_unlock_irq(&mapping->tree_lock);
706 __mark_inode_dirty(mapping->host, I_DIRTY_PAGES);
707
708 return 1;
709}
710
711/*
679 * Add a page to the dirty page list. 712 * Add a page to the dirty page list.
680 * 713 *
681 * It is a sad fact of life that this function is called from several places 714 * It is a sad fact of life that this function is called from several places
@@ -702,7 +735,7 @@ EXPORT_SYMBOL(mark_buffer_dirty_inode);
702 */ 735 */
703int __set_page_dirty_buffers(struct page *page) 736int __set_page_dirty_buffers(struct page *page)
704{ 737{
705 struct address_space * const mapping = page_mapping(page); 738 struct address_space *mapping = page_mapping(page);
706 739
707 if (unlikely(!mapping)) 740 if (unlikely(!mapping))
708 return !TestSetPageDirty(page); 741 return !TestSetPageDirty(page);
@@ -719,21 +752,7 @@ int __set_page_dirty_buffers(struct page *page)
719 } 752 }
720 spin_unlock(&mapping->private_lock); 753 spin_unlock(&mapping->private_lock);
721 754
722 if (TestSetPageDirty(page)) 755 return __set_page_dirty(page, mapping, 1);
723 return 0;
724
725 write_lock_irq(&mapping->tree_lock);
726 if (page->mapping) { /* Race with truncate? */
727 if (mapping_cap_account_dirty(mapping)) {
728 __inc_zone_page_state(page, NR_FILE_DIRTY);
729 task_io_account_write(PAGE_CACHE_SIZE);
730 }
731 radix_tree_tag_set(&mapping->page_tree,
732 page_index(page), PAGECACHE_TAG_DIRTY);
733 }
734 write_unlock_irq(&mapping->tree_lock);
735 __mark_inode_dirty(mapping->host, I_DIRTY_PAGES);
736 return 1;
737} 756}
738EXPORT_SYMBOL(__set_page_dirty_buffers); 757EXPORT_SYMBOL(__set_page_dirty_buffers);
739 758
@@ -982,7 +1001,7 @@ grow_dev_page(struct block_device *bdev, sector_t block,
982 struct buffer_head *bh; 1001 struct buffer_head *bh;
983 1002
984 page = find_or_create_page(inode->i_mapping, index, 1003 page = find_or_create_page(inode->i_mapping, index,
985 mapping_gfp_mask(inode->i_mapping) & ~__GFP_FS); 1004 (mapping_gfp_mask(inode->i_mapping) & ~__GFP_FS)|__GFP_MOVABLE);
986 if (!page) 1005 if (!page)
987 return NULL; 1006 return NULL;
988 1007
@@ -1132,8 +1151,9 @@ __getblk_slow(struct block_device *bdev, sector_t block, int size)
1132 */ 1151 */
1133void fastcall mark_buffer_dirty(struct buffer_head *bh) 1152void fastcall mark_buffer_dirty(struct buffer_head *bh)
1134{ 1153{
1154 WARN_ON_ONCE(!buffer_uptodate(bh));
1135 if (!buffer_dirty(bh) && !test_set_buffer_dirty(bh)) 1155 if (!buffer_dirty(bh) && !test_set_buffer_dirty(bh))
1136 __set_page_dirty_nobuffers(bh->b_page); 1156 __set_page_dirty(bh->b_page, page_mapping(bh->b_page), 0);
1137} 1157}
1138 1158
1139/* 1159/*
diff --git a/fs/cifs/cifsfs.c b/fs/cifs/cifsfs.c
index 8b0cbf4a4ad0..bd0f2f2353ce 100644
--- a/fs/cifs/cifsfs.c
+++ b/fs/cifs/cifsfs.c
@@ -849,6 +849,7 @@ static int cifs_oplock_thread(void * dummyarg)
849 __u16 netfid; 849 __u16 netfid;
850 int rc; 850 int rc;
851 851
852 set_freezable();
852 do { 853 do {
853 if (try_to_freeze()) 854 if (try_to_freeze())
854 continue; 855 continue;
diff --git a/fs/cifs/connect.c b/fs/cifs/connect.c
index f4e92661b223..0a1b8bd1dfcb 100644
--- a/fs/cifs/connect.c
+++ b/fs/cifs/connect.c
@@ -363,6 +363,7 @@ cifs_demultiplex_thread(struct TCP_Server_Info *server)
363 GFP_KERNEL); 363 GFP_KERNEL);
364 } 364 }
365 365
366 set_freezable();
366 while (!kthread_should_stop()) { 367 while (!kthread_should_stop()) {
367 if (try_to_freeze()) 368 if (try_to_freeze())
368 continue; 369 continue;
diff --git a/fs/cifs/export.c b/fs/cifs/export.c
index 1d716392c3aa..96df1d51fdc3 100644
--- a/fs/cifs/export.c
+++ b/fs/cifs/export.c
@@ -29,6 +29,7 @@
29 */ 29 */
30 30
31#include <linux/fs.h> 31#include <linux/fs.h>
32#include <linux/exportfs.h>
32 33
33#ifdef CONFIG_CIFS_EXPERIMENTAL 34#ifdef CONFIG_CIFS_EXPERIMENTAL
34 35
diff --git a/fs/dcache.c b/fs/dcache.c
index 0e73aa0a0e8b..cb9d05056b54 100644
--- a/fs/dcache.c
+++ b/fs/dcache.c
@@ -883,6 +883,11 @@ static int shrink_dcache_memory(int nr, gfp_t gfp_mask)
883 return (dentry_stat.nr_unused / 100) * sysctl_vfs_cache_pressure; 883 return (dentry_stat.nr_unused / 100) * sysctl_vfs_cache_pressure;
884} 884}
885 885
886static struct shrinker dcache_shrinker = {
887 .shrink = shrink_dcache_memory,
888 .seeks = DEFAULT_SEEKS,
889};
890
886/** 891/**
887 * d_alloc - allocate a dcache entry 892 * d_alloc - allocate a dcache entry
888 * @parent: parent of entry to allocate 893 * @parent: parent of entry to allocate
@@ -2115,7 +2120,7 @@ static void __init dcache_init(unsigned long mempages)
2115 dentry_cache = KMEM_CACHE(dentry, 2120 dentry_cache = KMEM_CACHE(dentry,
2116 SLAB_RECLAIM_ACCOUNT|SLAB_PANIC|SLAB_MEM_SPREAD); 2121 SLAB_RECLAIM_ACCOUNT|SLAB_PANIC|SLAB_MEM_SPREAD);
2117 2122
2118 set_shrinker(DEFAULT_SEEKS, shrink_dcache_memory); 2123 register_shrinker(&dcache_shrinker);
2119 2124
2120 /* Hash may have been set up in dcache_init_early */ 2125 /* Hash may have been set up in dcache_init_early */
2121 if (!hashdist) 2126 if (!hashdist)
diff --git a/fs/dquot.c b/fs/dquot.c
index 8819d281500c..7e273151f589 100644
--- a/fs/dquot.c
+++ b/fs/dquot.c
@@ -538,6 +538,11 @@ static int shrink_dqcache_memory(int nr, gfp_t gfp_mask)
538 return (dqstats.free_dquots / 100) * sysctl_vfs_cache_pressure; 538 return (dqstats.free_dquots / 100) * sysctl_vfs_cache_pressure;
539} 539}
540 540
541static struct shrinker dqcache_shrinker = {
542 .shrink = shrink_dqcache_memory,
543 .seeks = DEFAULT_SEEKS,
544};
545
541/* 546/*
542 * Put reference to dquot 547 * Put reference to dquot
543 * NOTE: If you change this function please check whether dqput_blocks() works right... 548 * NOTE: If you change this function please check whether dqput_blocks() works right...
@@ -1870,7 +1875,7 @@ static int __init dquot_init(void)
1870 printk("Dquot-cache hash table entries: %ld (order %ld, %ld bytes)\n", 1875 printk("Dquot-cache hash table entries: %ld (order %ld, %ld bytes)\n",
1871 nr_hash, order, (PAGE_SIZE << order)); 1876 nr_hash, order, (PAGE_SIZE << order));
1872 1877
1873 set_shrinker(DEFAULT_SEEKS, shrink_dqcache_memory); 1878 register_shrinker(&dqcache_shrinker);
1874 1879
1875 return 0; 1880 return 0;
1876} 1881}
diff --git a/fs/ecryptfs/inode.c b/fs/ecryptfs/inode.c
index 83e94fedd4e9..e77a2ec71aa5 100644
--- a/fs/ecryptfs/inode.c
+++ b/fs/ecryptfs/inode.c
@@ -282,7 +282,7 @@ static struct dentry *ecryptfs_lookup(struct inode *dir, struct dentry *dentry,
282 struct dentry *lower_dentry; 282 struct dentry *lower_dentry;
283 struct vfsmount *lower_mnt; 283 struct vfsmount *lower_mnt;
284 char *encoded_name; 284 char *encoded_name;
285 unsigned int encoded_namelen; 285 int encoded_namelen;
286 struct ecryptfs_crypt_stat *crypt_stat = NULL; 286 struct ecryptfs_crypt_stat *crypt_stat = NULL;
287 struct ecryptfs_mount_crypt_stat *mount_crypt_stat; 287 struct ecryptfs_mount_crypt_stat *mount_crypt_stat;
288 char *page_virt = NULL; 288 char *page_virt = NULL;
@@ -473,7 +473,7 @@ static int ecryptfs_symlink(struct inode *dir, struct dentry *dentry,
473 struct dentry *lower_dir_dentry; 473 struct dentry *lower_dir_dentry;
474 umode_t mode; 474 umode_t mode;
475 char *encoded_symname; 475 char *encoded_symname;
476 unsigned int encoded_symlen; 476 int encoded_symlen;
477 struct ecryptfs_crypt_stat *crypt_stat = NULL; 477 struct ecryptfs_crypt_stat *crypt_stat = NULL;
478 478
479 lower_dentry = ecryptfs_dentry_to_lower(dentry); 479 lower_dentry = ecryptfs_dentry_to_lower(dentry);
diff --git a/fs/efs/namei.c b/fs/efs/namei.c
index ed4a207fe22a..5276b19423c1 100644
--- a/fs/efs/namei.c
+++ b/fs/efs/namei.c
@@ -75,6 +75,38 @@ struct dentry *efs_lookup(struct inode *dir, struct dentry *dentry, struct namei
75 return NULL; 75 return NULL;
76} 76}
77 77
78struct dentry *efs_get_dentry(struct super_block *sb, void *vobjp)
79{
80 __u32 *objp = vobjp;
81 unsigned long ino = objp[0];
82 __u32 generation = objp[1];
83 struct inode *inode;
84 struct dentry *result;
85
86 if (ino == 0)
87 return ERR_PTR(-ESTALE);
88 inode = iget(sb, ino);
89 if (inode == NULL)
90 return ERR_PTR(-ENOMEM);
91
92 if (is_bad_inode(inode) ||
93 (generation && inode->i_generation != generation)) {
94 result = ERR_PTR(-ESTALE);
95 goto out_iput;
96 }
97
98 result = d_alloc_anon(inode);
99 if (!result) {
100 result = ERR_PTR(-ENOMEM);
101 goto out_iput;
102 }
103 return result;
104
105 out_iput:
106 iput(inode);
107 return result;
108}
109
78struct dentry *efs_get_parent(struct dentry *child) 110struct dentry *efs_get_parent(struct dentry *child)
79{ 111{
80 struct dentry *parent; 112 struct dentry *parent;
diff --git a/fs/efs/super.c b/fs/efs/super.c
index e0a6839e68ae..d360c81f3a72 100644
--- a/fs/efs/super.c
+++ b/fs/efs/super.c
@@ -11,6 +11,7 @@
11#include <linux/efs_fs.h> 11#include <linux/efs_fs.h>
12#include <linux/efs_vh.h> 12#include <linux/efs_vh.h>
13#include <linux/efs_fs_sb.h> 13#include <linux/efs_fs_sb.h>
14#include <linux/exportfs.h>
14#include <linux/slab.h> 15#include <linux/slab.h>
15#include <linux/buffer_head.h> 16#include <linux/buffer_head.h>
16#include <linux/vfs.h> 17#include <linux/vfs.h>
@@ -113,6 +114,7 @@ static const struct super_operations efs_superblock_operations = {
113}; 114};
114 115
115static struct export_operations efs_export_ops = { 116static struct export_operations efs_export_ops = {
117 .get_dentry = efs_get_dentry,
116 .get_parent = efs_get_parent, 118 .get_parent = efs_get_parent,
117}; 119};
118 120
diff --git a/fs/exportfs/expfs.c b/fs/exportfs/expfs.c
index e98f6cd7200c..8adb32a9387a 100644
--- a/fs/exportfs/expfs.c
+++ b/fs/exportfs/expfs.c
@@ -1,15 +1,45 @@
1 1
2#include <linux/exportfs.h>
2#include <linux/fs.h> 3#include <linux/fs.h>
3#include <linux/file.h> 4#include <linux/file.h>
4#include <linux/module.h> 5#include <linux/module.h>
6#include <linux/mount.h>
5#include <linux/namei.h> 7#include <linux/namei.h>
6 8
7struct export_operations export_op_default; 9#define dprintk(fmt, args...) do{}while(0)
8 10
9#define CALL(ops,fun) ((ops->fun)?(ops->fun):export_op_default.fun)
10 11
11#define dprintk(fmt, args...) do{}while(0) 12static int get_name(struct dentry *dentry, char *name,
13 struct dentry *child);
14
15
16static struct dentry *exportfs_get_dentry(struct super_block *sb, void *obj)
17{
18 struct dentry *result = ERR_PTR(-ESTALE);
19
20 if (sb->s_export_op->get_dentry) {
21 result = sb->s_export_op->get_dentry(sb, obj);
22 if (!result)
23 result = ERR_PTR(-ESTALE);
24 }
25
26 return result;
27}
28
29static int exportfs_get_name(struct dentry *dir, char *name,
30 struct dentry *child)
31{
32 struct export_operations *nop = dir->d_sb->s_export_op;
12 33
34 if (nop->get_name)
35 return nop->get_name(dir, name, child);
36 else
37 return get_name(dir, name, child);
38}
39
40/*
41 * Check if the dentry or any of it's aliases is acceptable.
42 */
13static struct dentry * 43static struct dentry *
14find_acceptable_alias(struct dentry *result, 44find_acceptable_alias(struct dentry *result,
15 int (*acceptable)(void *context, struct dentry *dentry), 45 int (*acceptable)(void *context, struct dentry *dentry),
@@ -17,6 +47,9 @@ find_acceptable_alias(struct dentry *result,
17{ 47{
18 struct dentry *dentry, *toput = NULL; 48 struct dentry *dentry, *toput = NULL;
19 49
50 if (acceptable(context, result))
51 return result;
52
20 spin_lock(&dcache_lock); 53 spin_lock(&dcache_lock);
21 list_for_each_entry(dentry, &result->d_inode->i_dentry, d_alias) { 54 list_for_each_entry(dentry, &result->d_inode->i_dentry, d_alias) {
22 dget_locked(dentry); 55 dget_locked(dentry);
@@ -37,130 +70,50 @@ find_acceptable_alias(struct dentry *result,
37 return NULL; 70 return NULL;
38} 71}
39 72
40/** 73/*
41 * find_exported_dentry - helper routine to implement export_operations->decode_fh 74 * Find root of a disconnected subtree and return a reference to it.
42 * @sb: The &super_block identifying the filesystem
43 * @obj: An opaque identifier of the object to be found - passed to
44 * get_inode
45 * @parent: An optional opqaue identifier of the parent of the object.
46 * @acceptable: A function used to test possible &dentries to see if they are
47 * acceptable
48 * @context: A parameter to @acceptable so that it knows on what basis to
49 * judge.
50 *
51 * find_exported_dentry is the central helper routine to enable file systems
52 * to provide the decode_fh() export_operation. It's main task is to take
53 * an &inode, find or create an appropriate &dentry structure, and possibly
54 * splice this into the dcache in the correct place.
55 *
56 * The decode_fh() operation provided by the filesystem should call
57 * find_exported_dentry() with the same parameters that it received except
58 * that instead of the file handle fragment, pointers to opaque identifiers
59 * for the object and optionally its parent are passed. The default decode_fh
60 * routine passes one pointer to the start of the filehandle fragment, and
61 * one 8 bytes into the fragment. It is expected that most filesystems will
62 * take this approach, though the offset to the parent identifier may well be
63 * different.
64 *
65 * find_exported_dentry() will call get_dentry to get an dentry pointer from
66 * the file system. If any &dentry in the d_alias list is acceptable, it will
67 * be returned. Otherwise find_exported_dentry() will attempt to splice a new
68 * &dentry into the dcache using get_name() and get_parent() to find the
69 * appropriate place.
70 */ 75 */
71 76static struct dentry *
72struct dentry * 77find_disconnected_root(struct dentry *dentry)
73find_exported_dentry(struct super_block *sb, void *obj, void *parent,
74 int (*acceptable)(void *context, struct dentry *de),
75 void *context)
76{ 78{
77 struct dentry *result = NULL; 79 dget(dentry);
78 struct dentry *target_dir; 80 spin_lock(&dentry->d_lock);
79 int err; 81 while (!IS_ROOT(dentry) &&
80 struct export_operations *nops = sb->s_export_op; 82 (dentry->d_parent->d_flags & DCACHE_DISCONNECTED)) {
81 struct dentry *alias; 83 struct dentry *parent = dentry->d_parent;
82 int noprogress; 84 dget(parent);
83 char nbuf[NAME_MAX+1]; 85 spin_unlock(&dentry->d_lock);
84 86 dput(dentry);
85 /* 87 dentry = parent;
86 * Attempt to find the inode. 88 spin_lock(&dentry->d_lock);
87 */
88 result = CALL(sb->s_export_op,get_dentry)(sb,obj);
89 err = -ESTALE;
90 if (result == NULL)
91 goto err_out;
92 if (IS_ERR(result)) {
93 err = PTR_ERR(result);
94 goto err_out;
95 } 89 }
96 if (S_ISDIR(result->d_inode->i_mode) && 90 spin_unlock(&dentry->d_lock);
97 (result->d_flags & DCACHE_DISCONNECTED)) { 91 return dentry;
98 /* it is an unconnected directory, we must connect it */ 92}
99 ;
100 } else {
101 if (acceptable(context, result))
102 return result;
103 if (S_ISDIR(result->d_inode->i_mode)) {
104 err = -EACCES;
105 goto err_result;
106 }
107 93
108 alias = find_acceptable_alias(result, acceptable, context);
109 if (alias)
110 return alias;
111 }
112
113 /* It's a directory, or we are required to confirm the file's
114 * location in the tree based on the parent information
115 */
116 dprintk("find_exported_dentry: need to look harder for %s/%d\n",sb->s_id,*(int*)obj);
117 if (S_ISDIR(result->d_inode->i_mode))
118 target_dir = dget(result);
119 else {
120 if (parent == NULL)
121 goto err_result;
122 94
123 target_dir = CALL(sb->s_export_op,get_dentry)(sb,parent); 95/*
124 if (IS_ERR(target_dir)) 96 * Make sure target_dir is fully connected to the dentry tree.
125 err = PTR_ERR(target_dir); 97 *
126 if (target_dir == NULL || IS_ERR(target_dir)) 98 * It may already be, as the flag isn't always updated when connection happens.
127 goto err_result; 99 */
128 } 100static int
129 /* 101reconnect_path(struct super_block *sb, struct dentry *target_dir)
130 * Now we need to make sure that target_dir is properly connected. 102{
131 * It may already be, as the flag isn't always updated when connection 103 char nbuf[NAME_MAX+1];
132 * happens. 104 int noprogress = 0;
133 * So, we walk up parent links until we find a connected directory, 105 int err = -ESTALE;
134 * or we run out of directories. Then we find the parent, find
135 * the name of the child in that parent, and do a lookup.
136 * This should connect the child into the parent
137 * We then repeat.
138 */
139 106
140 /* it is possible that a confused file system might not let us complete 107 /*
108 * It is possible that a confused file system might not let us complete
141 * the path to the root. For example, if get_parent returns a directory 109 * the path to the root. For example, if get_parent returns a directory
142 * in which we cannot find a name for the child. While this implies a 110 * in which we cannot find a name for the child. While this implies a
143 * very sick filesystem we don't want it to cause knfsd to spin. Hence 111 * very sick filesystem we don't want it to cause knfsd to spin. Hence
144 * the noprogress counter. If we go through the loop 10 times (2 is 112 * the noprogress counter. If we go through the loop 10 times (2 is
145 * probably enough) without getting anywhere, we just give up 113 * probably enough) without getting anywhere, we just give up
146 */ 114 */
147 noprogress= 0;
148 while (target_dir->d_flags & DCACHE_DISCONNECTED && noprogress++ < 10) { 115 while (target_dir->d_flags & DCACHE_DISCONNECTED && noprogress++ < 10) {
149 struct dentry *pd = target_dir; 116 struct dentry *pd = find_disconnected_root(target_dir);
150
151 dget(pd);
152 spin_lock(&pd->d_lock);
153 while (!IS_ROOT(pd) &&
154 (pd->d_parent->d_flags&DCACHE_DISCONNECTED)) {
155 struct dentry *parent = pd->d_parent;
156
157 dget(parent);
158 spin_unlock(&pd->d_lock);
159 dput(pd);
160 pd = parent;
161 spin_lock(&pd->d_lock);
162 }
163 spin_unlock(&pd->d_lock);
164 117
165 if (!IS_ROOT(pd)) { 118 if (!IS_ROOT(pd)) {
166 /* must have found a connected parent - great */ 119 /* must have found a connected parent - great */
@@ -175,29 +128,40 @@ find_exported_dentry(struct super_block *sb, void *obj, void *parent,
175 spin_unlock(&pd->d_lock); 128 spin_unlock(&pd->d_lock);
176 noprogress = 0; 129 noprogress = 0;
177 } else { 130 } else {
178 /* we have hit the top of a disconnected path. Try 131 /*
179 * to find parent and connect 132 * We have hit the top of a disconnected path, try to
180 * note: racing with some other process renaming a 133 * find parent and connect.
181 * directory isn't much of a problem here. If someone 134 *
182 * renames the directory, it will end up properly 135 * Racing with some other process renaming a directory
183 * connected, which is what we want 136 * isn't much of a problem here. If someone renames
137 * the directory, it will end up properly connected,
138 * which is what we want
139 *
140 * Getting the parent can't be supported generically,
141 * the locking is too icky.
142 *
143 * Instead we just return EACCES. If server reboots
144 * or inodes get flushed, you lose
184 */ 145 */
185 struct dentry *ppd; 146 struct dentry *ppd = ERR_PTR(-EACCES);
186 struct dentry *npd; 147 struct dentry *npd;
187 148
188 mutex_lock(&pd->d_inode->i_mutex); 149 mutex_lock(&pd->d_inode->i_mutex);
189 ppd = CALL(nops,get_parent)(pd); 150 if (sb->s_export_op->get_parent)
151 ppd = sb->s_export_op->get_parent(pd);
190 mutex_unlock(&pd->d_inode->i_mutex); 152 mutex_unlock(&pd->d_inode->i_mutex);
191 153
192 if (IS_ERR(ppd)) { 154 if (IS_ERR(ppd)) {
193 err = PTR_ERR(ppd); 155 err = PTR_ERR(ppd);
194 dprintk("find_exported_dentry: get_parent of %ld failed, err %d\n", 156 dprintk("%s: get_parent of %ld failed, err %d\n",
195 pd->d_inode->i_ino, err); 157 __FUNCTION__, pd->d_inode->i_ino, err);
196 dput(pd); 158 dput(pd);
197 break; 159 break;
198 } 160 }
199 dprintk("find_exported_dentry: find name of %lu in %lu\n", pd->d_inode->i_ino, ppd->d_inode->i_ino); 161
200 err = CALL(nops,get_name)(ppd, nbuf, pd); 162 dprintk("%s: find name of %lu in %lu\n", __FUNCTION__,
163 pd->d_inode->i_ino, ppd->d_inode->i_ino);
164 err = exportfs_get_name(ppd, nbuf, pd);
201 if (err) { 165 if (err) {
202 dput(ppd); 166 dput(ppd);
203 dput(pd); 167 dput(pd);
@@ -208,13 +172,14 @@ find_exported_dentry(struct super_block *sb, void *obj, void *parent,
208 continue; 172 continue;
209 break; 173 break;
210 } 174 }
211 dprintk("find_exported_dentry: found name: %s\n", nbuf); 175 dprintk("%s: found name: %s\n", __FUNCTION__, nbuf);
212 mutex_lock(&ppd->d_inode->i_mutex); 176 mutex_lock(&ppd->d_inode->i_mutex);
213 npd = lookup_one_len(nbuf, ppd, strlen(nbuf)); 177 npd = lookup_one_len(nbuf, ppd, strlen(nbuf));
214 mutex_unlock(&ppd->d_inode->i_mutex); 178 mutex_unlock(&ppd->d_inode->i_mutex);
215 if (IS_ERR(npd)) { 179 if (IS_ERR(npd)) {
216 err = PTR_ERR(npd); 180 err = PTR_ERR(npd);
217 dprintk("find_exported_dentry: lookup failed: %d\n", err); 181 dprintk("%s: lookup failed: %d\n",
182 __FUNCTION__, err);
218 dput(ppd); 183 dput(ppd);
219 dput(pd); 184 dput(pd);
220 break; 185 break;
@@ -227,7 +192,7 @@ find_exported_dentry(struct super_block *sb, void *obj, void *parent,
227 if (npd == pd) 192 if (npd == pd)
228 noprogress = 0; 193 noprogress = 0;
229 else 194 else
230 printk("find_exported_dentry: npd != pd\n"); 195 printk("%s: npd != pd\n", __FUNCTION__);
231 dput(npd); 196 dput(npd);
232 dput(ppd); 197 dput(ppd);
233 if (IS_ROOT(pd)) { 198 if (IS_ROOT(pd)) {
@@ -243,15 +208,101 @@ find_exported_dentry(struct super_block *sb, void *obj, void *parent,
243 /* something went wrong - oh-well */ 208 /* something went wrong - oh-well */
244 if (!err) 209 if (!err)
245 err = -ESTALE; 210 err = -ESTALE;
246 goto err_target; 211 return err;
247 } 212 }
248 /* if we weren't after a directory, have one more step to go */ 213
249 if (result != target_dir) { 214 return 0;
250 struct dentry *nresult; 215}
251 err = CALL(nops,get_name)(target_dir, nbuf, result); 216
217/**
218 * find_exported_dentry - helper routine to implement export_operations->decode_fh
219 * @sb: The &super_block identifying the filesystem
220 * @obj: An opaque identifier of the object to be found - passed to
221 * get_inode
222 * @parent: An optional opqaue identifier of the parent of the object.
223 * @acceptable: A function used to test possible &dentries to see if they are
224 * acceptable
225 * @context: A parameter to @acceptable so that it knows on what basis to
226 * judge.
227 *
228 * find_exported_dentry is the central helper routine to enable file systems
229 * to provide the decode_fh() export_operation. It's main task is to take
230 * an &inode, find or create an appropriate &dentry structure, and possibly
231 * splice this into the dcache in the correct place.
232 *
233 * The decode_fh() operation provided by the filesystem should call
234 * find_exported_dentry() with the same parameters that it received except
235 * that instead of the file handle fragment, pointers to opaque identifiers
236 * for the object and optionally its parent are passed. The default decode_fh
237 * routine passes one pointer to the start of the filehandle fragment, and
238 * one 8 bytes into the fragment. It is expected that most filesystems will
239 * take this approach, though the offset to the parent identifier may well be
240 * different.
241 *
242 * find_exported_dentry() will call get_dentry to get an dentry pointer from
243 * the file system. If any &dentry in the d_alias list is acceptable, it will
244 * be returned. Otherwise find_exported_dentry() will attempt to splice a new
245 * &dentry into the dcache using get_name() and get_parent() to find the
246 * appropriate place.
247 */
248
249struct dentry *
250find_exported_dentry(struct super_block *sb, void *obj, void *parent,
251 int (*acceptable)(void *context, struct dentry *de),
252 void *context)
253{
254 struct dentry *result, *alias;
255 int err = -ESTALE;
256
257 /*
258 * Attempt to find the inode.
259 */
260 result = exportfs_get_dentry(sb, obj);
261 if (IS_ERR(result))
262 return result;
263
264 if (S_ISDIR(result->d_inode->i_mode)) {
265 if (!(result->d_flags & DCACHE_DISCONNECTED)) {
266 if (acceptable(context, result))
267 return result;
268 err = -EACCES;
269 goto err_result;
270 }
271
272 err = reconnect_path(sb, result);
273 if (err)
274 goto err_result;
275 } else {
276 struct dentry *target_dir, *nresult;
277 char nbuf[NAME_MAX+1];
278
279 alias = find_acceptable_alias(result, acceptable, context);
280 if (alias)
281 return alias;
282
283 if (parent == NULL)
284 goto err_result;
285
286 target_dir = exportfs_get_dentry(sb,parent);
287 if (IS_ERR(target_dir)) {
288 err = PTR_ERR(target_dir);
289 goto err_result;
290 }
291
292 err = reconnect_path(sb, target_dir);
293 if (err) {
294 dput(target_dir);
295 goto err_result;
296 }
297
298 /*
299 * As we weren't after a directory, have one more step to go.
300 */
301 err = exportfs_get_name(target_dir, nbuf, result);
252 if (!err) { 302 if (!err) {
253 mutex_lock(&target_dir->d_inode->i_mutex); 303 mutex_lock(&target_dir->d_inode->i_mutex);
254 nresult = lookup_one_len(nbuf, target_dir, strlen(nbuf)); 304 nresult = lookup_one_len(nbuf, target_dir,
305 strlen(nbuf));
255 mutex_unlock(&target_dir->d_inode->i_mutex); 306 mutex_unlock(&target_dir->d_inode->i_mutex);
256 if (!IS_ERR(nresult)) { 307 if (!IS_ERR(nresult)) {
257 if (nresult->d_inode) { 308 if (nresult->d_inode) {
@@ -261,11 +312,8 @@ find_exported_dentry(struct super_block *sb, void *obj, void *parent,
261 dput(nresult); 312 dput(nresult);
262 } 313 }
263 } 314 }
315 dput(target_dir);
264 } 316 }
265 dput(target_dir);
266 /* now result is properly connected, it is our best bet */
267 if (acceptable(context, result))
268 return result;
269 317
270 alias = find_acceptable_alias(result, acceptable, context); 318 alias = find_acceptable_alias(result, acceptable, context);
271 if (alias) 319 if (alias)
@@ -275,32 +323,16 @@ find_exported_dentry(struct super_block *sb, void *obj, void *parent,
275 dput(result); 323 dput(result);
276 /* It might be justifiable to return ESTALE here, 324 /* It might be justifiable to return ESTALE here,
277 * but the filehandle at-least looks reasonable good 325 * but the filehandle at-least looks reasonable good
278 * and it just be a permission problem, so returning 326 * and it may just be a permission problem, so returning
279 * -EACCESS is safer 327 * -EACCESS is safer
280 */ 328 */
281 return ERR_PTR(-EACCES); 329 return ERR_PTR(-EACCES);
282 330
283 err_target:
284 dput(target_dir);
285 err_result: 331 err_result:
286 dput(result); 332 dput(result);
287 err_out:
288 return ERR_PTR(err); 333 return ERR_PTR(err);
289} 334}
290 335
291
292
293static struct dentry *get_parent(struct dentry *child)
294{
295 /* get_parent cannot be supported generically, the locking
296 * is too icky.
297 * instead, we just return EACCES. If server reboots or inodes
298 * get flushed, you lose
299 */
300 return ERR_PTR(-EACCES);
301}
302
303
304struct getdents_callback { 336struct getdents_callback {
305 char *name; /* name that was found. It already points to a 337 char *name; /* name that was found. It already points to a
306 buffer NAME_MAX+1 is size */ 338 buffer NAME_MAX+1 is size */
@@ -390,61 +422,6 @@ out:
390 return error; 422 return error;
391} 423}
392 424
393
394static struct dentry *export_iget(struct super_block *sb, unsigned long ino, __u32 generation)
395{
396
397 /* iget isn't really right if the inode is currently unallocated!!
398 * This should really all be done inside each filesystem
399 *
400 * ext2fs' read_inode has been strengthed to return a bad_inode if
401 * the inode had been deleted.
402 *
403 * Currently we don't know the generation for parent directory, so
404 * a generation of 0 means "accept any"
405 */
406 struct inode *inode;
407 struct dentry *result;
408 if (ino == 0)
409 return ERR_PTR(-ESTALE);
410 inode = iget(sb, ino);
411 if (inode == NULL)
412 return ERR_PTR(-ENOMEM);
413 if (is_bad_inode(inode)
414 || (generation && inode->i_generation != generation)
415 ) {
416 /* we didn't find the right inode.. */
417 dprintk("fh_verify: Inode %lu, Bad count: %d %d or version %u %u\n",
418 inode->i_ino,
419 inode->i_nlink, atomic_read(&inode->i_count),
420 inode->i_generation,
421 generation);
422
423 iput(inode);
424 return ERR_PTR(-ESTALE);
425 }
426 /* now to find a dentry.
427 * If possible, get a well-connected one
428 */
429 result = d_alloc_anon(inode);
430 if (!result) {
431 iput(inode);
432 return ERR_PTR(-ENOMEM);
433 }
434 return result;
435}
436
437
438static struct dentry *get_object(struct super_block *sb, void *vobjp)
439{
440 __u32 *objp = vobjp;
441 unsigned long ino = objp[0];
442 __u32 generation = objp[1];
443
444 return export_iget(sb, ino, generation);
445}
446
447
448/** 425/**
449 * export_encode_fh - default export_operations->encode_fh function 426 * export_encode_fh - default export_operations->encode_fh function
450 * @dentry: the dentry to encode 427 * @dentry: the dentry to encode
@@ -517,16 +494,40 @@ static struct dentry *export_decode_fh(struct super_block *sb, __u32 *fh, int fh
517 acceptable, context); 494 acceptable, context);
518} 495}
519 496
520struct export_operations export_op_default = { 497int exportfs_encode_fh(struct dentry *dentry, __u32 *fh, int *max_len,
521 .decode_fh = export_decode_fh, 498 int connectable)
522 .encode_fh = export_encode_fh, 499{
500 struct export_operations *nop = dentry->d_sb->s_export_op;
501 int error;
502
503 if (nop->encode_fh)
504 error = nop->encode_fh(dentry, fh, max_len, connectable);
505 else
506 error = export_encode_fh(dentry, fh, max_len, connectable);
523 507
524 .get_name = get_name, 508 return error;
525 .get_parent = get_parent, 509}
526 .get_dentry = get_object, 510EXPORT_SYMBOL_GPL(exportfs_encode_fh);
527}; 511
512struct dentry *exportfs_decode_fh(struct vfsmount *mnt, __u32 *fh, int fh_len,
513 int fileid_type, int (*acceptable)(void *, struct dentry *),
514 void *context)
515{
516 struct export_operations *nop = mnt->mnt_sb->s_export_op;
517 struct dentry *result;
518
519 if (nop->decode_fh) {
520 result = nop->decode_fh(mnt->mnt_sb, fh, fh_len, fileid_type,
521 acceptable, context);
522 } else {
523 result = export_decode_fh(mnt->mnt_sb, fh, fh_len, fileid_type,
524 acceptable, context);
525 }
526
527 return result;
528}
529EXPORT_SYMBOL_GPL(exportfs_decode_fh);
528 530
529EXPORT_SYMBOL(export_op_default);
530EXPORT_SYMBOL(find_exported_dentry); 531EXPORT_SYMBOL(find_exported_dentry);
531 532
532MODULE_LICENSE("GPL"); 533MODULE_LICENSE("GPL");
diff --git a/fs/ext2/acl.c b/fs/ext2/acl.c
index 7c420b800c34..e58669e1b87c 100644
--- a/fs/ext2/acl.c
+++ b/fs/ext2/acl.c
@@ -464,7 +464,7 @@ ext2_xattr_set_acl(struct inode *inode, int type, const void *value,
464 464
465 if (!test_opt(inode->i_sb, POSIX_ACL)) 465 if (!test_opt(inode->i_sb, POSIX_ACL))
466 return -EOPNOTSUPP; 466 return -EOPNOTSUPP;
467 if ((current->fsuid != inode->i_uid) && !capable(CAP_FOWNER)) 467 if (!is_owner_or_cap(inode))
468 return -EPERM; 468 return -EPERM;
469 469
470 if (value) { 470 if (value) {
diff --git a/fs/ext2/ioctl.c b/fs/ext2/ioctl.c
index e85c48218239..3bcd25422ee4 100644
--- a/fs/ext2/ioctl.c
+++ b/fs/ext2/ioctl.c
@@ -36,7 +36,7 @@ int ext2_ioctl (struct inode * inode, struct file * filp, unsigned int cmd,
36 if (IS_RDONLY(inode)) 36 if (IS_RDONLY(inode))
37 return -EROFS; 37 return -EROFS;
38 38
39 if ((current->fsuid != inode->i_uid) && !capable(CAP_FOWNER)) 39 if (!is_owner_or_cap(inode))
40 return -EACCES; 40 return -EACCES;
41 41
42 if (get_user(flags, (int __user *) arg)) 42 if (get_user(flags, (int __user *) arg))
@@ -74,7 +74,7 @@ int ext2_ioctl (struct inode * inode, struct file * filp, unsigned int cmd,
74 case EXT2_IOC_GETVERSION: 74 case EXT2_IOC_GETVERSION:
75 return put_user(inode->i_generation, (int __user *) arg); 75 return put_user(inode->i_generation, (int __user *) arg);
76 case EXT2_IOC_SETVERSION: 76 case EXT2_IOC_SETVERSION:
77 if ((current->fsuid != inode->i_uid) && !capable(CAP_FOWNER)) 77 if (!is_owner_or_cap(inode))
78 return -EPERM; 78 return -EPERM;
79 if (IS_RDONLY(inode)) 79 if (IS_RDONLY(inode))
80 return -EROFS; 80 return -EROFS;
diff --git a/fs/ext2/super.c b/fs/ext2/super.c
index b2efd9083b9b..3eefa97fe204 100644
--- a/fs/ext2/super.c
+++ b/fs/ext2/super.c
@@ -25,6 +25,7 @@
25#include <linux/parser.h> 25#include <linux/parser.h>
26#include <linux/random.h> 26#include <linux/random.h>
27#include <linux/buffer_head.h> 27#include <linux/buffer_head.h>
28#include <linux/exportfs.h>
28#include <linux/smp_lock.h> 29#include <linux/smp_lock.h>
29#include <linux/vfs.h> 30#include <linux/vfs.h>
30#include <linux/seq_file.h> 31#include <linux/seq_file.h>
diff --git a/fs/ext3/acl.c b/fs/ext3/acl.c
index 1e5038d9a01b..d34e9967430a 100644
--- a/fs/ext3/acl.c
+++ b/fs/ext3/acl.c
@@ -489,7 +489,7 @@ ext3_xattr_set_acl(struct inode *inode, int type, const void *value,
489 489
490 if (!test_opt(inode->i_sb, POSIX_ACL)) 490 if (!test_opt(inode->i_sb, POSIX_ACL))
491 return -EOPNOTSUPP; 491 return -EOPNOTSUPP;
492 if ((current->fsuid != inode->i_uid) && !capable(CAP_FOWNER)) 492 if (!is_owner_or_cap(inode))
493 return -EPERM; 493 return -EPERM;
494 494
495 if (value) { 495 if (value) {
diff --git a/fs/ext3/ioctl.c b/fs/ext3/ioctl.c
index 965006dba6be..4a2a02c95bf9 100644
--- a/fs/ext3/ioctl.c
+++ b/fs/ext3/ioctl.c
@@ -41,7 +41,7 @@ int ext3_ioctl (struct inode * inode, struct file * filp, unsigned int cmd,
41 if (IS_RDONLY(inode)) 41 if (IS_RDONLY(inode))
42 return -EROFS; 42 return -EROFS;
43 43
44 if ((current->fsuid != inode->i_uid) && !capable(CAP_FOWNER)) 44 if (!is_owner_or_cap(inode))
45 return -EACCES; 45 return -EACCES;
46 46
47 if (get_user(flags, (int __user *) arg)) 47 if (get_user(flags, (int __user *) arg))
@@ -122,7 +122,7 @@ flags_err:
122 __u32 generation; 122 __u32 generation;
123 int err; 123 int err;
124 124
125 if ((current->fsuid != inode->i_uid) && !capable(CAP_FOWNER)) 125 if (!is_owner_or_cap(inode))
126 return -EPERM; 126 return -EPERM;
127 if (IS_RDONLY(inode)) 127 if (IS_RDONLY(inode))
128 return -EROFS; 128 return -EROFS;
@@ -181,7 +181,7 @@ flags_err:
181 if (IS_RDONLY(inode)) 181 if (IS_RDONLY(inode))
182 return -EROFS; 182 return -EROFS;
183 183
184 if ((current->fsuid != inode->i_uid) && !capable(CAP_FOWNER)) 184 if (!is_owner_or_cap(inode))
185 return -EACCES; 185 return -EACCES;
186 186
187 if (get_user(rsv_window_size, (int __user *)arg)) 187 if (get_user(rsv_window_size, (int __user *)arg))
diff --git a/fs/ext3/super.c b/fs/ext3/super.c
index 51d1c456cdab..4f84dc86628a 100644
--- a/fs/ext3/super.c
+++ b/fs/ext3/super.c
@@ -29,6 +29,7 @@
29#include <linux/parser.h> 29#include <linux/parser.h>
30#include <linux/smp_lock.h> 30#include <linux/smp_lock.h>
31#include <linux/buffer_head.h> 31#include <linux/buffer_head.h>
32#include <linux/exportfs.h>
32#include <linux/vfs.h> 33#include <linux/vfs.h>
33#include <linux/random.h> 34#include <linux/random.h>
34#include <linux/mount.h> 35#include <linux/mount.h>
diff --git a/fs/ext4/acl.c b/fs/ext4/acl.c
index 9e882546d91a..a8bae8cd1d5d 100644
--- a/fs/ext4/acl.c
+++ b/fs/ext4/acl.c
@@ -489,7 +489,7 @@ ext4_xattr_set_acl(struct inode *inode, int type, const void *value,
489 489
490 if (!test_opt(inode->i_sb, POSIX_ACL)) 490 if (!test_opt(inode->i_sb, POSIX_ACL))
491 return -EOPNOTSUPP; 491 return -EOPNOTSUPP;
492 if ((current->fsuid != inode->i_uid) && !capable(CAP_FOWNER)) 492 if (!is_owner_or_cap(inode))
493 return -EPERM; 493 return -EPERM;
494 494
495 if (value) { 495 if (value) {
diff --git a/fs/ext4/ioctl.c b/fs/ext4/ioctl.c
index 500567dd53b6..7b4aa4543c83 100644
--- a/fs/ext4/ioctl.c
+++ b/fs/ext4/ioctl.c
@@ -40,7 +40,7 @@ int ext4_ioctl (struct inode * inode, struct file * filp, unsigned int cmd,
40 if (IS_RDONLY(inode)) 40 if (IS_RDONLY(inode))
41 return -EROFS; 41 return -EROFS;
42 42
43 if ((current->fsuid != inode->i_uid) && !capable(CAP_FOWNER)) 43 if (!is_owner_or_cap(inode))
44 return -EACCES; 44 return -EACCES;
45 45
46 if (get_user(flags, (int __user *) arg)) 46 if (get_user(flags, (int __user *) arg))
@@ -121,7 +121,7 @@ flags_err:
121 __u32 generation; 121 __u32 generation;
122 int err; 122 int err;
123 123
124 if ((current->fsuid != inode->i_uid) && !capable(CAP_FOWNER)) 124 if (!is_owner_or_cap(inode))
125 return -EPERM; 125 return -EPERM;
126 if (IS_RDONLY(inode)) 126 if (IS_RDONLY(inode))
127 return -EROFS; 127 return -EROFS;
@@ -180,7 +180,7 @@ flags_err:
180 if (IS_RDONLY(inode)) 180 if (IS_RDONLY(inode))
181 return -EROFS; 181 return -EROFS;
182 182
183 if ((current->fsuid != inode->i_uid) && !capable(CAP_FOWNER)) 183 if (!is_owner_or_cap(inode))
184 return -EACCES; 184 return -EACCES;
185 185
186 if (get_user(rsv_window_size, (int __user *)arg)) 186 if (get_user(rsv_window_size, (int __user *)arg))
diff --git a/fs/ext4/super.c b/fs/ext4/super.c
index d0d8c76c7edb..b806e689c4aa 100644
--- a/fs/ext4/super.c
+++ b/fs/ext4/super.c
@@ -29,6 +29,7 @@
29#include <linux/parser.h> 29#include <linux/parser.h>
30#include <linux/smp_lock.h> 30#include <linux/smp_lock.h>
31#include <linux/buffer_head.h> 31#include <linux/buffer_head.h>
32#include <linux/exportfs.h>
32#include <linux/vfs.h> 33#include <linux/vfs.h>
33#include <linux/random.h> 34#include <linux/random.h>
34#include <linux/mount.h> 35#include <linux/mount.h>
diff --git a/fs/fat/inode.c b/fs/fat/inode.c
index cfaf5877d98b..0a7ddb39a593 100644
--- a/fs/fat/inode.c
+++ b/fs/fat/inode.c
@@ -20,6 +20,7 @@
20#include <linux/pagemap.h> 20#include <linux/pagemap.h>
21#include <linux/mpage.h> 21#include <linux/mpage.h>
22#include <linux/buffer_head.h> 22#include <linux/buffer_head.h>
23#include <linux/exportfs.h>
23#include <linux/mount.h> 24#include <linux/mount.h>
24#include <linux/vfs.h> 25#include <linux/vfs.h>
25#include <linux/parser.h> 26#include <linux/parser.h>
diff --git a/fs/fcntl.c b/fs/fcntl.c
index 8e382a5d51bd..3f22e9f4f691 100644
--- a/fs/fcntl.c
+++ b/fs/fcntl.c
@@ -215,7 +215,7 @@ static int setfl(int fd, struct file * filp, unsigned long arg)
215 215
216 /* O_NOATIME can only be set by the owner or superuser */ 216 /* O_NOATIME can only be set by the owner or superuser */
217 if ((arg & O_NOATIME) && !(filp->f_flags & O_NOATIME)) 217 if ((arg & O_NOATIME) && !(filp->f_flags & O_NOATIME))
218 if (current->fsuid != inode->i_uid && !capable(CAP_FOWNER)) 218 if (!is_owner_or_cap(inode))
219 return -EPERM; 219 return -EPERM;
220 220
221 /* required for strict SunOS emulation */ 221 /* required for strict SunOS emulation */
diff --git a/fs/generic_acl.c b/fs/generic_acl.c
index 9ccb78947171..995d63b2e747 100644
--- a/fs/generic_acl.c
+++ b/fs/generic_acl.c
@@ -78,7 +78,7 @@ generic_acl_set(struct inode *inode, struct generic_acl_operations *ops,
78 78
79 if (S_ISLNK(inode->i_mode)) 79 if (S_ISLNK(inode->i_mode))
80 return -EOPNOTSUPP; 80 return -EOPNOTSUPP;
81 if (current->fsuid != inode->i_uid && !capable(CAP_FOWNER)) 81 if (!is_owner_or_cap(inode))
82 return -EPERM; 82 return -EPERM;
83 if (value) { 83 if (value) {
84 acl = posix_acl_from_xattr(value, size); 84 acl = posix_acl_from_xattr(value, size);
diff --git a/fs/gfs2/acl.c b/fs/gfs2/acl.c
index 6e80844367ee..1047a8c7226a 100644
--- a/fs/gfs2/acl.c
+++ b/fs/gfs2/acl.c
@@ -74,7 +74,7 @@ int gfs2_acl_validate_remove(struct gfs2_inode *ip, int access)
74{ 74{
75 if (!GFS2_SB(&ip->i_inode)->sd_args.ar_posix_acl) 75 if (!GFS2_SB(&ip->i_inode)->sd_args.ar_posix_acl)
76 return -EOPNOTSUPP; 76 return -EOPNOTSUPP;
77 if (current->fsuid != ip->i_inode.i_uid && !capable(CAP_FOWNER)) 77 if (!is_owner_or_cap(&ip->i_inode))
78 return -EPERM; 78 return -EPERM;
79 if (S_ISLNK(ip->i_inode.i_mode)) 79 if (S_ISLNK(ip->i_inode.i_mode))
80 return -EOPNOTSUPP; 80 return -EOPNOTSUPP;
diff --git a/fs/gfs2/ops_export.c b/fs/gfs2/ops_export.c
index 99ea5659bc2c..b8312edee0e4 100644
--- a/fs/gfs2/ops_export.c
+++ b/fs/gfs2/ops_export.c
@@ -11,6 +11,7 @@
11#include <linux/spinlock.h> 11#include <linux/spinlock.h>
12#include <linux/completion.h> 12#include <linux/completion.h>
13#include <linux/buffer_head.h> 13#include <linux/buffer_head.h>
14#include <linux/exportfs.h>
14#include <linux/gfs2_ondisk.h> 15#include <linux/gfs2_ondisk.h>
15#include <linux/crc32.h> 16#include <linux/crc32.h>
16#include <linux/lm_interface.h> 17#include <linux/lm_interface.h>
diff --git a/fs/hfsplus/ioctl.c b/fs/hfsplus/ioctl.c
index 79fd10402ea3..b60c0affbec5 100644
--- a/fs/hfsplus/ioctl.c
+++ b/fs/hfsplus/ioctl.c
@@ -38,7 +38,7 @@ int hfsplus_ioctl(struct inode *inode, struct file *filp, unsigned int cmd,
38 if (IS_RDONLY(inode)) 38 if (IS_RDONLY(inode))
39 return -EROFS; 39 return -EROFS;
40 40
41 if ((current->fsuid != inode->i_uid) && !capable(CAP_FOWNER)) 41 if (!is_owner_or_cap(inode))
42 return -EACCES; 42 return -EACCES;
43 43
44 if (get_user(flags, (int __user *)arg)) 44 if (get_user(flags, (int __user *)arg))
diff --git a/fs/inode.c b/fs/inode.c
index 9a012cc5b6cd..320e088d0b28 100644
--- a/fs/inode.c
+++ b/fs/inode.c
@@ -145,7 +145,7 @@ static struct inode *alloc_inode(struct super_block *sb)
145 mapping->a_ops = &empty_aops; 145 mapping->a_ops = &empty_aops;
146 mapping->host = inode; 146 mapping->host = inode;
147 mapping->flags = 0; 147 mapping->flags = 0;
148 mapping_set_gfp_mask(mapping, GFP_HIGHUSER); 148 mapping_set_gfp_mask(mapping, GFP_HIGHUSER_PAGECACHE);
149 mapping->assoc_mapping = NULL; 149 mapping->assoc_mapping = NULL;
150 mapping->backing_dev_info = &default_backing_dev_info; 150 mapping->backing_dev_info = &default_backing_dev_info;
151 151
@@ -462,6 +462,11 @@ static int shrink_icache_memory(int nr, gfp_t gfp_mask)
462 return (inodes_stat.nr_unused / 100) * sysctl_vfs_cache_pressure; 462 return (inodes_stat.nr_unused / 100) * sysctl_vfs_cache_pressure;
463} 463}
464 464
465static struct shrinker icache_shrinker = {
466 .shrink = shrink_icache_memory,
467 .seeks = DEFAULT_SEEKS,
468};
469
465static void __wait_on_freeing_inode(struct inode *inode); 470static void __wait_on_freeing_inode(struct inode *inode);
466/* 471/*
467 * Called with the inode lock held. 472 * Called with the inode lock held.
@@ -519,7 +524,13 @@ repeat:
519 * new_inode - obtain an inode 524 * new_inode - obtain an inode
520 * @sb: superblock 525 * @sb: superblock
521 * 526 *
522 * Allocates a new inode for given superblock. 527 * Allocates a new inode for given superblock. The default gfp_mask
528 * for allocations related to inode->i_mapping is GFP_HIGHUSER_PAGECACHE.
529 * If HIGHMEM pages are unsuitable or it is known that pages allocated
530 * for the page cache are not reclaimable or migratable,
531 * mapping_set_gfp_mask() must be called with suitable flags on the
532 * newly created inode's mapping
533 *
523 */ 534 */
524struct inode *new_inode(struct super_block *sb) 535struct inode *new_inode(struct super_block *sb)
525{ 536{
@@ -1379,7 +1390,7 @@ void __init inode_init(unsigned long mempages)
1379 SLAB_MEM_SPREAD), 1390 SLAB_MEM_SPREAD),
1380 init_once, 1391 init_once,
1381 NULL); 1392 NULL);
1382 set_shrinker(DEFAULT_SEEKS, shrink_icache_memory); 1393 register_shrinker(&icache_shrinker);
1383 1394
1384 /* Hash may have been set up in inode_init_early */ 1395 /* Hash may have been set up in inode_init_early */
1385 if (!hashdist) 1396 if (!hashdist)
diff --git a/fs/isofs/isofs.h b/fs/isofs/isofs.h
index efe2872cd4e3..a07e67b1ea7f 100644
--- a/fs/isofs/isofs.h
+++ b/fs/isofs/isofs.h
@@ -1,5 +1,6 @@
1#include <linux/fs.h> 1#include <linux/fs.h>
2#include <linux/buffer_head.h> 2#include <linux/buffer_head.h>
3#include <linux/exportfs.h>
3#include <linux/iso_fs.h> 4#include <linux/iso_fs.h>
4#include <asm/unaligned.h> 5#include <asm/unaligned.h>
5 6
diff --git a/fs/jffs2/acl.c b/fs/jffs2/acl.c
index a46101ee867a..65b3a1b5b88d 100644
--- a/fs/jffs2/acl.c
+++ b/fs/jffs2/acl.c
@@ -435,7 +435,7 @@ static int jffs2_acl_setxattr(struct inode *inode, int type, const void *value,
435 struct posix_acl *acl; 435 struct posix_acl *acl;
436 int rc; 436 int rc;
437 437
438 if ((current->fsuid != inode->i_uid) && !capable(CAP_FOWNER)) 438 if (!is_owner_or_cap(inode))
439 return -EPERM; 439 return -EPERM;
440 440
441 if (value) { 441 if (value) {
diff --git a/fs/jffs2/background.c b/fs/jffs2/background.c
index 0c82dfcfd246..143c5530caf3 100644
--- a/fs/jffs2/background.c
+++ b/fs/jffs2/background.c
@@ -81,6 +81,7 @@ static int jffs2_garbage_collect_thread(void *_c)
81 81
82 set_user_nice(current, 10); 82 set_user_nice(current, 10);
83 83
84 set_freezable();
84 for (;;) { 85 for (;;) {
85 allow_signal(SIGHUP); 86 allow_signal(SIGHUP);
86 87
diff --git a/fs/jfs/ioctl.c b/fs/jfs/ioctl.c
index fe063af6fd2f..3c8663bea98c 100644
--- a/fs/jfs/ioctl.c
+++ b/fs/jfs/ioctl.c
@@ -69,7 +69,7 @@ int jfs_ioctl(struct inode * inode, struct file * filp, unsigned int cmd,
69 if (IS_RDONLY(inode)) 69 if (IS_RDONLY(inode))
70 return -EROFS; 70 return -EROFS;
71 71
72 if ((current->fsuid != inode->i_uid) && !capable(CAP_FOWNER)) 72 if (!is_owner_or_cap(inode))
73 return -EACCES; 73 return -EACCES;
74 74
75 if (get_user(flags, (int __user *) arg)) 75 if (get_user(flags, (int __user *) arg))
diff --git a/fs/jfs/jfs_inode.h b/fs/jfs/jfs_inode.h
index 2374b595f2e1..f0ec72b263f1 100644
--- a/fs/jfs/jfs_inode.h
+++ b/fs/jfs/jfs_inode.h
@@ -32,6 +32,7 @@ extern void jfs_truncate_nolock(struct inode *, loff_t);
32extern void jfs_free_zero_link(struct inode *); 32extern void jfs_free_zero_link(struct inode *);
33extern struct dentry *jfs_get_parent(struct dentry *dentry); 33extern struct dentry *jfs_get_parent(struct dentry *dentry);
34extern void jfs_get_inode_flags(struct jfs_inode_info *); 34extern void jfs_get_inode_flags(struct jfs_inode_info *);
35extern struct dentry *jfs_get_dentry(struct super_block *sb, void *vobjp);
35extern void jfs_set_inode_flags(struct inode *); 36extern void jfs_set_inode_flags(struct inode *);
36extern int jfs_get_block(struct inode *, sector_t, struct buffer_head *, int); 37extern int jfs_get_block(struct inode *, sector_t, struct buffer_head *, int);
37 38
diff --git a/fs/jfs/namei.c b/fs/jfs/namei.c
index 25161c4121e4..932797ba433b 100644
--- a/fs/jfs/namei.c
+++ b/fs/jfs/namei.c
@@ -1477,6 +1477,38 @@ static struct dentry *jfs_lookup(struct inode *dip, struct dentry *dentry, struc
1477 return dentry; 1477 return dentry;
1478} 1478}
1479 1479
1480struct dentry *jfs_get_dentry(struct super_block *sb, void *vobjp)
1481{
1482 __u32 *objp = vobjp;
1483 unsigned long ino = objp[0];
1484 __u32 generation = objp[1];
1485 struct inode *inode;
1486 struct dentry *result;
1487
1488 if (ino == 0)
1489 return ERR_PTR(-ESTALE);
1490 inode = iget(sb, ino);
1491 if (inode == NULL)
1492 return ERR_PTR(-ENOMEM);
1493
1494 if (is_bad_inode(inode) ||
1495 (generation && inode->i_generation != generation)) {
1496 result = ERR_PTR(-ESTALE);
1497 goto out_iput;
1498 }
1499
1500 result = d_alloc_anon(inode);
1501 if (!result) {
1502 result = ERR_PTR(-ENOMEM);
1503 goto out_iput;
1504 }
1505 return result;
1506
1507 out_iput:
1508 iput(inode);
1509 return result;
1510}
1511
1480struct dentry *jfs_get_parent(struct dentry *dentry) 1512struct dentry *jfs_get_parent(struct dentry *dentry)
1481{ 1513{
1482 struct super_block *sb = dentry->d_inode->i_sb; 1514 struct super_block *sb = dentry->d_inode->i_sb;
diff --git a/fs/jfs/super.c b/fs/jfs/super.c
index 20e4ac1c79a3..929fceca7999 100644
--- a/fs/jfs/super.c
+++ b/fs/jfs/super.c
@@ -27,6 +27,7 @@
27#include <linux/kthread.h> 27#include <linux/kthread.h>
28#include <linux/posix_acl.h> 28#include <linux/posix_acl.h>
29#include <linux/buffer_head.h> 29#include <linux/buffer_head.h>
30#include <linux/exportfs.h>
30#include <asm/uaccess.h> 31#include <asm/uaccess.h>
31#include <linux/seq_file.h> 32#include <linux/seq_file.h>
32 33
@@ -737,6 +738,7 @@ static const struct super_operations jfs_super_operations = {
737}; 738};
738 739
739static struct export_operations jfs_export_operations = { 740static struct export_operations jfs_export_operations = {
741 .get_dentry = jfs_get_dentry,
740 .get_parent = jfs_get_parent, 742 .get_parent = jfs_get_parent,
741}; 743};
742 744
diff --git a/fs/jfs/xattr.c b/fs/jfs/xattr.c
index b2375f0774b7..9b7f2cdaae0a 100644
--- a/fs/jfs/xattr.c
+++ b/fs/jfs/xattr.c
@@ -697,7 +697,7 @@ static int can_set_system_xattr(struct inode *inode, const char *name,
697 struct posix_acl *acl; 697 struct posix_acl *acl;
698 int rc; 698 int rc;
699 699
700 if ((current->fsuid != inode->i_uid) && !capable(CAP_FOWNER)) 700 if (!is_owner_or_cap(inode))
701 return -EPERM; 701 return -EPERM;
702 702
703 /* 703 /*
diff --git a/fs/lockd/svc.c b/fs/lockd/svc.c
index 26809325469c..82e2192a0d5c 100644
--- a/fs/lockd/svc.c
+++ b/fs/lockd/svc.c
@@ -25,6 +25,7 @@
25#include <linux/smp.h> 25#include <linux/smp.h>
26#include <linux/smp_lock.h> 26#include <linux/smp_lock.h>
27#include <linux/mutex.h> 27#include <linux/mutex.h>
28#include <linux/freezer.h>
28 29
29#include <linux/sunrpc/types.h> 30#include <linux/sunrpc/types.h>
30#include <linux/sunrpc/stats.h> 31#include <linux/sunrpc/stats.h>
@@ -75,18 +76,31 @@ static const int nlm_port_min = 0, nlm_port_max = 65535;
75 76
76static struct ctl_table_header * nlm_sysctl_table; 77static struct ctl_table_header * nlm_sysctl_table;
77 78
78static unsigned long set_grace_period(void) 79static unsigned long get_lockd_grace_period(void)
79{ 80{
80 unsigned long grace_period;
81
82 /* Note: nlm_timeout should always be nonzero */ 81 /* Note: nlm_timeout should always be nonzero */
83 if (nlm_grace_period) 82 if (nlm_grace_period)
84 grace_period = ((nlm_grace_period + nlm_timeout - 1) 83 return roundup(nlm_grace_period, nlm_timeout) * HZ;
85 / nlm_timeout) * nlm_timeout * HZ;
86 else 84 else
87 grace_period = nlm_timeout * 5 * HZ; 85 return nlm_timeout * 5 * HZ;
86}
87
88unsigned long get_nfs_grace_period(void)
89{
90 unsigned long lockdgrace = get_lockd_grace_period();
91 unsigned long nfsdgrace = 0;
92
93 if (nlmsvc_ops)
94 nfsdgrace = nlmsvc_ops->get_grace_period();
95
96 return max(lockdgrace, nfsdgrace);
97}
98EXPORT_SYMBOL(get_nfs_grace_period);
99
100static unsigned long set_grace_period(void)
101{
88 nlmsvc_grace_period = 1; 102 nlmsvc_grace_period = 1;
89 return grace_period + jiffies; 103 return get_nfs_grace_period() + jiffies;
90} 104}
91 105
92static inline void clear_grace_period(void) 106static inline void clear_grace_period(void)
@@ -119,6 +133,7 @@ lockd(struct svc_rqst *rqstp)
119 complete(&lockd_start_done); 133 complete(&lockd_start_done);
120 134
121 daemonize("lockd"); 135 daemonize("lockd");
136 set_freezable();
122 137
123 /* Process request with signals blocked, but allow SIGKILL. */ 138 /* Process request with signals blocked, but allow SIGKILL. */
124 allow_signal(SIGKILL); 139 allow_signal(SIGKILL);
diff --git a/fs/mbcache.c b/fs/mbcache.c
index deeb9dc062d9..fbb1d02f8791 100644
--- a/fs/mbcache.c
+++ b/fs/mbcache.c
@@ -100,7 +100,6 @@ struct mb_cache {
100static LIST_HEAD(mb_cache_list); 100static LIST_HEAD(mb_cache_list);
101static LIST_HEAD(mb_cache_lru_list); 101static LIST_HEAD(mb_cache_lru_list);
102static DEFINE_SPINLOCK(mb_cache_spinlock); 102static DEFINE_SPINLOCK(mb_cache_spinlock);
103static struct shrinker *mb_shrinker;
104 103
105static inline int 104static inline int
106mb_cache_indexes(struct mb_cache *cache) 105mb_cache_indexes(struct mb_cache *cache)
@@ -118,6 +117,10 @@ mb_cache_indexes(struct mb_cache *cache)
118 117
119static int mb_cache_shrink_fn(int nr_to_scan, gfp_t gfp_mask); 118static int mb_cache_shrink_fn(int nr_to_scan, gfp_t gfp_mask);
120 119
120static struct shrinker mb_cache_shrinker = {
121 .shrink = mb_cache_shrink_fn,
122 .seeks = DEFAULT_SEEKS,
123};
121 124
122static inline int 125static inline int
123__mb_cache_entry_is_hashed(struct mb_cache_entry *ce) 126__mb_cache_entry_is_hashed(struct mb_cache_entry *ce)
@@ -662,13 +665,13 @@ mb_cache_entry_find_next(struct mb_cache_entry *prev, int index,
662 665
663static int __init init_mbcache(void) 666static int __init init_mbcache(void)
664{ 667{
665 mb_shrinker = set_shrinker(DEFAULT_SEEKS, mb_cache_shrink_fn); 668 register_shrinker(&mb_cache_shrinker);
666 return 0; 669 return 0;
667} 670}
668 671
669static void __exit exit_mbcache(void) 672static void __exit exit_mbcache(void)
670{ 673{
671 remove_shrinker(mb_shrinker); 674 unregister_shrinker(&mb_cache_shrinker);
672} 675}
673 676
674module_init(init_mbcache) 677module_init(init_mbcache)
diff --git a/fs/namei.c b/fs/namei.c
index 5e2d98d10c5d..defaa47c11d4 100644
--- a/fs/namei.c
+++ b/fs/namei.c
@@ -1576,7 +1576,7 @@ int may_open(struct nameidata *nd, int acc_mode, int flag)
1576 1576
1577 /* O_NOATIME can only be set by the owner or superuser */ 1577 /* O_NOATIME can only be set by the owner or superuser */
1578 if (flag & O_NOATIME) 1578 if (flag & O_NOATIME)
1579 if (current->fsuid != inode->i_uid && !capable(CAP_FOWNER)) 1579 if (!is_owner_or_cap(inode))
1580 return -EPERM; 1580 return -EPERM;
1581 1581
1582 /* 1582 /*
diff --git a/fs/nfs/callback.c b/fs/nfs/callback.c
index 75f309c8741a..a796be5051bf 100644
--- a/fs/nfs/callback.c
+++ b/fs/nfs/callback.c
@@ -14,6 +14,7 @@
14#include <linux/sunrpc/svcsock.h> 14#include <linux/sunrpc/svcsock.h>
15#include <linux/nfs_fs.h> 15#include <linux/nfs_fs.h>
16#include <linux/mutex.h> 16#include <linux/mutex.h>
17#include <linux/freezer.h>
17 18
18#include <net/inet_sock.h> 19#include <net/inet_sock.h>
19 20
@@ -67,6 +68,7 @@ static void nfs_callback_svc(struct svc_rqst *rqstp)
67 daemonize("nfsv4-svc"); 68 daemonize("nfsv4-svc");
68 /* Process request with signals blocked, but allow SIGKILL. */ 69 /* Process request with signals blocked, but allow SIGKILL. */
69 allow_signal(SIGKILL); 70 allow_signal(SIGKILL);
71 set_freezable();
70 72
71 complete(&nfs_callback_info.started); 73 complete(&nfs_callback_info.started);
72 74
diff --git a/fs/nfs/super.c b/fs/nfs/super.c
index a2b1af89ca1a..adffe1615c51 100644
--- a/fs/nfs/super.c
+++ b/fs/nfs/super.c
@@ -300,7 +300,10 @@ static const struct super_operations nfs4_sops = {
300}; 300};
301#endif 301#endif
302 302
303static struct shrinker *acl_shrinker; 303static struct shrinker acl_shrinker = {
304 .shrink = nfs_access_cache_shrinker,
305 .seeks = DEFAULT_SEEKS,
306};
304 307
305/* 308/*
306 * Register the NFS filesystems 309 * Register the NFS filesystems
@@ -321,7 +324,7 @@ int __init register_nfs_fs(void)
321 if (ret < 0) 324 if (ret < 0)
322 goto error_2; 325 goto error_2;
323#endif 326#endif
324 acl_shrinker = set_shrinker(DEFAULT_SEEKS, nfs_access_cache_shrinker); 327 register_shrinker(&acl_shrinker);
325 return 0; 328 return 0;
326 329
327#ifdef CONFIG_NFS_V4 330#ifdef CONFIG_NFS_V4
@@ -339,8 +342,7 @@ error_0:
339 */ 342 */
340void __exit unregister_nfs_fs(void) 343void __exit unregister_nfs_fs(void)
341{ 344{
342 if (acl_shrinker != NULL) 345 unregister_shrinker(&acl_shrinker);
343 remove_shrinker(acl_shrinker);
344#ifdef CONFIG_NFS_V4 346#ifdef CONFIG_NFS_V4
345 unregister_filesystem(&nfs4_fs_type); 347 unregister_filesystem(&nfs4_fs_type);
346 nfs_unregister_sysctl(); 348 nfs_unregister_sysctl();
diff --git a/fs/nfsd/auth.c b/fs/nfsd/auth.c
index 6e92b0fe5323..cf61dc8ae942 100644
--- a/fs/nfsd/auth.c
+++ b/fs/nfsd/auth.c
@@ -12,17 +12,31 @@
12 12
13#define CAP_NFSD_MASK (CAP_FS_MASK|CAP_TO_MASK(CAP_SYS_RESOURCE)) 13#define CAP_NFSD_MASK (CAP_FS_MASK|CAP_TO_MASK(CAP_SYS_RESOURCE))
14 14
15static int nfsexp_flags(struct svc_rqst *rqstp, struct svc_export *exp)
16{
17 struct exp_flavor_info *f;
18 struct exp_flavor_info *end = exp->ex_flavors + exp->ex_nflavors;
19
20 for (f = exp->ex_flavors; f < end; f++) {
21 if (f->pseudoflavor == rqstp->rq_flavor)
22 return f->flags;
23 }
24 return exp->ex_flags;
25
26}
27
15int nfsd_setuser(struct svc_rqst *rqstp, struct svc_export *exp) 28int nfsd_setuser(struct svc_rqst *rqstp, struct svc_export *exp)
16{ 29{
17 struct svc_cred cred = rqstp->rq_cred; 30 struct svc_cred cred = rqstp->rq_cred;
18 int i; 31 int i;
32 int flags = nfsexp_flags(rqstp, exp);
19 int ret; 33 int ret;
20 34
21 if (exp->ex_flags & NFSEXP_ALLSQUASH) { 35 if (flags & NFSEXP_ALLSQUASH) {
22 cred.cr_uid = exp->ex_anon_uid; 36 cred.cr_uid = exp->ex_anon_uid;
23 cred.cr_gid = exp->ex_anon_gid; 37 cred.cr_gid = exp->ex_anon_gid;
24 cred.cr_group_info = groups_alloc(0); 38 cred.cr_group_info = groups_alloc(0);
25 } else if (exp->ex_flags & NFSEXP_ROOTSQUASH) { 39 } else if (flags & NFSEXP_ROOTSQUASH) {
26 struct group_info *gi; 40 struct group_info *gi;
27 if (!cred.cr_uid) 41 if (!cred.cr_uid)
28 cred.cr_uid = exp->ex_anon_uid; 42 cred.cr_uid = exp->ex_anon_uid;
diff --git a/fs/nfsd/export.c b/fs/nfsd/export.c
index 79bd03b8bbf8..c7bbf460b009 100644
--- a/fs/nfsd/export.c
+++ b/fs/nfsd/export.c
@@ -26,12 +26,15 @@
26#include <linux/mount.h> 26#include <linux/mount.h>
27#include <linux/hash.h> 27#include <linux/hash.h>
28#include <linux/module.h> 28#include <linux/module.h>
29#include <linux/exportfs.h>
29 30
30#include <linux/sunrpc/svc.h> 31#include <linux/sunrpc/svc.h>
31#include <linux/nfsd/nfsd.h> 32#include <linux/nfsd/nfsd.h>
32#include <linux/nfsd/nfsfh.h> 33#include <linux/nfsd/nfsfh.h>
33#include <linux/nfsd/syscall.h> 34#include <linux/nfsd/syscall.h>
34#include <linux/lockd/bind.h> 35#include <linux/lockd/bind.h>
36#include <linux/sunrpc/msg_prot.h>
37#include <linux/sunrpc/gss_api.h>
35 38
36#define NFSDDBG_FACILITY NFSDDBG_EXPORT 39#define NFSDDBG_FACILITY NFSDDBG_EXPORT
37 40
@@ -451,8 +454,48 @@ out_free_all:
451 return err; 454 return err;
452} 455}
453 456
457static int secinfo_parse(char **mesg, char *buf, struct svc_export *exp)
458{
459 int listsize, err;
460 struct exp_flavor_info *f;
461
462 err = get_int(mesg, &listsize);
463 if (err)
464 return err;
465 if (listsize < 0 || listsize > MAX_SECINFO_LIST)
466 return -EINVAL;
467
468 for (f = exp->ex_flavors; f < exp->ex_flavors + listsize; f++) {
469 err = get_int(mesg, &f->pseudoflavor);
470 if (err)
471 return err;
472 /*
473 * Just a quick sanity check; we could also try to check
474 * whether this pseudoflavor is supported, but at worst
475 * an unsupported pseudoflavor on the export would just
476 * be a pseudoflavor that won't match the flavor of any
477 * authenticated request. The administrator will
478 * probably discover the problem when someone fails to
479 * authenticate.
480 */
481 if (f->pseudoflavor < 0)
482 return -EINVAL;
483 err = get_int(mesg, &f->flags);
484 if (err)
485 return err;
486 /* Only some flags are allowed to differ between flavors: */
487 if (~NFSEXP_SECINFO_FLAGS & (f->flags ^ exp->ex_flags))
488 return -EINVAL;
489 }
490 exp->ex_nflavors = listsize;
491 return 0;
492}
493
454#else /* CONFIG_NFSD_V4 */ 494#else /* CONFIG_NFSD_V4 */
455static inline int fsloc_parse(char **mesg, char *buf, struct nfsd4_fs_locations *fsloc) { return 0; } 495static inline int
496fsloc_parse(char **mesg, char *buf, struct nfsd4_fs_locations *fsloc){return 0;}
497static inline int
498secinfo_parse(char **mesg, char *buf, struct svc_export *exp) { return 0; }
456#endif 499#endif
457 500
458static int svc_export_parse(struct cache_detail *cd, char *mesg, int mlen) 501static int svc_export_parse(struct cache_detail *cd, char *mesg, int mlen)
@@ -476,6 +519,9 @@ static int svc_export_parse(struct cache_detail *cd, char *mesg, int mlen)
476 519
477 exp.ex_uuid = NULL; 520 exp.ex_uuid = NULL;
478 521
522 /* secinfo */
523 exp.ex_nflavors = 0;
524
479 if (mesg[mlen-1] != '\n') 525 if (mesg[mlen-1] != '\n')
480 return -EINVAL; 526 return -EINVAL;
481 mesg[mlen-1] = 0; 527 mesg[mlen-1] = 0;
@@ -553,7 +599,9 @@ static int svc_export_parse(struct cache_detail *cd, char *mesg, int mlen)
553 if (exp.ex_uuid == NULL) 599 if (exp.ex_uuid == NULL)
554 err = -ENOMEM; 600 err = -ENOMEM;
555 } 601 }
556 } else 602 } else if (strcmp(buf, "secinfo") == 0)
603 err = secinfo_parse(&mesg, buf, &exp);
604 else
557 /* quietly ignore unknown words and anything 605 /* quietly ignore unknown words and anything
558 * following. Newer user-space can try to set 606 * following. Newer user-space can try to set
559 * new values, then see what the result was. 607 * new values, then see what the result was.
@@ -593,6 +641,7 @@ static int svc_export_parse(struct cache_detail *cd, char *mesg, int mlen)
593 641
594static void exp_flags(struct seq_file *m, int flag, int fsid, 642static void exp_flags(struct seq_file *m, int flag, int fsid,
595 uid_t anonu, uid_t anong, struct nfsd4_fs_locations *fslocs); 643 uid_t anonu, uid_t anong, struct nfsd4_fs_locations *fslocs);
644static void show_secinfo(struct seq_file *m, struct svc_export *exp);
596 645
597static int svc_export_show(struct seq_file *m, 646static int svc_export_show(struct seq_file *m,
598 struct cache_detail *cd, 647 struct cache_detail *cd,
@@ -622,6 +671,7 @@ static int svc_export_show(struct seq_file *m,
622 seq_printf(m, "%02x", exp->ex_uuid[i]); 671 seq_printf(m, "%02x", exp->ex_uuid[i]);
623 } 672 }
624 } 673 }
674 show_secinfo(m, exp);
625 } 675 }
626 seq_puts(m, ")\n"); 676 seq_puts(m, ")\n");
627 return 0; 677 return 0;
@@ -654,6 +704,7 @@ static void export_update(struct cache_head *cnew, struct cache_head *citem)
654{ 704{
655 struct svc_export *new = container_of(cnew, struct svc_export, h); 705 struct svc_export *new = container_of(cnew, struct svc_export, h);
656 struct svc_export *item = container_of(citem, struct svc_export, h); 706 struct svc_export *item = container_of(citem, struct svc_export, h);
707 int i;
657 708
658 new->ex_flags = item->ex_flags; 709 new->ex_flags = item->ex_flags;
659 new->ex_anon_uid = item->ex_anon_uid; 710 new->ex_anon_uid = item->ex_anon_uid;
@@ -669,6 +720,10 @@ static void export_update(struct cache_head *cnew, struct cache_head *citem)
669 item->ex_fslocs.locations_count = 0; 720 item->ex_fslocs.locations_count = 0;
670 new->ex_fslocs.migrated = item->ex_fslocs.migrated; 721 new->ex_fslocs.migrated = item->ex_fslocs.migrated;
671 item->ex_fslocs.migrated = 0; 722 item->ex_fslocs.migrated = 0;
723 new->ex_nflavors = item->ex_nflavors;
724 for (i = 0; i < MAX_SECINFO_LIST; i++) {
725 new->ex_flavors[i] = item->ex_flavors[i];
726 }
672} 727}
673 728
674static struct cache_head *svc_export_alloc(void) 729static struct cache_head *svc_export_alloc(void)
@@ -738,16 +793,18 @@ exp_find_key(svc_client *clp, int fsid_type, u32 *fsidv, struct cache_req *reqp)
738 int err; 793 int err;
739 794
740 if (!clp) 795 if (!clp)
741 return NULL; 796 return ERR_PTR(-ENOENT);
742 797
743 key.ek_client = clp; 798 key.ek_client = clp;
744 key.ek_fsidtype = fsid_type; 799 key.ek_fsidtype = fsid_type;
745 memcpy(key.ek_fsid, fsidv, key_len(fsid_type)); 800 memcpy(key.ek_fsid, fsidv, key_len(fsid_type));
746 801
747 ek = svc_expkey_lookup(&key); 802 ek = svc_expkey_lookup(&key);
748 if (ek != NULL) 803 if (ek == NULL)
749 if ((err = cache_check(&svc_expkey_cache, &ek->h, reqp))) 804 return ERR_PTR(-ENOMEM);
750 ek = ERR_PTR(err); 805 err = cache_check(&svc_expkey_cache, &ek->h, reqp);
806 if (err)
807 return ERR_PTR(err);
751 return ek; 808 return ek;
752} 809}
753 810
@@ -808,30 +865,21 @@ exp_get_by_name(svc_client *clp, struct vfsmount *mnt, struct dentry *dentry,
808 struct cache_req *reqp) 865 struct cache_req *reqp)
809{ 866{
810 struct svc_export *exp, key; 867 struct svc_export *exp, key;
868 int err;
811 869
812 if (!clp) 870 if (!clp)
813 return NULL; 871 return ERR_PTR(-ENOENT);
814 872
815 key.ex_client = clp; 873 key.ex_client = clp;
816 key.ex_mnt = mnt; 874 key.ex_mnt = mnt;
817 key.ex_dentry = dentry; 875 key.ex_dentry = dentry;
818 876
819 exp = svc_export_lookup(&key); 877 exp = svc_export_lookup(&key);
820 if (exp != NULL) { 878 if (exp == NULL)
821 int err; 879 return ERR_PTR(-ENOMEM);
822 880 err = cache_check(&svc_export_cache, &exp->h, reqp);
823 err = cache_check(&svc_export_cache, &exp->h, reqp); 881 if (err)
824 switch (err) { 882 return ERR_PTR(err);
825 case 0: break;
826 case -EAGAIN:
827 case -ETIMEDOUT:
828 exp = ERR_PTR(err);
829 break;
830 default:
831 exp = NULL;
832 }
833 }
834
835 return exp; 883 return exp;
836} 884}
837 885
@@ -847,7 +895,7 @@ exp_parent(svc_client *clp, struct vfsmount *mnt, struct dentry *dentry,
847 dget(dentry); 895 dget(dentry);
848 exp = exp_get_by_name(clp, mnt, dentry, reqp); 896 exp = exp_get_by_name(clp, mnt, dentry, reqp);
849 897
850 while (exp == NULL && !IS_ROOT(dentry)) { 898 while (PTR_ERR(exp) == -ENOENT && !IS_ROOT(dentry)) {
851 struct dentry *parent; 899 struct dentry *parent;
852 900
853 parent = dget_parent(dentry); 901 parent = dget_parent(dentry);
@@ -900,7 +948,7 @@ static void exp_fsid_unhash(struct svc_export *exp)
900 return; 948 return;
901 949
902 ek = exp_get_fsid_key(exp->ex_client, exp->ex_fsid); 950 ek = exp_get_fsid_key(exp->ex_client, exp->ex_fsid);
903 if (ek && !IS_ERR(ek)) { 951 if (!IS_ERR(ek)) {
904 ek->h.expiry_time = get_seconds()-1; 952 ek->h.expiry_time = get_seconds()-1;
905 cache_put(&ek->h, &svc_expkey_cache); 953 cache_put(&ek->h, &svc_expkey_cache);
906 } 954 }
@@ -938,7 +986,7 @@ static void exp_unhash(struct svc_export *exp)
938 struct inode *inode = exp->ex_dentry->d_inode; 986 struct inode *inode = exp->ex_dentry->d_inode;
939 987
940 ek = exp_get_key(exp->ex_client, inode->i_sb->s_dev, inode->i_ino); 988 ek = exp_get_key(exp->ex_client, inode->i_sb->s_dev, inode->i_ino);
941 if (ek && !IS_ERR(ek)) { 989 if (!IS_ERR(ek)) {
942 ek->h.expiry_time = get_seconds()-1; 990 ek->h.expiry_time = get_seconds()-1;
943 cache_put(&ek->h, &svc_expkey_cache); 991 cache_put(&ek->h, &svc_expkey_cache);
944 } 992 }
@@ -989,13 +1037,12 @@ exp_export(struct nfsctl_export *nxp)
989 1037
990 /* must make sure there won't be an ex_fsid clash */ 1038 /* must make sure there won't be an ex_fsid clash */
991 if ((nxp->ex_flags & NFSEXP_FSID) && 1039 if ((nxp->ex_flags & NFSEXP_FSID) &&
992 (fsid_key = exp_get_fsid_key(clp, nxp->ex_dev)) && 1040 (!IS_ERR(fsid_key = exp_get_fsid_key(clp, nxp->ex_dev))) &&
993 !IS_ERR(fsid_key) &&
994 fsid_key->ek_mnt && 1041 fsid_key->ek_mnt &&
995 (fsid_key->ek_mnt != nd.mnt || fsid_key->ek_dentry != nd.dentry) ) 1042 (fsid_key->ek_mnt != nd.mnt || fsid_key->ek_dentry != nd.dentry) )
996 goto finish; 1043 goto finish;
997 1044
998 if (exp) { 1045 if (!IS_ERR(exp)) {
999 /* just a flags/id/fsid update */ 1046 /* just a flags/id/fsid update */
1000 1047
1001 exp_fsid_unhash(exp); 1048 exp_fsid_unhash(exp);
@@ -1104,7 +1151,7 @@ exp_unexport(struct nfsctl_export *nxp)
1104 err = -EINVAL; 1151 err = -EINVAL;
1105 exp = exp_get_by_name(dom, nd.mnt, nd.dentry, NULL); 1152 exp = exp_get_by_name(dom, nd.mnt, nd.dentry, NULL);
1106 path_release(&nd); 1153 path_release(&nd);
1107 if (!exp) 1154 if (IS_ERR(exp))
1108 goto out_domain; 1155 goto out_domain;
1109 1156
1110 exp_do_unexport(exp); 1157 exp_do_unexport(exp);
@@ -1149,10 +1196,6 @@ exp_rootfh(svc_client *clp, char *path, struct knfsd_fh *f, int maxsize)
1149 err = PTR_ERR(exp); 1196 err = PTR_ERR(exp);
1150 goto out; 1197 goto out;
1151 } 1198 }
1152 if (!exp) {
1153 dprintk("nfsd: exp_rootfh export not found.\n");
1154 goto out;
1155 }
1156 1199
1157 /* 1200 /*
1158 * fh must be initialized before calling fh_compose 1201 * fh must be initialized before calling fh_compose
@@ -1176,17 +1219,130 @@ exp_find(struct auth_domain *clp, int fsid_type, u32 *fsidv,
1176{ 1219{
1177 struct svc_export *exp; 1220 struct svc_export *exp;
1178 struct svc_expkey *ek = exp_find_key(clp, fsid_type, fsidv, reqp); 1221 struct svc_expkey *ek = exp_find_key(clp, fsid_type, fsidv, reqp);
1179 if (!ek || IS_ERR(ek)) 1222 if (IS_ERR(ek))
1180 return ERR_PTR(PTR_ERR(ek)); 1223 return ERR_PTR(PTR_ERR(ek));
1181 1224
1182 exp = exp_get_by_name(clp, ek->ek_mnt, ek->ek_dentry, reqp); 1225 exp = exp_get_by_name(clp, ek->ek_mnt, ek->ek_dentry, reqp);
1183 cache_put(&ek->h, &svc_expkey_cache); 1226 cache_put(&ek->h, &svc_expkey_cache);
1184 1227
1185 if (!exp || IS_ERR(exp)) 1228 if (IS_ERR(exp))
1186 return ERR_PTR(PTR_ERR(exp)); 1229 return ERR_PTR(PTR_ERR(exp));
1187 return exp; 1230 return exp;
1188} 1231}
1189 1232
1233__be32 check_nfsd_access(struct svc_export *exp, struct svc_rqst *rqstp)
1234{
1235 struct exp_flavor_info *f;
1236 struct exp_flavor_info *end = exp->ex_flavors + exp->ex_nflavors;
1237
1238 /* legacy gss-only clients are always OK: */
1239 if (exp->ex_client == rqstp->rq_gssclient)
1240 return 0;
1241 /* ip-address based client; check sec= export option: */
1242 for (f = exp->ex_flavors; f < end; f++) {
1243 if (f->pseudoflavor == rqstp->rq_flavor)
1244 return 0;
1245 }
1246 /* defaults in absence of sec= options: */
1247 if (exp->ex_nflavors == 0) {
1248 if (rqstp->rq_flavor == RPC_AUTH_NULL ||
1249 rqstp->rq_flavor == RPC_AUTH_UNIX)
1250 return 0;
1251 }
1252 return nfserr_wrongsec;
1253}
1254
1255/*
1256 * Uses rq_client and rq_gssclient to find an export; uses rq_client (an
1257 * auth_unix client) if it's available and has secinfo information;
1258 * otherwise, will try to use rq_gssclient.
1259 *
1260 * Called from functions that handle requests; functions that do work on
1261 * behalf of mountd are passed a single client name to use, and should
1262 * use exp_get_by_name() or exp_find().
1263 */
1264struct svc_export *
1265rqst_exp_get_by_name(struct svc_rqst *rqstp, struct vfsmount *mnt,
1266 struct dentry *dentry)
1267{
1268 struct svc_export *gssexp, *exp = NULL;
1269
1270 if (rqstp->rq_client == NULL)
1271 goto gss;
1272
1273 /* First try the auth_unix client: */
1274 exp = exp_get_by_name(rqstp->rq_client, mnt, dentry,
1275 &rqstp->rq_chandle);
1276 if (PTR_ERR(exp) == -ENOENT)
1277 goto gss;
1278 if (IS_ERR(exp))
1279 return exp;
1280 /* If it has secinfo, assume there are no gss/... clients */
1281 if (exp->ex_nflavors > 0)
1282 return exp;
1283gss:
1284 /* Otherwise, try falling back on gss client */
1285 if (rqstp->rq_gssclient == NULL)
1286 return exp;
1287 gssexp = exp_get_by_name(rqstp->rq_gssclient, mnt, dentry,
1288 &rqstp->rq_chandle);
1289 if (PTR_ERR(gssexp) == -ENOENT)
1290 return exp;
1291 if (exp && !IS_ERR(exp))
1292 exp_put(exp);
1293 return gssexp;
1294}
1295
1296struct svc_export *
1297rqst_exp_find(struct svc_rqst *rqstp, int fsid_type, u32 *fsidv)
1298{
1299 struct svc_export *gssexp, *exp = NULL;
1300
1301 if (rqstp->rq_client == NULL)
1302 goto gss;
1303
1304 /* First try the auth_unix client: */
1305 exp = exp_find(rqstp->rq_client, fsid_type, fsidv, &rqstp->rq_chandle);
1306 if (PTR_ERR(exp) == -ENOENT)
1307 goto gss;
1308 if (IS_ERR(exp))
1309 return exp;
1310 /* If it has secinfo, assume there are no gss/... clients */
1311 if (exp->ex_nflavors > 0)
1312 return exp;
1313gss:
1314 /* Otherwise, try falling back on gss client */
1315 if (rqstp->rq_gssclient == NULL)
1316 return exp;
1317 gssexp = exp_find(rqstp->rq_gssclient, fsid_type, fsidv,
1318 &rqstp->rq_chandle);
1319 if (PTR_ERR(gssexp) == -ENOENT)
1320 return exp;
1321 if (exp && !IS_ERR(exp))
1322 exp_put(exp);
1323 return gssexp;
1324}
1325
1326struct svc_export *
1327rqst_exp_parent(struct svc_rqst *rqstp, struct vfsmount *mnt,
1328 struct dentry *dentry)
1329{
1330 struct svc_export *exp;
1331
1332 dget(dentry);
1333 exp = rqst_exp_get_by_name(rqstp, mnt, dentry);
1334
1335 while (PTR_ERR(exp) == -ENOENT && !IS_ROOT(dentry)) {
1336 struct dentry *parent;
1337
1338 parent = dget_parent(dentry);
1339 dput(dentry);
1340 dentry = parent;
1341 exp = rqst_exp_get_by_name(rqstp, mnt, dentry);
1342 }
1343 dput(dentry);
1344 return exp;
1345}
1190 1346
1191/* 1347/*
1192 * Called when we need the filehandle for the root of the pseudofs, 1348 * Called when we need the filehandle for the root of the pseudofs,
@@ -1194,8 +1350,7 @@ exp_find(struct auth_domain *clp, int fsid_type, u32 *fsidv,
1194 * export point with fsid==0 1350 * export point with fsid==0
1195 */ 1351 */
1196__be32 1352__be32
1197exp_pseudoroot(struct auth_domain *clp, struct svc_fh *fhp, 1353exp_pseudoroot(struct svc_rqst *rqstp, struct svc_fh *fhp)
1198 struct cache_req *creq)
1199{ 1354{
1200 struct svc_export *exp; 1355 struct svc_export *exp;
1201 __be32 rv; 1356 __be32 rv;
@@ -1203,12 +1358,16 @@ exp_pseudoroot(struct auth_domain *clp, struct svc_fh *fhp,
1203 1358
1204 mk_fsid(FSID_NUM, fsidv, 0, 0, 0, NULL); 1359 mk_fsid(FSID_NUM, fsidv, 0, 0, 0, NULL);
1205 1360
1206 exp = exp_find(clp, FSID_NUM, fsidv, creq); 1361 exp = rqst_exp_find(rqstp, FSID_NUM, fsidv);
1362 if (PTR_ERR(exp) == -ENOENT)
1363 return nfserr_perm;
1207 if (IS_ERR(exp)) 1364 if (IS_ERR(exp))
1208 return nfserrno(PTR_ERR(exp)); 1365 return nfserrno(PTR_ERR(exp));
1209 if (exp == NULL)
1210 return nfserr_perm;
1211 rv = fh_compose(fhp, exp, exp->ex_dentry, NULL); 1366 rv = fh_compose(fhp, exp, exp->ex_dentry, NULL);
1367 if (rv)
1368 goto out;
1369 rv = check_nfsd_access(exp, rqstp);
1370out:
1212 exp_put(exp); 1371 exp_put(exp);
1213 return rv; 1372 return rv;
1214} 1373}
@@ -1296,28 +1455,62 @@ static struct flags {
1296 { 0, {"", ""}} 1455 { 0, {"", ""}}
1297}; 1456};
1298 1457
1299static void exp_flags(struct seq_file *m, int flag, int fsid, 1458static void show_expflags(struct seq_file *m, int flags, int mask)
1300 uid_t anonu, uid_t anong, struct nfsd4_fs_locations *fsloc)
1301{ 1459{
1302 int first = 0;
1303 struct flags *flg; 1460 struct flags *flg;
1461 int state, first = 0;
1304 1462
1305 for (flg = expflags; flg->flag; flg++) { 1463 for (flg = expflags; flg->flag; flg++) {
1306 int state = (flg->flag & flag)?0:1; 1464 if (flg->flag & ~mask)
1465 continue;
1466 state = (flg->flag & flags) ? 0 : 1;
1307 if (*flg->name[state]) 1467 if (*flg->name[state])
1308 seq_printf(m, "%s%s", first++?",":"", flg->name[state]); 1468 seq_printf(m, "%s%s", first++?",":"", flg->name[state]);
1309 } 1469 }
1470}
1471
1472static void show_secinfo_flags(struct seq_file *m, int flags)
1473{
1474 seq_printf(m, ",");
1475 show_expflags(m, flags, NFSEXP_SECINFO_FLAGS);
1476}
1477
1478static void show_secinfo(struct seq_file *m, struct svc_export *exp)
1479{
1480 struct exp_flavor_info *f;
1481 struct exp_flavor_info *end = exp->ex_flavors + exp->ex_nflavors;
1482 int lastflags = 0, first = 0;
1483
1484 if (exp->ex_nflavors == 0)
1485 return;
1486 for (f = exp->ex_flavors; f < end; f++) {
1487 if (first || f->flags != lastflags) {
1488 if (!first)
1489 show_secinfo_flags(m, lastflags);
1490 seq_printf(m, ",sec=%d", f->pseudoflavor);
1491 lastflags = f->flags;
1492 } else {
1493 seq_printf(m, ":%d", f->pseudoflavor);
1494 }
1495 }
1496 show_secinfo_flags(m, lastflags);
1497}
1498
1499static void exp_flags(struct seq_file *m, int flag, int fsid,
1500 uid_t anonu, uid_t anong, struct nfsd4_fs_locations *fsloc)
1501{
1502 show_expflags(m, flag, NFSEXP_ALLFLAGS);
1310 if (flag & NFSEXP_FSID) 1503 if (flag & NFSEXP_FSID)
1311 seq_printf(m, "%sfsid=%d", first++?",":"", fsid); 1504 seq_printf(m, ",fsid=%d", fsid);
1312 if (anonu != (uid_t)-2 && anonu != (0x10000-2)) 1505 if (anonu != (uid_t)-2 && anonu != (0x10000-2))
1313 seq_printf(m, "%sanonuid=%d", first++?",":"", anonu); 1506 seq_printf(m, ",sanonuid=%d", anonu);
1314 if (anong != (gid_t)-2 && anong != (0x10000-2)) 1507 if (anong != (gid_t)-2 && anong != (0x10000-2))
1315 seq_printf(m, "%sanongid=%d", first++?",":"", anong); 1508 seq_printf(m, ",sanongid=%d", anong);
1316 if (fsloc && fsloc->locations_count > 0) { 1509 if (fsloc && fsloc->locations_count > 0) {
1317 char *loctype = (fsloc->migrated) ? "refer" : "replicas"; 1510 char *loctype = (fsloc->migrated) ? "refer" : "replicas";
1318 int i; 1511 int i;
1319 1512
1320 seq_printf(m, "%s%s=", first++?",":"", loctype); 1513 seq_printf(m, ",%s=", loctype);
1321 seq_escape(m, fsloc->locations[0].path, ",;@ \t\n\\"); 1514 seq_escape(m, fsloc->locations[0].path, ",;@ \t\n\\");
1322 seq_putc(m, '@'); 1515 seq_putc(m, '@');
1323 seq_escape(m, fsloc->locations[0].hosts, ",;@ \t\n\\"); 1516 seq_escape(m, fsloc->locations[0].hosts, ",;@ \t\n\\");
diff --git a/fs/nfsd/lockd.c b/fs/nfsd/lockd.c
index 221acd1f11f6..9e4a568a5013 100644
--- a/fs/nfsd/lockd.c
+++ b/fs/nfsd/lockd.c
@@ -65,6 +65,7 @@ nlm_fclose(struct file *filp)
65static struct nlmsvc_binding nfsd_nlm_ops = { 65static struct nlmsvc_binding nfsd_nlm_ops = {
66 .fopen = nlm_fopen, /* open file for locking */ 66 .fopen = nlm_fopen, /* open file for locking */
67 .fclose = nlm_fclose, /* close file */ 67 .fclose = nlm_fclose, /* close file */
68 .get_grace_period = get_nfs4_grace_period,
68}; 69};
69 70
70void 71void
diff --git a/fs/nfsd/nfs4acl.c b/fs/nfsd/nfs4acl.c
index cc3b7badd486..b6ed38380ab8 100644
--- a/fs/nfsd/nfs4acl.c
+++ b/fs/nfsd/nfs4acl.c
@@ -183,8 +183,13 @@ static void
183summarize_posix_acl(struct posix_acl *acl, struct posix_acl_summary *pas) 183summarize_posix_acl(struct posix_acl *acl, struct posix_acl_summary *pas)
184{ 184{
185 struct posix_acl_entry *pa, *pe; 185 struct posix_acl_entry *pa, *pe;
186 pas->users = 0; 186
187 pas->groups = 0; 187 /*
188 * Only pas.users and pas.groups need initialization; previous
189 * posix_acl_valid() calls ensure that the other fields will be
190 * initialized in the following loop. But, just to placate gcc:
191 */
192 memset(pas, 0, sizeof(*pas));
188 pas->mask = 07; 193 pas->mask = 07;
189 194
190 pe = acl->a_entries + acl->a_count; 195 pe = acl->a_entries + acl->a_count;
@@ -732,13 +737,16 @@ int nfs4_acl_nfsv4_to_posix(struct nfs4_acl *acl, struct posix_acl **pacl,
732 *pacl = posix_state_to_acl(&effective_acl_state, flags); 737 *pacl = posix_state_to_acl(&effective_acl_state, flags);
733 if (IS_ERR(*pacl)) { 738 if (IS_ERR(*pacl)) {
734 ret = PTR_ERR(*pacl); 739 ret = PTR_ERR(*pacl);
740 *pacl = NULL;
735 goto out_dstate; 741 goto out_dstate;
736 } 742 }
737 *dpacl = posix_state_to_acl(&default_acl_state, 743 *dpacl = posix_state_to_acl(&default_acl_state,
738 flags | NFS4_ACL_TYPE_DEFAULT); 744 flags | NFS4_ACL_TYPE_DEFAULT);
739 if (IS_ERR(*dpacl)) { 745 if (IS_ERR(*dpacl)) {
740 ret = PTR_ERR(*dpacl); 746 ret = PTR_ERR(*dpacl);
747 *dpacl = NULL;
741 posix_acl_release(*pacl); 748 posix_acl_release(*pacl);
749 *pacl = NULL;
742 goto out_dstate; 750 goto out_dstate;
743 } 751 }
744 sort_pacl(*pacl); 752 sort_pacl(*pacl);
diff --git a/fs/nfsd/nfs4callback.c b/fs/nfsd/nfs4callback.c
index 5443c52b57aa..31d6633c7fe4 100644
--- a/fs/nfsd/nfs4callback.c
+++ b/fs/nfsd/nfs4callback.c
@@ -75,7 +75,7 @@ enum nfs_cb_opnum4 {
75#define op_enc_sz 1 75#define op_enc_sz 1
76#define op_dec_sz 2 76#define op_dec_sz 2
77#define enc_nfs4_fh_sz (1 + (NFS4_FHSIZE >> 2)) 77#define enc_nfs4_fh_sz (1 + (NFS4_FHSIZE >> 2))
78#define enc_stateid_sz 16 78#define enc_stateid_sz (NFS4_STATEID_SIZE >> 2)
79#define NFS4_enc_cb_recall_sz (cb_compound_enc_hdr_sz + \ 79#define NFS4_enc_cb_recall_sz (cb_compound_enc_hdr_sz + \
80 1 + enc_stateid_sz + \ 80 1 + enc_stateid_sz + \
81 enc_nfs4_fh_sz) 81 enc_nfs4_fh_sz)
diff --git a/fs/nfsd/nfs4idmap.c b/fs/nfsd/nfs4idmap.c
index 45aa21ce6784..2cf9a9a2d89c 100644
--- a/fs/nfsd/nfs4idmap.c
+++ b/fs/nfsd/nfs4idmap.c
@@ -587,6 +587,15 @@ idmap_lookup(struct svc_rqst *rqstp,
587 return ret; 587 return ret;
588} 588}
589 589
590static char *
591rqst_authname(struct svc_rqst *rqstp)
592{
593 struct auth_domain *clp;
594
595 clp = rqstp->rq_gssclient ? rqstp->rq_gssclient : rqstp->rq_client;
596 return clp->name;
597}
598
590static int 599static int
591idmap_name_to_id(struct svc_rqst *rqstp, int type, const char *name, u32 namelen, 600idmap_name_to_id(struct svc_rqst *rqstp, int type, const char *name, u32 namelen,
592 uid_t *id) 601 uid_t *id)
@@ -600,7 +609,7 @@ idmap_name_to_id(struct svc_rqst *rqstp, int type, const char *name, u32 namelen
600 return -EINVAL; 609 return -EINVAL;
601 memcpy(key.name, name, namelen); 610 memcpy(key.name, name, namelen);
602 key.name[namelen] = '\0'; 611 key.name[namelen] = '\0';
603 strlcpy(key.authname, rqstp->rq_client->name, sizeof(key.authname)); 612 strlcpy(key.authname, rqst_authname(rqstp), sizeof(key.authname));
604 ret = idmap_lookup(rqstp, nametoid_lookup, &key, &nametoid_cache, &item); 613 ret = idmap_lookup(rqstp, nametoid_lookup, &key, &nametoid_cache, &item);
605 if (ret == -ENOENT) 614 if (ret == -ENOENT)
606 ret = -ESRCH; /* nfserr_badname */ 615 ret = -ESRCH; /* nfserr_badname */
@@ -620,7 +629,7 @@ idmap_id_to_name(struct svc_rqst *rqstp, int type, uid_t id, char *name)
620 }; 629 };
621 int ret; 630 int ret;
622 631
623 strlcpy(key.authname, rqstp->rq_client->name, sizeof(key.authname)); 632 strlcpy(key.authname, rqst_authname(rqstp), sizeof(key.authname));
624 ret = idmap_lookup(rqstp, idtoname_lookup, &key, &idtoname_cache, &item); 633 ret = idmap_lookup(rqstp, idtoname_lookup, &key, &idtoname_cache, &item);
625 if (ret == -ENOENT) 634 if (ret == -ENOENT)
626 return sprintf(name, "%u", id); 635 return sprintf(name, "%u", id);
diff --git a/fs/nfsd/nfs4proc.c b/fs/nfsd/nfs4proc.c
index 8522729830db..3c627128e205 100644
--- a/fs/nfsd/nfs4proc.c
+++ b/fs/nfsd/nfs4proc.c
@@ -47,6 +47,7 @@
47#include <linux/nfsd/state.h> 47#include <linux/nfsd/state.h>
48#include <linux/nfsd/xdr4.h> 48#include <linux/nfsd/xdr4.h>
49#include <linux/nfs4_acl.h> 49#include <linux/nfs4_acl.h>
50#include <linux/sunrpc/gss_api.h>
50 51
51#define NFSDDBG_FACILITY NFSDDBG_PROC 52#define NFSDDBG_FACILITY NFSDDBG_PROC
52 53
@@ -286,8 +287,7 @@ nfsd4_putrootfh(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
286 __be32 status; 287 __be32 status;
287 288
288 fh_put(&cstate->current_fh); 289 fh_put(&cstate->current_fh);
289 status = exp_pseudoroot(rqstp->rq_client, &cstate->current_fh, 290 status = exp_pseudoroot(rqstp, &cstate->current_fh);
290 &rqstp->rq_chandle);
291 return status; 291 return status;
292} 292}
293 293
@@ -474,8 +474,8 @@ nfsd4_lookupp(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
474 __be32 ret; 474 __be32 ret;
475 475
476 fh_init(&tmp_fh, NFS4_FHSIZE); 476 fh_init(&tmp_fh, NFS4_FHSIZE);
477 if((ret = exp_pseudoroot(rqstp->rq_client, &tmp_fh, 477 ret = exp_pseudoroot(rqstp, &tmp_fh);
478 &rqstp->rq_chandle)) != 0) 478 if (ret)
479 return ret; 479 return ret;
480 if (tmp_fh.fh_dentry == cstate->current_fh.fh_dentry) { 480 if (tmp_fh.fh_dentry == cstate->current_fh.fh_dentry) {
481 fh_put(&tmp_fh); 481 fh_put(&tmp_fh);
@@ -611,6 +611,30 @@ nfsd4_rename(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
611} 611}
612 612
613static __be32 613static __be32
614nfsd4_secinfo(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
615 struct nfsd4_secinfo *secinfo)
616{
617 struct svc_fh resfh;
618 struct svc_export *exp;
619 struct dentry *dentry;
620 __be32 err;
621
622 fh_init(&resfh, NFS4_FHSIZE);
623 err = nfsd_lookup_dentry(rqstp, &cstate->current_fh,
624 secinfo->si_name, secinfo->si_namelen,
625 &exp, &dentry);
626 if (err)
627 return err;
628 if (dentry->d_inode == NULL) {
629 exp_put(exp);
630 err = nfserr_noent;
631 } else
632 secinfo->si_exp = exp;
633 dput(dentry);
634 return err;
635}
636
637static __be32
614nfsd4_setattr(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate, 638nfsd4_setattr(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
615 struct nfsd4_setattr *setattr) 639 struct nfsd4_setattr *setattr)
616{ 640{
@@ -1009,6 +1033,9 @@ static struct nfsd4_operation nfsd4_ops[OP_RELEASE_LOCKOWNER+1] = {
1009 [OP_SAVEFH] = { 1033 [OP_SAVEFH] = {
1010 .op_func = (nfsd4op_func)nfsd4_savefh, 1034 .op_func = (nfsd4op_func)nfsd4_savefh,
1011 }, 1035 },
1036 [OP_SECINFO] = {
1037 .op_func = (nfsd4op_func)nfsd4_secinfo,
1038 },
1012 [OP_SETATTR] = { 1039 [OP_SETATTR] = {
1013 .op_func = (nfsd4op_func)nfsd4_setattr, 1040 .op_func = (nfsd4op_func)nfsd4_setattr,
1014 }, 1041 },
diff --git a/fs/nfsd/nfs4state.c b/fs/nfsd/nfs4state.c
index 8c52913d7cb6..e4a4c87ec8c6 100644
--- a/fs/nfsd/nfs4state.c
+++ b/fs/nfsd/nfs4state.c
@@ -49,8 +49,10 @@
49#include <linux/nfsd/state.h> 49#include <linux/nfsd/state.h>
50#include <linux/nfsd/xdr4.h> 50#include <linux/nfsd/xdr4.h>
51#include <linux/namei.h> 51#include <linux/namei.h>
52#include <linux/swap.h>
52#include <linux/mutex.h> 53#include <linux/mutex.h>
53#include <linux/lockd/bind.h> 54#include <linux/lockd/bind.h>
55#include <linux/module.h>
54 56
55#define NFSDDBG_FACILITY NFSDDBG_PROC 57#define NFSDDBG_FACILITY NFSDDBG_PROC
56 58
@@ -149,6 +151,7 @@ get_nfs4_file(struct nfs4_file *fi)
149} 151}
150 152
151static int num_delegations; 153static int num_delegations;
154unsigned int max_delegations;
152 155
153/* 156/*
154 * Open owner state (share locks) 157 * Open owner state (share locks)
@@ -192,7 +195,9 @@ alloc_init_deleg(struct nfs4_client *clp, struct nfs4_stateid *stp, struct svc_f
192 struct nfs4_callback *cb = &stp->st_stateowner->so_client->cl_callback; 195 struct nfs4_callback *cb = &stp->st_stateowner->so_client->cl_callback;
193 196
194 dprintk("NFSD alloc_init_deleg\n"); 197 dprintk("NFSD alloc_init_deleg\n");
195 if (num_delegations > STATEID_HASH_SIZE * 4) 198 if (fp->fi_had_conflict)
199 return NULL;
200 if (num_delegations > max_delegations)
196 return NULL; 201 return NULL;
197 dp = kmem_cache_alloc(deleg_slab, GFP_KERNEL); 202 dp = kmem_cache_alloc(deleg_slab, GFP_KERNEL);
198 if (dp == NULL) 203 if (dp == NULL)
@@ -999,6 +1004,7 @@ alloc_init_file(struct inode *ino)
999 list_add(&fp->fi_hash, &file_hashtbl[hashval]); 1004 list_add(&fp->fi_hash, &file_hashtbl[hashval]);
1000 fp->fi_inode = igrab(ino); 1005 fp->fi_inode = igrab(ino);
1001 fp->fi_id = current_fileid++; 1006 fp->fi_id = current_fileid++;
1007 fp->fi_had_conflict = false;
1002 return fp; 1008 return fp;
1003 } 1009 }
1004 return NULL; 1010 return NULL;
@@ -1325,6 +1331,7 @@ do_recall(void *__dp)
1325{ 1331{
1326 struct nfs4_delegation *dp = __dp; 1332 struct nfs4_delegation *dp = __dp;
1327 1333
1334 dp->dl_file->fi_had_conflict = true;
1328 nfsd4_cb_recall(dp); 1335 nfsd4_cb_recall(dp);
1329 return 0; 1336 return 0;
1330} 1337}
@@ -3190,20 +3197,49 @@ nfsd4_load_reboot_recovery_data(void)
3190 printk("NFSD: Failure reading reboot recovery data\n"); 3197 printk("NFSD: Failure reading reboot recovery data\n");
3191} 3198}
3192 3199
3200unsigned long
3201get_nfs4_grace_period(void)
3202{
3203 return max(user_lease_time, lease_time) * HZ;
3204}
3205
3206/*
3207 * Since the lifetime of a delegation isn't limited to that of an open, a
3208 * client may quite reasonably hang on to a delegation as long as it has
3209 * the inode cached. This becomes an obvious problem the first time a
3210 * client's inode cache approaches the size of the server's total memory.
3211 *
3212 * For now we avoid this problem by imposing a hard limit on the number
3213 * of delegations, which varies according to the server's memory size.
3214 */
3215static void
3216set_max_delegations(void)
3217{
3218 /*
3219 * Allow at most 4 delegations per megabyte of RAM. Quick
3220 * estimates suggest that in the worst case (where every delegation
3221 * is for a different inode), a delegation could take about 1.5K,
3222 * giving a worst case usage of about 6% of memory.
3223 */
3224 max_delegations = nr_free_buffer_pages() >> (20 - 2 - PAGE_SHIFT);
3225}
3226
3193/* initialization to perform when the nfsd service is started: */ 3227/* initialization to perform when the nfsd service is started: */
3194 3228
3195static void 3229static void
3196__nfs4_state_start(void) 3230__nfs4_state_start(void)
3197{ 3231{
3198 time_t grace_time; 3232 unsigned long grace_time;
3199 3233
3200 boot_time = get_seconds(); 3234 boot_time = get_seconds();
3201 grace_time = max(user_lease_time, lease_time); 3235 grace_time = get_nfs_grace_period();
3202 lease_time = user_lease_time; 3236 lease_time = user_lease_time;
3203 in_grace = 1; 3237 in_grace = 1;
3204 printk("NFSD: starting %ld-second grace period\n", grace_time); 3238 printk(KERN_INFO "NFSD: starting %ld-second grace period\n",
3239 grace_time/HZ);
3205 laundry_wq = create_singlethread_workqueue("nfsd4"); 3240 laundry_wq = create_singlethread_workqueue("nfsd4");
3206 queue_delayed_work(laundry_wq, &laundromat_work, grace_time*HZ); 3241 queue_delayed_work(laundry_wq, &laundromat_work, grace_time);
3242 set_max_delegations();
3207} 3243}
3208 3244
3209int 3245int
diff --git a/fs/nfsd/nfs4xdr.c b/fs/nfsd/nfs4xdr.c
index 15809dfd88a5..b3d55c6747fd 100644
--- a/fs/nfsd/nfs4xdr.c
+++ b/fs/nfsd/nfs4xdr.c
@@ -56,6 +56,8 @@
56#include <linux/nfsd_idmap.h> 56#include <linux/nfsd_idmap.h>
57#include <linux/nfs4.h> 57#include <linux/nfs4.h>
58#include <linux/nfs4_acl.h> 58#include <linux/nfs4_acl.h>
59#include <linux/sunrpc/gss_api.h>
60#include <linux/sunrpc/svcauth_gss.h>
59 61
60#define NFSDDBG_FACILITY NFSDDBG_XDR 62#define NFSDDBG_FACILITY NFSDDBG_XDR
61 63
@@ -819,6 +821,23 @@ nfsd4_decode_renew(struct nfsd4_compoundargs *argp, clientid_t *clientid)
819} 821}
820 822
821static __be32 823static __be32
824nfsd4_decode_secinfo(struct nfsd4_compoundargs *argp,
825 struct nfsd4_secinfo *secinfo)
826{
827 DECODE_HEAD;
828
829 READ_BUF(4);
830 READ32(secinfo->si_namelen);
831 READ_BUF(secinfo->si_namelen);
832 SAVEMEM(secinfo->si_name, secinfo->si_namelen);
833 status = check_filename(secinfo->si_name, secinfo->si_namelen,
834 nfserr_noent);
835 if (status)
836 return status;
837 DECODE_TAIL;
838}
839
840static __be32
822nfsd4_decode_setattr(struct nfsd4_compoundargs *argp, struct nfsd4_setattr *setattr) 841nfsd4_decode_setattr(struct nfsd4_compoundargs *argp, struct nfsd4_setattr *setattr)
823{ 842{
824 DECODE_HEAD; 843 DECODE_HEAD;
@@ -1131,6 +1150,9 @@ nfsd4_decode_compound(struct nfsd4_compoundargs *argp)
1131 case OP_SAVEFH: 1150 case OP_SAVEFH:
1132 op->status = nfs_ok; 1151 op->status = nfs_ok;
1133 break; 1152 break;
1153 case OP_SECINFO:
1154 op->status = nfsd4_decode_secinfo(argp, &op->u.secinfo);
1155 break;
1134 case OP_SETATTR: 1156 case OP_SETATTR:
1135 op->status = nfsd4_decode_setattr(argp, &op->u.setattr); 1157 op->status = nfsd4_decode_setattr(argp, &op->u.setattr);
1136 break; 1158 break;
@@ -1296,7 +1318,7 @@ static char *nfsd4_path(struct svc_rqst *rqstp, struct svc_export *exp, __be32 *
1296 char *path, *rootpath; 1318 char *path, *rootpath;
1297 1319
1298 fh_init(&tmp_fh, NFS4_FHSIZE); 1320 fh_init(&tmp_fh, NFS4_FHSIZE);
1299 *stat = exp_pseudoroot(rqstp->rq_client, &tmp_fh, &rqstp->rq_chandle); 1321 *stat = exp_pseudoroot(rqstp, &tmp_fh);
1300 if (*stat) 1322 if (*stat)
1301 return NULL; 1323 return NULL;
1302 rootpath = tmp_fh.fh_export->ex_path; 1324 rootpath = tmp_fh.fh_export->ex_path;
@@ -1847,11 +1869,19 @@ nfsd4_encode_dirent_fattr(struct nfsd4_readdir *cd,
1847 if (d_mountpoint(dentry)) { 1869 if (d_mountpoint(dentry)) {
1848 int err; 1870 int err;
1849 1871
1872 /*
1873 * Why the heck aren't we just using nfsd_lookup??
1874 * Different "."/".." handling? Something else?
1875 * At least, add a comment here to explain....
1876 */
1850 err = nfsd_cross_mnt(cd->rd_rqstp, &dentry, &exp); 1877 err = nfsd_cross_mnt(cd->rd_rqstp, &dentry, &exp);
1851 if (err) { 1878 if (err) {
1852 nfserr = nfserrno(err); 1879 nfserr = nfserrno(err);
1853 goto out_put; 1880 goto out_put;
1854 } 1881 }
1882 nfserr = check_nfsd_access(exp, cd->rd_rqstp);
1883 if (nfserr)
1884 goto out_put;
1855 1885
1856 } 1886 }
1857 nfserr = nfsd4_encode_fattr(NULL, exp, dentry, p, buflen, cd->rd_bmval, 1887 nfserr = nfsd4_encode_fattr(NULL, exp, dentry, p, buflen, cd->rd_bmval,
@@ -2419,6 +2449,72 @@ nfsd4_encode_rename(struct nfsd4_compoundres *resp, __be32 nfserr, struct nfsd4_
2419 } 2449 }
2420} 2450}
2421 2451
2452static void
2453nfsd4_encode_secinfo(struct nfsd4_compoundres *resp, int nfserr,
2454 struct nfsd4_secinfo *secinfo)
2455{
2456 int i = 0;
2457 struct svc_export *exp = secinfo->si_exp;
2458 u32 nflavs;
2459 struct exp_flavor_info *flavs;
2460 struct exp_flavor_info def_flavs[2];
2461 ENCODE_HEAD;
2462
2463 if (nfserr)
2464 goto out;
2465 if (exp->ex_nflavors) {
2466 flavs = exp->ex_flavors;
2467 nflavs = exp->ex_nflavors;
2468 } else { /* Handling of some defaults in absence of real secinfo: */
2469 flavs = def_flavs;
2470 if (exp->ex_client->flavour->flavour == RPC_AUTH_UNIX) {
2471 nflavs = 2;
2472 flavs[0].pseudoflavor = RPC_AUTH_UNIX;
2473 flavs[1].pseudoflavor = RPC_AUTH_NULL;
2474 } else if (exp->ex_client->flavour->flavour == RPC_AUTH_GSS) {
2475 nflavs = 1;
2476 flavs[0].pseudoflavor
2477 = svcauth_gss_flavor(exp->ex_client);
2478 } else {
2479 nflavs = 1;
2480 flavs[0].pseudoflavor
2481 = exp->ex_client->flavour->flavour;
2482 }
2483 }
2484
2485 RESERVE_SPACE(4);
2486 WRITE32(nflavs);
2487 ADJUST_ARGS();
2488 for (i = 0; i < nflavs; i++) {
2489 u32 flav = flavs[i].pseudoflavor;
2490 struct gss_api_mech *gm = gss_mech_get_by_pseudoflavor(flav);
2491
2492 if (gm) {
2493 RESERVE_SPACE(4);
2494 WRITE32(RPC_AUTH_GSS);
2495 ADJUST_ARGS();
2496 RESERVE_SPACE(4 + gm->gm_oid.len);
2497 WRITE32(gm->gm_oid.len);
2498 WRITEMEM(gm->gm_oid.data, gm->gm_oid.len);
2499 ADJUST_ARGS();
2500 RESERVE_SPACE(4);
2501 WRITE32(0); /* qop */
2502 ADJUST_ARGS();
2503 RESERVE_SPACE(4);
2504 WRITE32(gss_pseudoflavor_to_service(gm, flav));
2505 ADJUST_ARGS();
2506 gss_mech_put(gm);
2507 } else {
2508 RESERVE_SPACE(4);
2509 WRITE32(flav);
2510 ADJUST_ARGS();
2511 }
2512 }
2513out:
2514 if (exp)
2515 exp_put(exp);
2516}
2517
2422/* 2518/*
2423 * The SETATTR encode routine is special -- it always encodes a bitmap, 2519 * The SETATTR encode routine is special -- it always encodes a bitmap,
2424 * regardless of the error status. 2520 * regardless of the error status.
@@ -2559,6 +2655,9 @@ nfsd4_encode_operation(struct nfsd4_compoundres *resp, struct nfsd4_op *op)
2559 break; 2655 break;
2560 case OP_SAVEFH: 2656 case OP_SAVEFH:
2561 break; 2657 break;
2658 case OP_SECINFO:
2659 nfsd4_encode_secinfo(resp, op->status, &op->u.secinfo);
2660 break;
2562 case OP_SETATTR: 2661 case OP_SETATTR:
2563 nfsd4_encode_setattr(resp, op->status, &op->u.setattr); 2662 nfsd4_encode_setattr(resp, op->status, &op->u.setattr);
2564 break; 2663 break;
diff --git a/fs/nfsd/nfsctl.c b/fs/nfsd/nfsctl.c
index 71c686dc7257..baac89d917ca 100644
--- a/fs/nfsd/nfsctl.c
+++ b/fs/nfsd/nfsctl.c
@@ -35,7 +35,6 @@
35#include <linux/nfsd/cache.h> 35#include <linux/nfsd/cache.h>
36#include <linux/nfsd/xdr.h> 36#include <linux/nfsd/xdr.h>
37#include <linux/nfsd/syscall.h> 37#include <linux/nfsd/syscall.h>
38#include <linux/nfsd/interface.h>
39 38
40#include <asm/uaccess.h> 39#include <asm/uaccess.h>
41 40
@@ -245,7 +244,7 @@ static ssize_t write_getfs(struct file *file, char *buf, size_t size)
245 } 244 }
246 exp_readunlock(); 245 exp_readunlock();
247 if (err == 0) 246 if (err == 0)
248 err = res->fh_size + (int)&((struct knfsd_fh*)0)->fh_base; 247 err = res->fh_size + offsetof(struct knfsd_fh, fh_base);
249 out: 248 out:
250 return err; 249 return err;
251} 250}
diff --git a/fs/nfsd/nfsfh.c b/fs/nfsd/nfsfh.c
index 6ca2d24fc216..0eb464a39aae 100644
--- a/fs/nfsd/nfsfh.c
+++ b/fs/nfsd/nfsfh.c
@@ -15,10 +15,12 @@
15#include <linux/string.h> 15#include <linux/string.h>
16#include <linux/stat.h> 16#include <linux/stat.h>
17#include <linux/dcache.h> 17#include <linux/dcache.h>
18#include <linux/exportfs.h>
18#include <linux/mount.h> 19#include <linux/mount.h>
19 20
20#include <linux/sunrpc/clnt.h> 21#include <linux/sunrpc/clnt.h>
21#include <linux/sunrpc/svc.h> 22#include <linux/sunrpc/svc.h>
23#include <linux/sunrpc/svcauth_gss.h>
22#include <linux/nfsd/nfsd.h> 24#include <linux/nfsd/nfsd.h>
23 25
24#define NFSDDBG_FACILITY NFSDDBG_FH 26#define NFSDDBG_FACILITY NFSDDBG_FH
@@ -27,10 +29,6 @@
27static int nfsd_nr_verified; 29static int nfsd_nr_verified;
28static int nfsd_nr_put; 30static int nfsd_nr_put;
29 31
30extern struct export_operations export_op_default;
31
32#define CALL(ops,fun) ((ops->fun)?(ops->fun):export_op_default.fun)
33
34/* 32/*
35 * our acceptability function. 33 * our acceptability function.
36 * if NOSUBTREECHECK, accept anything 34 * if NOSUBTREECHECK, accept anything
@@ -123,8 +121,6 @@ fh_verify(struct svc_rqst *rqstp, struct svc_fh *fhp, int type, int access)
123 int data_left = fh->fh_size/4; 121 int data_left = fh->fh_size/4;
124 122
125 error = nfserr_stale; 123 error = nfserr_stale;
126 if (rqstp->rq_client == NULL)
127 goto out;
128 if (rqstp->rq_vers > 2) 124 if (rqstp->rq_vers > 2)
129 error = nfserr_badhandle; 125 error = nfserr_badhandle;
130 if (rqstp->rq_vers == 4 && fh->fh_size == 0) 126 if (rqstp->rq_vers == 4 && fh->fh_size == 0)
@@ -148,7 +144,7 @@ fh_verify(struct svc_rqst *rqstp, struct svc_fh *fhp, int type, int access)
148 fh->fh_fsid[1] = fh->fh_fsid[2]; 144 fh->fh_fsid[1] = fh->fh_fsid[2];
149 } 145 }
150 if ((data_left -= len)<0) goto out; 146 if ((data_left -= len)<0) goto out;
151 exp = exp_find(rqstp->rq_client, fh->fh_fsid_type, datap, &rqstp->rq_chandle); 147 exp = rqst_exp_find(rqstp, fh->fh_fsid_type, datap);
152 datap += len; 148 datap += len;
153 } else { 149 } else {
154 dev_t xdev; 150 dev_t xdev;
@@ -159,19 +155,17 @@ fh_verify(struct svc_rqst *rqstp, struct svc_fh *fhp, int type, int access)
159 xdev = old_decode_dev(fh->ofh_xdev); 155 xdev = old_decode_dev(fh->ofh_xdev);
160 xino = u32_to_ino_t(fh->ofh_xino); 156 xino = u32_to_ino_t(fh->ofh_xino);
161 mk_fsid(FSID_DEV, tfh, xdev, xino, 0, NULL); 157 mk_fsid(FSID_DEV, tfh, xdev, xino, 0, NULL);
162 exp = exp_find(rqstp->rq_client, FSID_DEV, tfh, 158 exp = rqst_exp_find(rqstp, FSID_DEV, tfh);
163 &rqstp->rq_chandle);
164 } 159 }
165 160
166 if (IS_ERR(exp) && (PTR_ERR(exp) == -EAGAIN 161 error = nfserr_stale;
167 || PTR_ERR(exp) == -ETIMEDOUT)) { 162 if (PTR_ERR(exp) == -ENOENT)
168 error = nfserrno(PTR_ERR(exp));
169 goto out; 163 goto out;
170 }
171 164
172 error = nfserr_stale; 165 if (IS_ERR(exp)) {
173 if (!exp || IS_ERR(exp)) 166 error = nfserrno(PTR_ERR(exp));
174 goto out; 167 goto out;
168 }
175 169
176 /* Check if the request originated from a secure port. */ 170 /* Check if the request originated from a secure port. */
177 error = nfserr_perm; 171 error = nfserr_perm;
@@ -211,11 +205,9 @@ fh_verify(struct svc_rqst *rqstp, struct svc_fh *fhp, int type, int access)
211 if (fileid_type == 0) 205 if (fileid_type == 0)
212 dentry = dget(exp->ex_dentry); 206 dentry = dget(exp->ex_dentry);
213 else { 207 else {
214 struct export_operations *nop = exp->ex_mnt->mnt_sb->s_export_op; 208 dentry = exportfs_decode_fh(exp->ex_mnt, datap,
215 dentry = CALL(nop,decode_fh)(exp->ex_mnt->mnt_sb, 209 data_left, fileid_type,
216 datap, data_left, 210 nfsd_acceptable, exp);
217 fileid_type,
218 nfsd_acceptable, exp);
219 } 211 }
220 if (dentry == NULL) 212 if (dentry == NULL)
221 goto out; 213 goto out;
@@ -257,8 +249,19 @@ fh_verify(struct svc_rqst *rqstp, struct svc_fh *fhp, int type, int access)
257 if (error) 249 if (error)
258 goto out; 250 goto out;
259 251
252 if (!(access & MAY_LOCK)) {
253 /*
254 * pseudoflavor restrictions are not enforced on NLM,
255 * which clients virtually always use auth_sys for,
256 * even while using RPCSEC_GSS for NFS.
257 */
258 error = check_nfsd_access(exp, rqstp);
259 if (error)
260 goto out;
261 }
262
260 /* Finally, check access permissions. */ 263 /* Finally, check access permissions. */
261 error = nfsd_permission(exp, dentry, access); 264 error = nfsd_permission(rqstp, exp, dentry, access);
262 265
263 if (error) { 266 if (error) {
264 dprintk("fh_verify: %s/%s permission failure, " 267 dprintk("fh_verify: %s/%s permission failure, "
@@ -286,15 +289,13 @@ out:
286static inline int _fh_update(struct dentry *dentry, struct svc_export *exp, 289static inline int _fh_update(struct dentry *dentry, struct svc_export *exp,
287 __u32 *datap, int *maxsize) 290 __u32 *datap, int *maxsize)
288{ 291{
289 struct export_operations *nop = exp->ex_mnt->mnt_sb->s_export_op;
290
291 if (dentry == exp->ex_dentry) { 292 if (dentry == exp->ex_dentry) {
292 *maxsize = 0; 293 *maxsize = 0;
293 return 0; 294 return 0;
294 } 295 }
295 296
296 return CALL(nop,encode_fh)(dentry, datap, maxsize, 297 return exportfs_encode_fh(dentry, datap, maxsize,
297 !(exp->ex_flags&NFSEXP_NOSUBTREECHECK)); 298 !(exp->ex_flags & NFSEXP_NOSUBTREECHECK));
298} 299}
299 300
300/* 301/*
diff --git a/fs/nfsd/nfsproc.c b/fs/nfsd/nfsproc.c
index b2c7147aa921..977a71f64e19 100644
--- a/fs/nfsd/nfsproc.c
+++ b/fs/nfsd/nfsproc.c
@@ -278,7 +278,8 @@ nfsd_proc_create(struct svc_rqst *rqstp, struct nfsd_createargs *argp,
278 * echo thing > device-special-file-or-pipe 278 * echo thing > device-special-file-or-pipe
279 * by doing a CREATE with type==0 279 * by doing a CREATE with type==0
280 */ 280 */
281 nfserr = nfsd_permission(newfhp->fh_export, 281 nfserr = nfsd_permission(rqstp,
282 newfhp->fh_export,
282 newfhp->fh_dentry, 283 newfhp->fh_dentry,
283 MAY_WRITE|MAY_LOCAL_ACCESS); 284 MAY_WRITE|MAY_LOCAL_ACCESS);
284 if (nfserr && nfserr != nfserr_rofs) 285 if (nfserr && nfserr != nfserr_rofs)
diff --git a/fs/nfsd/nfssvc.c b/fs/nfsd/nfssvc.c
index ff55950efb43..a8c89ae4c743 100644
--- a/fs/nfsd/nfssvc.c
+++ b/fs/nfsd/nfssvc.c
@@ -19,6 +19,7 @@
19#include <linux/slab.h> 19#include <linux/slab.h>
20#include <linux/smp.h> 20#include <linux/smp.h>
21#include <linux/smp_lock.h> 21#include <linux/smp_lock.h>
22#include <linux/freezer.h>
22#include <linux/fs_struct.h> 23#include <linux/fs_struct.h>
23 24
24#include <linux/sunrpc/types.h> 25#include <linux/sunrpc/types.h>
@@ -432,6 +433,7 @@ nfsd(struct svc_rqst *rqstp)
432 * dirty pages. 433 * dirty pages.
433 */ 434 */
434 current->flags |= PF_LESS_THROTTLE; 435 current->flags |= PF_LESS_THROTTLE;
436 set_freezable();
435 437
436 /* 438 /*
437 * The main request loop 439 * The main request loop
@@ -492,6 +494,15 @@ out:
492 module_put_and_exit(0); 494 module_put_and_exit(0);
493} 495}
494 496
497static __be32 map_new_errors(u32 vers, __be32 nfserr)
498{
499 if (nfserr == nfserr_jukebox && vers == 2)
500 return nfserr_dropit;
501 if (nfserr == nfserr_wrongsec && vers < 4)
502 return nfserr_acces;
503 return nfserr;
504}
505
495int 506int
496nfsd_dispatch(struct svc_rqst *rqstp, __be32 *statp) 507nfsd_dispatch(struct svc_rqst *rqstp, __be32 *statp)
497{ 508{
@@ -534,6 +545,7 @@ nfsd_dispatch(struct svc_rqst *rqstp, __be32 *statp)
534 545
535 /* Now call the procedure handler, and encode NFS status. */ 546 /* Now call the procedure handler, and encode NFS status. */
536 nfserr = proc->pc_func(rqstp, rqstp->rq_argp, rqstp->rq_resp); 547 nfserr = proc->pc_func(rqstp, rqstp->rq_argp, rqstp->rq_resp);
548 nfserr = map_new_errors(rqstp->rq_vers, nfserr);
537 if (nfserr == nfserr_jukebox && rqstp->rq_vers == 2) 549 if (nfserr == nfserr_jukebox && rqstp->rq_vers == 2)
538 nfserr = nfserr_dropit; 550 nfserr = nfserr_dropit;
539 if (nfserr == nfserr_dropit) { 551 if (nfserr == nfserr_dropit) {
diff --git a/fs/nfsd/vfs.c b/fs/nfsd/vfs.c
index 945b1cedde2b..e90f4a8a1d01 100644
--- a/fs/nfsd/vfs.c
+++ b/fs/nfsd/vfs.c
@@ -113,7 +113,7 @@ nfsd_cross_mnt(struct svc_rqst *rqstp, struct dentry **dpp,
113 113
114 while (follow_down(&mnt,&mounts)&&d_mountpoint(mounts)); 114 while (follow_down(&mnt,&mounts)&&d_mountpoint(mounts));
115 115
116 exp2 = exp_get_by_name(exp->ex_client, mnt, mounts, &rqstp->rq_chandle); 116 exp2 = rqst_exp_get_by_name(rqstp, mnt, mounts);
117 if (IS_ERR(exp2)) { 117 if (IS_ERR(exp2)) {
118 err = PTR_ERR(exp2); 118 err = PTR_ERR(exp2);
119 dput(mounts); 119 dput(mounts);
@@ -135,21 +135,10 @@ out:
135 return err; 135 return err;
136} 136}
137 137
138/*
139 * Look up one component of a pathname.
140 * N.B. After this call _both_ fhp and resfh need an fh_put
141 *
142 * If the lookup would cross a mountpoint, and the mounted filesystem
143 * is exported to the client with NFSEXP_NOHIDE, then the lookup is
144 * accepted as it stands and the mounted directory is
145 * returned. Otherwise the covered directory is returned.
146 * NOTE: this mountpoint crossing is not supported properly by all
147 * clients and is explicitly disallowed for NFSv3
148 * NeilBrown <neilb@cse.unsw.edu.au>
149 */
150__be32 138__be32
151nfsd_lookup(struct svc_rqst *rqstp, struct svc_fh *fhp, const char *name, 139nfsd_lookup_dentry(struct svc_rqst *rqstp, struct svc_fh *fhp,
152 int len, struct svc_fh *resfh) 140 const char *name, int len,
141 struct svc_export **exp_ret, struct dentry **dentry_ret)
153{ 142{
154 struct svc_export *exp; 143 struct svc_export *exp;
155 struct dentry *dparent; 144 struct dentry *dparent;
@@ -168,8 +157,6 @@ nfsd_lookup(struct svc_rqst *rqstp, struct svc_fh *fhp, const char *name,
168 exp = fhp->fh_export; 157 exp = fhp->fh_export;
169 exp_get(exp); 158 exp_get(exp);
170 159
171 err = nfserr_acces;
172
173 /* Lookup the name, but don't follow links */ 160 /* Lookup the name, but don't follow links */
174 if (isdotent(name, len)) { 161 if (isdotent(name, len)) {
175 if (len==1) 162 if (len==1)
@@ -190,17 +177,15 @@ nfsd_lookup(struct svc_rqst *rqstp, struct svc_fh *fhp, const char *name,
190 dput(dentry); 177 dput(dentry);
191 dentry = dp; 178 dentry = dp;
192 179
193 exp2 = exp_parent(exp->ex_client, mnt, dentry, 180 exp2 = rqst_exp_parent(rqstp, mnt, dentry);
194 &rqstp->rq_chandle); 181 if (PTR_ERR(exp2) == -ENOENT) {
195 if (IS_ERR(exp2)) { 182 dput(dentry);
183 dentry = dget(dparent);
184 } else if (IS_ERR(exp2)) {
196 host_err = PTR_ERR(exp2); 185 host_err = PTR_ERR(exp2);
197 dput(dentry); 186 dput(dentry);
198 mntput(mnt); 187 mntput(mnt);
199 goto out_nfserr; 188 goto out_nfserr;
200 }
201 if (!exp2) {
202 dput(dentry);
203 dentry = dget(dparent);
204 } else { 189 } else {
205 exp_put(exp); 190 exp_put(exp);
206 exp = exp2; 191 exp = exp2;
@@ -223,6 +208,41 @@ nfsd_lookup(struct svc_rqst *rqstp, struct svc_fh *fhp, const char *name,
223 } 208 }
224 } 209 }
225 } 210 }
211 *dentry_ret = dentry;
212 *exp_ret = exp;
213 return 0;
214
215out_nfserr:
216 exp_put(exp);
217 return nfserrno(host_err);
218}
219
220/*
221 * Look up one component of a pathname.
222 * N.B. After this call _both_ fhp and resfh need an fh_put
223 *
224 * If the lookup would cross a mountpoint, and the mounted filesystem
225 * is exported to the client with NFSEXP_NOHIDE, then the lookup is
226 * accepted as it stands and the mounted directory is
227 * returned. Otherwise the covered directory is returned.
228 * NOTE: this mountpoint crossing is not supported properly by all
229 * clients and is explicitly disallowed for NFSv3
230 * NeilBrown <neilb@cse.unsw.edu.au>
231 */
232__be32
233nfsd_lookup(struct svc_rqst *rqstp, struct svc_fh *fhp, const char *name,
234 int len, struct svc_fh *resfh)
235{
236 struct svc_export *exp;
237 struct dentry *dentry;
238 __be32 err;
239
240 err = nfsd_lookup_dentry(rqstp, fhp, name, len, &exp, &dentry);
241 if (err)
242 return err;
243 err = check_nfsd_access(exp, rqstp);
244 if (err)
245 goto out;
226 /* 246 /*
227 * Note: we compose the file handle now, but as the 247 * Note: we compose the file handle now, but as the
228 * dentry may be negative, it may need to be updated. 248 * dentry may be negative, it may need to be updated.
@@ -230,16 +250,13 @@ nfsd_lookup(struct svc_rqst *rqstp, struct svc_fh *fhp, const char *name,
230 err = fh_compose(resfh, exp, dentry, fhp); 250 err = fh_compose(resfh, exp, dentry, fhp);
231 if (!err && !dentry->d_inode) 251 if (!err && !dentry->d_inode)
232 err = nfserr_noent; 252 err = nfserr_noent;
233 dput(dentry);
234out: 253out:
254 dput(dentry);
235 exp_put(exp); 255 exp_put(exp);
236 return err; 256 return err;
237
238out_nfserr:
239 err = nfserrno(host_err);
240 goto out;
241} 257}
242 258
259
243/* 260/*
244 * Set various file attributes. 261 * Set various file attributes.
245 * N.B. After this call fhp needs an fh_put 262 * N.B. After this call fhp needs an fh_put
@@ -311,7 +328,7 @@ nfsd_setattr(struct svc_rqst *rqstp, struct svc_fh *fhp, struct iattr *iap,
311 /* The size case is special. It changes the file as well as the attributes. */ 328 /* The size case is special. It changes the file as well as the attributes. */
312 if (iap->ia_valid & ATTR_SIZE) { 329 if (iap->ia_valid & ATTR_SIZE) {
313 if (iap->ia_size < inode->i_size) { 330 if (iap->ia_size < inode->i_size) {
314 err = nfsd_permission(fhp->fh_export, dentry, MAY_TRUNC|MAY_OWNER_OVERRIDE); 331 err = nfsd_permission(rqstp, fhp->fh_export, dentry, MAY_TRUNC|MAY_OWNER_OVERRIDE);
315 if (err) 332 if (err)
316 goto out; 333 goto out;
317 } 334 }
@@ -435,7 +452,7 @@ nfsd4_set_nfs4_acl(struct svc_rqst *rqstp, struct svc_fh *fhp,
435 /* Get inode */ 452 /* Get inode */
436 error = fh_verify(rqstp, fhp, 0 /* S_IFREG */, MAY_SATTR); 453 error = fh_verify(rqstp, fhp, 0 /* S_IFREG */, MAY_SATTR);
437 if (error) 454 if (error)
438 goto out; 455 return error;
439 456
440 dentry = fhp->fh_dentry; 457 dentry = fhp->fh_dentry;
441 inode = dentry->d_inode; 458 inode = dentry->d_inode;
@@ -444,33 +461,25 @@ nfsd4_set_nfs4_acl(struct svc_rqst *rqstp, struct svc_fh *fhp,
444 461
445 host_error = nfs4_acl_nfsv4_to_posix(acl, &pacl, &dpacl, flags); 462 host_error = nfs4_acl_nfsv4_to_posix(acl, &pacl, &dpacl, flags);
446 if (host_error == -EINVAL) { 463 if (host_error == -EINVAL) {
447 error = nfserr_attrnotsupp; 464 return nfserr_attrnotsupp;
448 goto out;
449 } else if (host_error < 0) 465 } else if (host_error < 0)
450 goto out_nfserr; 466 goto out_nfserr;
451 467
452 host_error = set_nfsv4_acl_one(dentry, pacl, POSIX_ACL_XATTR_ACCESS); 468 host_error = set_nfsv4_acl_one(dentry, pacl, POSIX_ACL_XATTR_ACCESS);
453 if (host_error < 0) 469 if (host_error < 0)
454 goto out_nfserr; 470 goto out_release;
455 471
456 if (S_ISDIR(inode->i_mode)) { 472 if (S_ISDIR(inode->i_mode))
457 host_error = set_nfsv4_acl_one(dentry, dpacl, POSIX_ACL_XATTR_DEFAULT); 473 host_error = set_nfsv4_acl_one(dentry, dpacl, POSIX_ACL_XATTR_DEFAULT);
458 if (host_error < 0)
459 goto out_nfserr;
460 }
461
462 error = nfs_ok;
463 474
464out: 475out_release:
465 posix_acl_release(pacl); 476 posix_acl_release(pacl);
466 posix_acl_release(dpacl); 477 posix_acl_release(dpacl);
467 return (error);
468out_nfserr: 478out_nfserr:
469 if (host_error == -EOPNOTSUPP) 479 if (host_error == -EOPNOTSUPP)
470 error = nfserr_attrnotsupp; 480 return nfserr_attrnotsupp;
471 else 481 else
472 error = nfserrno(host_error); 482 return nfserrno(host_error);
473 goto out;
474} 483}
475 484
476static struct posix_acl * 485static struct posix_acl *
@@ -607,7 +616,7 @@ nfsd_access(struct svc_rqst *rqstp, struct svc_fh *fhp, u32 *access, u32 *suppor
607 616
608 sresult |= map->access; 617 sresult |= map->access;
609 618
610 err2 = nfsd_permission(export, dentry, map->how); 619 err2 = nfsd_permission(rqstp, export, dentry, map->how);
611 switch (err2) { 620 switch (err2) {
612 case nfs_ok: 621 case nfs_ok:
613 result |= map->access; 622 result |= map->access;
@@ -1034,7 +1043,7 @@ nfsd_read(struct svc_rqst *rqstp, struct svc_fh *fhp, struct file *file,
1034 __be32 err; 1043 __be32 err;
1035 1044
1036 if (file) { 1045 if (file) {
1037 err = nfsd_permission(fhp->fh_export, fhp->fh_dentry, 1046 err = nfsd_permission(rqstp, fhp->fh_export, fhp->fh_dentry,
1038 MAY_READ|MAY_OWNER_OVERRIDE); 1047 MAY_READ|MAY_OWNER_OVERRIDE);
1039 if (err) 1048 if (err)
1040 goto out; 1049 goto out;
@@ -1063,7 +1072,7 @@ nfsd_write(struct svc_rqst *rqstp, struct svc_fh *fhp, struct file *file,
1063 __be32 err = 0; 1072 __be32 err = 0;
1064 1073
1065 if (file) { 1074 if (file) {
1066 err = nfsd_permission(fhp->fh_export, fhp->fh_dentry, 1075 err = nfsd_permission(rqstp, fhp->fh_export, fhp->fh_dentry,
1067 MAY_WRITE|MAY_OWNER_OVERRIDE); 1076 MAY_WRITE|MAY_OWNER_OVERRIDE);
1068 if (err) 1077 if (err)
1069 goto out; 1078 goto out;
@@ -1792,7 +1801,8 @@ nfsd_statfs(struct svc_rqst *rqstp, struct svc_fh *fhp, struct kstatfs *stat)
1792 * Check for a user's access permissions to this inode. 1801 * Check for a user's access permissions to this inode.
1793 */ 1802 */
1794__be32 1803__be32
1795nfsd_permission(struct svc_export *exp, struct dentry *dentry, int acc) 1804nfsd_permission(struct svc_rqst *rqstp, struct svc_export *exp,
1805 struct dentry *dentry, int acc)
1796{ 1806{
1797 struct inode *inode = dentry->d_inode; 1807 struct inode *inode = dentry->d_inode;
1798 int err; 1808 int err;
@@ -1823,7 +1833,7 @@ nfsd_permission(struct svc_export *exp, struct dentry *dentry, int acc)
1823 */ 1833 */
1824 if (!(acc & MAY_LOCAL_ACCESS)) 1834 if (!(acc & MAY_LOCAL_ACCESS))
1825 if (acc & (MAY_WRITE | MAY_SATTR | MAY_TRUNC)) { 1835 if (acc & (MAY_WRITE | MAY_SATTR | MAY_TRUNC)) {
1826 if (EX_RDONLY(exp) || IS_RDONLY(inode)) 1836 if (EX_RDONLY(exp, rqstp) || IS_RDONLY(inode))
1827 return nfserr_rofs; 1837 return nfserr_rofs;
1828 if (/* (acc & MAY_WRITE) && */ IS_IMMUTABLE(inode)) 1838 if (/* (acc & MAY_WRITE) && */ IS_IMMUTABLE(inode))
1829 return nfserr_perm; 1839 return nfserr_perm;
diff --git a/fs/ntfs/namei.c b/fs/ntfs/namei.c
index bff01a54675a..e93c6142b23c 100644
--- a/fs/ntfs/namei.c
+++ b/fs/ntfs/namei.c
@@ -21,6 +21,7 @@
21 */ 21 */
22 22
23#include <linux/dcache.h> 23#include <linux/dcache.h>
24#include <linux/exportfs.h>
24#include <linux/security.h> 25#include <linux/security.h>
25 26
26#include "attrib.h" 27#include "attrib.h"
diff --git a/fs/ocfs2/export.h b/fs/ocfs2/export.h
index 5b77ee7866ef..e08bed9e45a0 100644
--- a/fs/ocfs2/export.h
+++ b/fs/ocfs2/export.h
@@ -26,6 +26,8 @@
26#ifndef OCFS2_EXPORT_H 26#ifndef OCFS2_EXPORT_H
27#define OCFS2_EXPORT_H 27#define OCFS2_EXPORT_H
28 28
29#include <linux/exportfs.h>
30
29extern struct export_operations ocfs2_export_ops; 31extern struct export_operations ocfs2_export_ops;
30 32
31#endif /* OCFS2_EXPORT_H */ 33#endif /* OCFS2_EXPORT_H */
diff --git a/fs/ocfs2/file.c b/fs/ocfs2/file.c
index f04c7aa834cb..004c2abbc732 100644
--- a/fs/ocfs2/file.c
+++ b/fs/ocfs2/file.c
@@ -1867,7 +1867,8 @@ static ssize_t ocfs2_file_buffered_write(struct file *file, loff_t *ppos,
1867 loff_t pos; 1867 loff_t pos;
1868 const struct iovec *cur_iov = iov; 1868 const struct iovec *cur_iov = iov;
1869 struct page *user_page, *page; 1869 struct page *user_page, *page;
1870 char *buf, *dst; 1870 char * uninitialized_var(buf);
1871 char *dst;
1871 void *fsdata; 1872 void *fsdata;
1872 1873
1873 /* 1874 /*
diff --git a/fs/ocfs2/ioctl.c b/fs/ocfs2/ioctl.c
index bd68c3f2afbe..87dcece7e1b5 100644
--- a/fs/ocfs2/ioctl.c
+++ b/fs/ocfs2/ioctl.c
@@ -63,7 +63,7 @@ static int ocfs2_set_inode_attr(struct inode *inode, unsigned flags,
63 goto bail_unlock; 63 goto bail_unlock;
64 64
65 status = -EACCES; 65 status = -EACCES;
66 if ((current->fsuid != inode->i_uid) && !capable(CAP_FOWNER)) 66 if (!is_owner_or_cap(inode))
67 goto bail_unlock; 67 goto bail_unlock;
68 68
69 if (!S_ISDIR(inode->i_mode)) 69 if (!S_ISDIR(inode->i_mode))
diff --git a/fs/proc/base.c b/fs/proc/base.c
index ae3627337a92..42cb4f5613b6 100644
--- a/fs/proc/base.c
+++ b/fs/proc/base.c
@@ -283,7 +283,7 @@ static int proc_pid_auxv(struct task_struct *task, char *buffer)
283static int proc_pid_wchan(struct task_struct *task, char *buffer) 283static int proc_pid_wchan(struct task_struct *task, char *buffer)
284{ 284{
285 unsigned long wchan; 285 unsigned long wchan;
286 char symname[KSYM_NAME_LEN+1]; 286 char symname[KSYM_NAME_LEN];
287 287
288 wchan = get_wchan(task); 288 wchan = get_wchan(task);
289 289
diff --git a/fs/ramfs/inode.c b/fs/ramfs/inode.c
index d40d22b347b7..ef2b46d099ff 100644
--- a/fs/ramfs/inode.c
+++ b/fs/ramfs/inode.c
@@ -60,6 +60,7 @@ struct inode *ramfs_get_inode(struct super_block *sb, int mode, dev_t dev)
60 inode->i_blocks = 0; 60 inode->i_blocks = 0;
61 inode->i_mapping->a_ops = &ramfs_aops; 61 inode->i_mapping->a_ops = &ramfs_aops;
62 inode->i_mapping->backing_dev_info = &ramfs_backing_dev_info; 62 inode->i_mapping->backing_dev_info = &ramfs_backing_dev_info;
63 mapping_set_gfp_mask(inode->i_mapping, GFP_HIGHUSER);
63 inode->i_atime = inode->i_mtime = inode->i_ctime = CURRENT_TIME; 64 inode->i_atime = inode->i_mtime = inode->i_ctime = CURRENT_TIME;
64 switch (mode & S_IFMT) { 65 switch (mode & S_IFMT) {
65 default: 66 default:
diff --git a/fs/reiserfs/inode.c b/fs/reiserfs/inode.c
index 1272d11399fb..ddde489f1cb2 100644
--- a/fs/reiserfs/inode.c
+++ b/fs/reiserfs/inode.c
@@ -7,6 +7,7 @@
7#include <linux/reiserfs_fs.h> 7#include <linux/reiserfs_fs.h>
8#include <linux/reiserfs_acl.h> 8#include <linux/reiserfs_acl.h>
9#include <linux/reiserfs_xattr.h> 9#include <linux/reiserfs_xattr.h>
10#include <linux/exportfs.h>
10#include <linux/smp_lock.h> 11#include <linux/smp_lock.h>
11#include <linux/pagemap.h> 12#include <linux/pagemap.h>
12#include <linux/highmem.h> 13#include <linux/highmem.h>
diff --git a/fs/reiserfs/ioctl.c b/fs/reiserfs/ioctl.c
index b484d2913c0d..11a0fcc2d402 100644
--- a/fs/reiserfs/ioctl.c
+++ b/fs/reiserfs/ioctl.c
@@ -51,8 +51,7 @@ int reiserfs_ioctl(struct inode *inode, struct file *filp, unsigned int cmd,
51 if (IS_RDONLY(inode)) 51 if (IS_RDONLY(inode))
52 return -EROFS; 52 return -EROFS;
53 53
54 if ((current->fsuid != inode->i_uid) 54 if (!is_owner_or_cap(inode))
55 && !capable(CAP_FOWNER))
56 return -EPERM; 55 return -EPERM;
57 56
58 if (get_user(flags, (int __user *)arg)) 57 if (get_user(flags, (int __user *)arg))
@@ -81,7 +80,7 @@ int reiserfs_ioctl(struct inode *inode, struct file *filp, unsigned int cmd,
81 case REISERFS_IOC_GETVERSION: 80 case REISERFS_IOC_GETVERSION:
82 return put_user(inode->i_generation, (int __user *)arg); 81 return put_user(inode->i_generation, (int __user *)arg);
83 case REISERFS_IOC_SETVERSION: 82 case REISERFS_IOC_SETVERSION:
84 if ((current->fsuid != inode->i_uid) && !capable(CAP_FOWNER)) 83 if (!is_owner_or_cap(inode))
85 return -EPERM; 84 return -EPERM;
86 if (IS_RDONLY(inode)) 85 if (IS_RDONLY(inode))
87 return -EROFS; 86 return -EROFS;
diff --git a/fs/reiserfs/super.c b/fs/reiserfs/super.c
index b4ac9119200e..5a93cfe1a032 100644
--- a/fs/reiserfs/super.c
+++ b/fs/reiserfs/super.c
@@ -21,6 +21,7 @@
21#include <linux/init.h> 21#include <linux/init.h>
22#include <linux/blkdev.h> 22#include <linux/blkdev.h>
23#include <linux/buffer_head.h> 23#include <linux/buffer_head.h>
24#include <linux/exportfs.h>
24#include <linux/vfs.h> 25#include <linux/vfs.h>
25#include <linux/mnt_namespace.h> 26#include <linux/mnt_namespace.h>
26#include <linux/mount.h> 27#include <linux/mount.h>
diff --git a/fs/reiserfs/xattr_acl.c b/fs/reiserfs/xattr_acl.c
index 5296a29cc5eb..b7e4fa4539de 100644
--- a/fs/reiserfs/xattr_acl.c
+++ b/fs/reiserfs/xattr_acl.c
@@ -21,7 +21,7 @@ xattr_set_acl(struct inode *inode, int type, const void *value, size_t size)
21 21
22 if (!reiserfs_posixacl(inode->i_sb)) 22 if (!reiserfs_posixacl(inode->i_sb))
23 return -EOPNOTSUPP; 23 return -EOPNOTSUPP;
24 if ((current->fsuid != inode->i_uid) && !capable(CAP_FOWNER)) 24 if (!is_owner_or_cap(inode))
25 return -EPERM; 25 return -EPERM;
26 26
27 if (value) { 27 if (value) {
diff --git a/fs/udf/super.c b/fs/udf/super.c
index 6658afb41cc7..d6a504f5d758 100644
--- a/fs/udf/super.c
+++ b/fs/udf/super.c
@@ -1356,7 +1356,7 @@ udf_load_partition(struct super_block *sb, kernel_lb_addr *fileset)
1356 case UDF_VIRTUAL_MAP15: 1356 case UDF_VIRTUAL_MAP15:
1357 case UDF_VIRTUAL_MAP20: 1357 case UDF_VIRTUAL_MAP20:
1358 { 1358 {
1359 kernel_lb_addr ino; 1359 kernel_lb_addr uninitialized_var(ino);
1360 1360
1361 if (!UDF_SB_LASTBLOCK(sb)) 1361 if (!UDF_SB_LASTBLOCK(sb))
1362 { 1362 {
diff --git a/fs/utimes.c b/fs/utimes.c
index 83a7e69e706c..682eb63b20ad 100644
--- a/fs/utimes.c
+++ b/fs/utimes.c
@@ -106,7 +106,7 @@ long do_utimes(int dfd, char __user *filename, struct timespec *times, int flags
106 if (IS_IMMUTABLE(inode)) 106 if (IS_IMMUTABLE(inode))
107 goto dput_and_out; 107 goto dput_and_out;
108 108
109 if ((current->fsuid != inode->i_uid) && !capable(CAP_FOWNER)) { 109 if (!is_owner_or_cap(inode)) {
110 if (f) { 110 if (f) {
111 if (!(f->f_mode & FMODE_WRITE)) 111 if (!(f->f_mode & FMODE_WRITE))
112 goto dput_and_out; 112 goto dput_and_out;
diff --git a/fs/xattr.c b/fs/xattr.c
index 4523aca79659..a44fd92caca3 100644
--- a/fs/xattr.c
+++ b/fs/xattr.c
@@ -60,8 +60,7 @@ xattr_permission(struct inode *inode, const char *name, int mask)
60 if (!S_ISREG(inode->i_mode) && !S_ISDIR(inode->i_mode)) 60 if (!S_ISREG(inode->i_mode) && !S_ISDIR(inode->i_mode))
61 return -EPERM; 61 return -EPERM;
62 if (S_ISDIR(inode->i_mode) && (inode->i_mode & S_ISVTX) && 62 if (S_ISDIR(inode->i_mode) && (inode->i_mode & S_ISVTX) &&
63 (mask & MAY_WRITE) && (current->fsuid != inode->i_uid) && 63 (mask & MAY_WRITE) && !is_owner_or_cap(inode))
64 !capable(CAP_FOWNER))
65 return -EPERM; 64 return -EPERM;
66 } 65 }
67 66
diff --git a/fs/xfs/linux-2.6/xfs_buf.c b/fs/xfs/linux-2.6/xfs_buf.c
index 2df63622354e..b0f0e58866de 100644
--- a/fs/xfs/linux-2.6/xfs_buf.c
+++ b/fs/xfs/linux-2.6/xfs_buf.c
@@ -35,10 +35,13 @@
35#include <linux/freezer.h> 35#include <linux/freezer.h>
36 36
37static kmem_zone_t *xfs_buf_zone; 37static kmem_zone_t *xfs_buf_zone;
38static struct shrinker *xfs_buf_shake;
39STATIC int xfsbufd(void *); 38STATIC int xfsbufd(void *);
40STATIC int xfsbufd_wakeup(int, gfp_t); 39STATIC int xfsbufd_wakeup(int, gfp_t);
41STATIC void xfs_buf_delwri_queue(xfs_buf_t *, int); 40STATIC void xfs_buf_delwri_queue(xfs_buf_t *, int);
41static struct shrinker xfs_buf_shake = {
42 .shrink = xfsbufd_wakeup,
43 .seeks = DEFAULT_SEEKS,
44};
42 45
43static struct workqueue_struct *xfslogd_workqueue; 46static struct workqueue_struct *xfslogd_workqueue;
44struct workqueue_struct *xfsdatad_workqueue; 47struct workqueue_struct *xfsdatad_workqueue;
@@ -1832,14 +1835,9 @@ xfs_buf_init(void)
1832 if (!xfsdatad_workqueue) 1835 if (!xfsdatad_workqueue)
1833 goto out_destroy_xfslogd_workqueue; 1836 goto out_destroy_xfslogd_workqueue;
1834 1837
1835 xfs_buf_shake = set_shrinker(DEFAULT_SEEKS, xfsbufd_wakeup); 1838 register_shrinker(&xfs_buf_shake);
1836 if (!xfs_buf_shake)
1837 goto out_destroy_xfsdatad_workqueue;
1838
1839 return 0; 1839 return 0;
1840 1840
1841 out_destroy_xfsdatad_workqueue:
1842 destroy_workqueue(xfsdatad_workqueue);
1843 out_destroy_xfslogd_workqueue: 1841 out_destroy_xfslogd_workqueue:
1844 destroy_workqueue(xfslogd_workqueue); 1842 destroy_workqueue(xfslogd_workqueue);
1845 out_free_buf_zone: 1843 out_free_buf_zone:
@@ -1854,7 +1852,7 @@ xfs_buf_init(void)
1854void 1852void
1855xfs_buf_terminate(void) 1853xfs_buf_terminate(void)
1856{ 1854{
1857 remove_shrinker(xfs_buf_shake); 1855 unregister_shrinker(&xfs_buf_shake);
1858 destroy_workqueue(xfsdatad_workqueue); 1856 destroy_workqueue(xfsdatad_workqueue);
1859 destroy_workqueue(xfslogd_workqueue); 1857 destroy_workqueue(xfslogd_workqueue);
1860 kmem_zone_destroy(xfs_buf_zone); 1858 kmem_zone_destroy(xfs_buf_zone);
diff --git a/fs/xfs/linux-2.6/xfs_super.c b/fs/xfs/linux-2.6/xfs_super.c
index 06894cf00b12..4528f9a3f304 100644
--- a/fs/xfs/linux-2.6/xfs_super.c
+++ b/fs/xfs/linux-2.6/xfs_super.c
@@ -562,6 +562,7 @@ xfssyncd(
562 bhv_vfs_sync_work_t *work, *n; 562 bhv_vfs_sync_work_t *work, *n;
563 LIST_HEAD (tmp); 563 LIST_HEAD (tmp);
564 564
565 set_freezable();
565 timeleft = xfs_syncd_centisecs * msecs_to_jiffies(10); 566 timeleft = xfs_syncd_centisecs * msecs_to_jiffies(10);
566 for (;;) { 567 for (;;) {
567 timeleft = schedule_timeout_interruptible(timeleft); 568 timeleft = schedule_timeout_interruptible(timeleft);
diff --git a/fs/xfs/linux-2.6/xfs_super.h b/fs/xfs/linux-2.6/xfs_super.h
index 33dd1ca13245..201cc3273c84 100644
--- a/fs/xfs/linux-2.6/xfs_super.h
+++ b/fs/xfs/linux-2.6/xfs_super.h
@@ -18,6 +18,8 @@
18#ifndef __XFS_SUPER_H__ 18#ifndef __XFS_SUPER_H__
19#define __XFS_SUPER_H__ 19#define __XFS_SUPER_H__
20 20
21#include <linux/exportfs.h>
22
21#ifdef CONFIG_XFS_DMAPI 23#ifdef CONFIG_XFS_DMAPI
22# define vfs_insertdmapi(vfs) vfs_insertops(vfsp, &xfs_dmops) 24# define vfs_insertdmapi(vfs) vfs_insertops(vfsp, &xfs_dmops)
23# define vfs_initdmapi() dmapi_init() 25# define vfs_initdmapi() dmapi_init()
diff --git a/fs/xfs/quota/xfs_qm.c b/fs/xfs/quota/xfs_qm.c
index 7def4c699343..2d274b23ade5 100644
--- a/fs/xfs/quota/xfs_qm.c
+++ b/fs/xfs/quota/xfs_qm.c
@@ -62,7 +62,6 @@ uint ndquot;
62 62
63kmem_zone_t *qm_dqzone; 63kmem_zone_t *qm_dqzone;
64kmem_zone_t *qm_dqtrxzone; 64kmem_zone_t *qm_dqtrxzone;
65static struct shrinker *xfs_qm_shaker;
66 65
67static cred_t xfs_zerocr; 66static cred_t xfs_zerocr;
68 67
@@ -78,6 +77,11 @@ STATIC int xfs_qm_init_quotainos(xfs_mount_t *);
78STATIC int xfs_qm_init_quotainfo(xfs_mount_t *); 77STATIC int xfs_qm_init_quotainfo(xfs_mount_t *);
79STATIC int xfs_qm_shake(int, gfp_t); 78STATIC int xfs_qm_shake(int, gfp_t);
80 79
80static struct shrinker xfs_qm_shaker = {
81 .shrink = xfs_qm_shake,
82 .seeks = DEFAULT_SEEKS,
83};
84
81#ifdef DEBUG 85#ifdef DEBUG
82extern mutex_t qcheck_lock; 86extern mutex_t qcheck_lock;
83#endif 87#endif
@@ -149,7 +153,7 @@ xfs_Gqm_init(void)
149 } else 153 } else
150 xqm->qm_dqzone = qm_dqzone; 154 xqm->qm_dqzone = qm_dqzone;
151 155
152 xfs_qm_shaker = set_shrinker(DEFAULT_SEEKS, xfs_qm_shake); 156 register_shrinker(&xfs_qm_shaker);
153 157
154 /* 158 /*
155 * The t_dqinfo portion of transactions. 159 * The t_dqinfo portion of transactions.
@@ -181,7 +185,7 @@ xfs_qm_destroy(
181 185
182 ASSERT(xqm != NULL); 186 ASSERT(xqm != NULL);
183 ASSERT(xqm->qm_nrefs == 0); 187 ASSERT(xqm->qm_nrefs == 0);
184 remove_shrinker(xfs_qm_shaker); 188 unregister_shrinker(&xfs_qm_shaker);
185 hsize = xqm->qm_dqhashmask + 1; 189 hsize = xqm->qm_dqhashmask + 1;
186 for (i = 0; i < hsize; i++) { 190 for (i = 0; i < hsize; i++) {
187 xfs_qm_list_destroy(&(xqm->qm_usr_dqhtable[i])); 191 xfs_qm_list_destroy(&(xqm->qm_usr_dqhtable[i]));
diff --git a/include/asm-alpha/fb.h b/include/asm-alpha/fb.h
new file mode 100644
index 000000000000..fa9bbb96b2b3
--- /dev/null
+++ b/include/asm-alpha/fb.h
@@ -0,0 +1,13 @@
1#ifndef _ASM_FB_H_
2#define _ASM_FB_H_
3#include <linux/device.h>
4
5/* Caching is off in the I/O space quadrant by design. */
6#define fb_pgprotect(...) do {} while (0)
7
8static inline int fb_is_primary_device(struct fb_info *info)
9{
10 return 0;
11}
12
13#endif /* _ASM_FB_H_ */
diff --git a/include/asm-alpha/page.h b/include/asm-alpha/page.h
index d2bed3cb33ff..bae7f05716d4 100644
--- a/include/asm-alpha/page.h
+++ b/include/asm-alpha/page.h
@@ -17,7 +17,8 @@
17extern void clear_page(void *page); 17extern void clear_page(void *page);
18#define clear_user_page(page, vaddr, pg) clear_page(page) 18#define clear_user_page(page, vaddr, pg) clear_page(page)
19 19
20#define alloc_zeroed_user_highpage(vma, vaddr) alloc_page_vma(GFP_HIGHUSER | __GFP_ZERO, vma, vmaddr) 20#define __alloc_zeroed_user_highpage(movableflags, vma, vaddr) \
21 alloc_page_vma(GFP_HIGHUSER | __GFP_ZERO | movableflags, vma, vmaddr)
21#define __HAVE_ARCH_ALLOC_ZEROED_USER_HIGHPAGE 22#define __HAVE_ARCH_ALLOC_ZEROED_USER_HIGHPAGE
22 23
23extern void copy_page(void * _to, void * _from); 24extern void copy_page(void * _to, void * _from);
diff --git a/include/asm-alpha/termios.h b/include/asm-alpha/termios.h
index 39e492c3bfa3..fa13716a11c3 100644
--- a/include/asm-alpha/termios.h
+++ b/include/asm-alpha/termios.h
@@ -81,7 +81,7 @@ struct termio {
81 81
82#define user_termio_to_kernel_termios(a_termios, u_termio) \ 82#define user_termio_to_kernel_termios(a_termios, u_termio) \
83({ \ 83({ \
84 struct termios *k_termios = (a_termios); \ 84 struct ktermios *k_termios = (a_termios); \
85 struct termio k_termio; \ 85 struct termio k_termio; \
86 int canon, ret; \ 86 int canon, ret; \
87 \ 87 \
@@ -113,7 +113,7 @@ struct termio {
113 */ 113 */
114#define kernel_termios_to_user_termio(u_termio, a_termios) \ 114#define kernel_termios_to_user_termio(u_termio, a_termios) \
115({ \ 115({ \
116 struct termios *k_termios = (a_termios); \ 116 struct ktermios *k_termios = (a_termios); \
117 struct termio k_termio; \ 117 struct termio k_termio; \
118 int canon; \ 118 int canon; \
119 \ 119 \
diff --git a/include/asm-arm/fb.h b/include/asm-arm/fb.h
new file mode 100644
index 000000000000..d92e99cd8c8a
--- /dev/null
+++ b/include/asm-arm/fb.h
@@ -0,0 +1,19 @@
1#ifndef _ASM_FB_H_
2#define _ASM_FB_H_
3
4#include <linux/fb.h>
5#include <linux/fs.h>
6#include <asm/page.h>
7
8static inline void fb_pgprotect(struct file *file, struct vm_area_struct *vma,
9 unsigned long off)
10{
11 vma->vm_page_prot = pgprot_writecombine(vma->vm_page_prot);
12}
13
14static inline int fb_is_primary_device(struct fb_info *info)
15{
16 return 0;
17}
18
19#endif /* _ASM_FB_H_ */
diff --git a/include/asm-arm/pgtable.h b/include/asm-arm/pgtable.h
index cb4c2c9d000a..d2e8171d1d4e 100644
--- a/include/asm-arm/pgtable.h
+++ b/include/asm-arm/pgtable.h
@@ -83,14 +83,14 @@
83 * means that a write to a clean page will cause a permission fault, and 83 * means that a write to a clean page will cause a permission fault, and
84 * the Linux MM layer will mark the page dirty via handle_pte_fault(). 84 * the Linux MM layer will mark the page dirty via handle_pte_fault().
85 * For the hardware to notice the permission change, the TLB entry must 85 * For the hardware to notice the permission change, the TLB entry must
86 * be flushed, and ptep_establish() does that for us. 86 * be flushed, and ptep_set_access_flags() does that for us.
87 * 87 *
88 * The "accessed" or "young" bit is emulated by a similar method; we only 88 * The "accessed" or "young" bit is emulated by a similar method; we only
89 * allow accesses to the page if the "young" bit is set. Accesses to the 89 * allow accesses to the page if the "young" bit is set. Accesses to the
90 * page will cause a fault, and handle_pte_fault() will set the young bit 90 * page will cause a fault, and handle_pte_fault() will set the young bit
91 * for us as long as the page is marked present in the corresponding Linux 91 * for us as long as the page is marked present in the corresponding Linux
92 * PTE entry. Again, ptep_establish() will ensure that the TLB is up to 92 * PTE entry. Again, ptep_set_access_flags() will ensure that the TLB is
93 * date. 93 * up to date.
94 * 94 *
95 * However, when the "young" bit is cleared, we deny access to the page 95 * However, when the "young" bit is cleared, we deny access to the page
96 * by clearing the hardware PTE. Currently Linux does not flush the TLB 96 * by clearing the hardware PTE. Currently Linux does not flush the TLB
diff --git a/include/asm-arm26/fb.h b/include/asm-arm26/fb.h
new file mode 100644
index 000000000000..c7df38030992
--- /dev/null
+++ b/include/asm-arm26/fb.h
@@ -0,0 +1,12 @@
1#ifndef _ASM_FB_H_
2#define _ASM_FB_H_
3#include <linux/fb.h>
4
5#define fb_pgprotect(...) do {} while (0)
6
7static inline int fb_is_primary_device(struct fb_info *info)
8{
9 return 0;
10}
11
12#endif /* _ASM_FB_H_ */
diff --git a/include/asm-avr32/fb.h b/include/asm-avr32/fb.h
new file mode 100644
index 000000000000..41baf84ad402
--- /dev/null
+++ b/include/asm-avr32/fb.h
@@ -0,0 +1,21 @@
1#ifndef _ASM_FB_H_
2#define _ASM_FB_H_
3
4#include <linux/fb.h>
5#include <linux/fs.h>
6#include <asm/page.h>
7
8static inline void fb_pgprotect(struct file *file, struct vm_area_struct *vma,
9 unsigned long off)
10{
11 vma->vm_page_prot = __pgprot((pgprot_val(vma->vm_page_prot)
12 & ~_PAGE_CACHABLE)
13 | (_PAGE_BUFFER | _PAGE_DIRTY));
14}
15
16static inline int fb_is_primary_device(struct fb_info *info)
17{
18 return 0;
19}
20
21#endif /* _ASM_FB_H_ */
diff --git a/include/asm-blackfin/fb.h b/include/asm-blackfin/fb.h
new file mode 100644
index 000000000000..c7df38030992
--- /dev/null
+++ b/include/asm-blackfin/fb.h
@@ -0,0 +1,12 @@
1#ifndef _ASM_FB_H_
2#define _ASM_FB_H_
3#include <linux/fb.h>
4
5#define fb_pgprotect(...) do {} while (0)
6
7static inline int fb_is_primary_device(struct fb_info *info)
8{
9 return 0;
10}
11
12#endif /* _ASM_FB_H_ */
diff --git a/include/asm-cris/fb.h b/include/asm-cris/fb.h
new file mode 100644
index 000000000000..c7df38030992
--- /dev/null
+++ b/include/asm-cris/fb.h
@@ -0,0 +1,12 @@
1#ifndef _ASM_FB_H_
2#define _ASM_FB_H_
3#include <linux/fb.h>
4
5#define fb_pgprotect(...) do {} while (0)
6
7static inline int fb_is_primary_device(struct fb_info *info)
8{
9 return 0;
10}
11
12#endif /* _ASM_FB_H_ */
diff --git a/include/asm-cris/page.h b/include/asm-cris/page.h
index 9f13c32552bf..0648e3153f81 100644
--- a/include/asm-cris/page.h
+++ b/include/asm-cris/page.h
@@ -20,7 +20,8 @@
20#define clear_user_page(page, vaddr, pg) clear_page(page) 20#define clear_user_page(page, vaddr, pg) clear_page(page)
21#define copy_user_page(to, from, vaddr, pg) copy_page(to, from) 21#define copy_user_page(to, from, vaddr, pg) copy_page(to, from)
22 22
23#define alloc_zeroed_user_highpage(vma, vaddr) alloc_page_vma(GFP_HIGHUSER | __GFP_ZERO, vma, vaddr) 23#define __alloc_zeroed_user_highpage(movableflags, vma, vaddr) \
24 alloc_page_vma(GFP_HIGHUSER | __GFP_ZERO | movableflags, vma, vaddr)
24#define __HAVE_ARCH_ALLOC_ZEROED_USER_HIGHPAGE 25#define __HAVE_ARCH_ALLOC_ZEROED_USER_HIGHPAGE
25 26
26/* 27/*
diff --git a/include/asm-frv/fb.h b/include/asm-frv/fb.h
new file mode 100644
index 000000000000..c7df38030992
--- /dev/null
+++ b/include/asm-frv/fb.h
@@ -0,0 +1,12 @@
1#ifndef _ASM_FB_H_
2#define _ASM_FB_H_
3#include <linux/fb.h>
4
5#define fb_pgprotect(...) do {} while (0)
6
7static inline int fb_is_primary_device(struct fb_info *info)
8{
9 return 0;
10}
11
12#endif /* _ASM_FB_H_ */
diff --git a/include/asm-frv/pgtable.h b/include/asm-frv/pgtable.h
index adde69985255..147e995bec24 100644
--- a/include/asm-frv/pgtable.h
+++ b/include/asm-frv/pgtable.h
@@ -388,13 +388,6 @@ static inline pte_t pte_mkdirty(pte_t pte) { (pte).pte |= _PAGE_DIRTY; return pt
388static inline pte_t pte_mkyoung(pte_t pte) { (pte).pte |= _PAGE_ACCESSED; return pte; } 388static inline pte_t pte_mkyoung(pte_t pte) { (pte).pte |= _PAGE_ACCESSED; return pte; }
389static inline pte_t pte_mkwrite(pte_t pte) { (pte).pte &= ~_PAGE_WP; return pte; } 389static inline pte_t pte_mkwrite(pte_t pte) { (pte).pte &= ~_PAGE_WP; return pte; }
390 390
391static inline int ptep_test_and_clear_dirty(struct vm_area_struct *vma, unsigned long addr, pte_t *ptep)
392{
393 int i = test_and_clear_bit(_PAGE_BIT_DIRTY, ptep);
394 asm volatile("dcf %M0" :: "U"(*ptep));
395 return i;
396}
397
398static inline int ptep_test_and_clear_young(struct vm_area_struct *vma, unsigned long addr, pte_t *ptep) 391static inline int ptep_test_and_clear_young(struct vm_area_struct *vma, unsigned long addr, pte_t *ptep)
399{ 392{
400 int i = test_and_clear_bit(_PAGE_BIT_ACCESSED, ptep); 393 int i = test_and_clear_bit(_PAGE_BIT_ACCESSED, ptep);
@@ -504,7 +497,6 @@ static inline int pte_file(pte_t pte)
504 remap_pfn_range(vma, vaddr, pfn, size, prot) 497 remap_pfn_range(vma, vaddr, pfn, size, prot)
505 498
506#define __HAVE_ARCH_PTEP_TEST_AND_CLEAR_YOUNG 499#define __HAVE_ARCH_PTEP_TEST_AND_CLEAR_YOUNG
507#define __HAVE_ARCH_PTEP_TEST_AND_CLEAR_DIRTY
508#define __HAVE_ARCH_PTEP_GET_AND_CLEAR 500#define __HAVE_ARCH_PTEP_GET_AND_CLEAR
509#define __HAVE_ARCH_PTEP_SET_WRPROTECT 501#define __HAVE_ARCH_PTEP_SET_WRPROTECT
510#define __HAVE_ARCH_PTE_SAME 502#define __HAVE_ARCH_PTE_SAME
diff --git a/include/asm-generic/bug.h b/include/asm-generic/bug.h
index 7f30cce52857..344e3091af24 100644
--- a/include/asm-generic/bug.h
+++ b/include/asm-generic/bug.h
@@ -28,7 +28,7 @@ struct bug_entry {
28#endif 28#endif
29 29
30#ifndef HAVE_ARCH_BUG_ON 30#ifndef HAVE_ARCH_BUG_ON
31#define BUG_ON(condition) do { if (unlikely((condition)!=0)) BUG(); } while(0) 31#define BUG_ON(condition) do { if (unlikely(condition)) BUG(); } while(0)
32#endif 32#endif
33 33
34#ifndef HAVE_ARCH_WARN_ON 34#ifndef HAVE_ARCH_WARN_ON
diff --git a/include/asm-generic/pgtable.h b/include/asm-generic/pgtable.h
index 7d7bcf990e99..f605e8d0eed3 100644
--- a/include/asm-generic/pgtable.h
+++ b/include/asm-generic/pgtable.h
@@ -3,25 +3,6 @@
3 3
4#ifndef __ASSEMBLY__ 4#ifndef __ASSEMBLY__
5 5
6#ifndef __HAVE_ARCH_PTEP_ESTABLISH
7/*
8 * Establish a new mapping:
9 * - flush the old one
10 * - update the page tables
11 * - inform the TLB about the new one
12 *
13 * We hold the mm semaphore for reading, and the pte lock.
14 *
15 * Note: the old pte is known to not be writable, so we don't need to
16 * worry about dirty bits etc getting lost.
17 */
18#define ptep_establish(__vma, __address, __ptep, __entry) \
19do { \
20 set_pte_at((__vma)->vm_mm, (__address), __ptep, __entry); \
21 flush_tlb_page(__vma, __address); \
22} while (0)
23#endif
24
25#ifndef __HAVE_ARCH_PTEP_SET_ACCESS_FLAGS 6#ifndef __HAVE_ARCH_PTEP_SET_ACCESS_FLAGS
26/* 7/*
27 * Largely same as above, but only sets the access flags (dirty, 8 * Largely same as above, but only sets the access flags (dirty,
@@ -68,31 +49,6 @@ do { \
68}) 49})
69#endif 50#endif
70 51
71#ifndef __HAVE_ARCH_PTEP_TEST_AND_CLEAR_DIRTY
72#define ptep_test_and_clear_dirty(__vma, __address, __ptep) \
73({ \
74 pte_t __pte = *__ptep; \
75 int r = 1; \
76 if (!pte_dirty(__pte)) \
77 r = 0; \
78 else \
79 set_pte_at((__vma)->vm_mm, (__address), (__ptep), \
80 pte_mkclean(__pte)); \
81 r; \
82})
83#endif
84
85#ifndef __HAVE_ARCH_PTEP_CLEAR_DIRTY_FLUSH
86#define ptep_clear_flush_dirty(__vma, __address, __ptep) \
87({ \
88 int __dirty; \
89 __dirty = ptep_test_and_clear_dirty(__vma, __address, __ptep); \
90 if (__dirty) \
91 flush_tlb_page(__vma, __address); \
92 __dirty; \
93})
94#endif
95
96#ifndef __HAVE_ARCH_PTEP_GET_AND_CLEAR 52#ifndef __HAVE_ARCH_PTEP_GET_AND_CLEAR
97#define ptep_get_and_clear(__mm, __address, __ptep) \ 53#define ptep_get_and_clear(__mm, __address, __ptep) \
98({ \ 54({ \
diff --git a/include/asm-generic/unaligned.h b/include/asm-generic/unaligned.h
index 09ec447fe2af..16a466e50681 100644
--- a/include/asm-generic/unaligned.h
+++ b/include/asm-generic/unaligned.h
@@ -18,7 +18,8 @@
18#define get_unaligned(ptr) \ 18#define get_unaligned(ptr) \
19 __get_unaligned((ptr), sizeof(*(ptr))) 19 __get_unaligned((ptr), sizeof(*(ptr)))
20#define put_unaligned(x,ptr) \ 20#define put_unaligned(x,ptr) \
21 __put_unaligned((__u64)(x), (ptr), sizeof(*(ptr))) 21 ((void)sizeof(*(ptr)=(x)),\
22 __put_unaligned((__force __u64)(x), (ptr), sizeof(*(ptr))))
22 23
23/* 24/*
24 * This function doesn't actually exist. The idea is that when 25 * This function doesn't actually exist. The idea is that when
@@ -95,21 +96,21 @@ static inline void __ustw(__u16 val, __u16 *addr)
95 default: \ 96 default: \
96 bad_unaligned_access_length(); \ 97 bad_unaligned_access_length(); \
97 }; \ 98 }; \
98 (__typeof__(*(ptr)))val; \ 99 (__force __typeof__(*(ptr)))val; \
99}) 100})
100 101
101#define __put_unaligned(val, ptr, size) \ 102#define __put_unaligned(val, ptr, size) \
102do { \ 103({ \
103 void *__gu_p = ptr; \ 104 void *__gu_p = ptr; \
104 switch (size) { \ 105 switch (size) { \
105 case 1: \ 106 case 1: \
106 *(__u8 *)__gu_p = val; \ 107 *(__u8 *)__gu_p = (__force __u8)val; \
107 break; \ 108 break; \
108 case 2: \ 109 case 2: \
109 __ustw(val, __gu_p); \ 110 __ustw((__force __u16)val, __gu_p); \
110 break; \ 111 break; \
111 case 4: \ 112 case 4: \
112 __ustl(val, __gu_p); \ 113 __ustl((__force __u32)val, __gu_p); \
113 break; \ 114 break; \
114 case 8: \ 115 case 8: \
115 __ustq(val, __gu_p); \ 116 __ustq(val, __gu_p); \
@@ -117,6 +118,7 @@ do { \
117 default: \ 118 default: \
118 bad_unaligned_access_length(); \ 119 bad_unaligned_access_length(); \
119 }; \ 120 }; \
120} while(0) 121 (void)0; \
122})
121 123
122#endif /* _ASM_GENERIC_UNALIGNED_H */ 124#endif /* _ASM_GENERIC_UNALIGNED_H */
diff --git a/include/asm-h8300/fb.h b/include/asm-h8300/fb.h
new file mode 100644
index 000000000000..c7df38030992
--- /dev/null
+++ b/include/asm-h8300/fb.h
@@ -0,0 +1,12 @@
1#ifndef _ASM_FB_H_
2#define _ASM_FB_H_
3#include <linux/fb.h>
4
5#define fb_pgprotect(...) do {} while (0)
6
7static inline int fb_is_primary_device(struct fb_info *info)
8{
9 return 0;
10}
11
12#endif /* _ASM_FB_H_ */
diff --git a/include/asm-h8300/page.h b/include/asm-h8300/page.h
index 3b4f2903f91d..c8cc81a3aca5 100644
--- a/include/asm-h8300/page.h
+++ b/include/asm-h8300/page.h
@@ -22,7 +22,8 @@
22#define clear_user_page(page, vaddr, pg) clear_page(page) 22#define clear_user_page(page, vaddr, pg) clear_page(page)
23#define copy_user_page(to, from, vaddr, pg) copy_page(to, from) 23#define copy_user_page(to, from, vaddr, pg) copy_page(to, from)
24 24
25#define alloc_zeroed_user_highpage(vma, vaddr) alloc_page_vma(GFP_HIGHUSER | __GFP_ZERO, vma, vaddr) 25#define __alloc_zeroed_user_highpage(movableflags, vma, vaddr) \
26 alloc_page_vma(GFP_HIGHUSER | __GFP_ZERO | movableflags, vma, vaddr)
26#define __HAVE_ARCH_ALLOC_ZEROED_USER_HIGHPAGE 27#define __HAVE_ARCH_ALLOC_ZEROED_USER_HIGHPAGE
27 28
28/* 29/*
diff --git a/include/asm-i386/fb.h b/include/asm-i386/fb.h
new file mode 100644
index 000000000000..d1c6297d4a61
--- /dev/null
+++ b/include/asm-i386/fb.h
@@ -0,0 +1,17 @@
1#ifndef _ASM_FB_H_
2#define _ASM_FB_H_
3
4#include <linux/fb.h>
5#include <linux/fs.h>
6#include <asm/page.h>
7
8extern int fb_is_primary_device(struct fb_info *info);
9
10static inline void fb_pgprotect(struct file *file, struct vm_area_struct *vma,
11 unsigned long off)
12{
13 if (boot_cpu_data.x86 > 3)
14 pgprot_val(vma->vm_page_prot) |= _PAGE_PCD;
15}
16
17#endif /* _ASM_FB_H_ */
diff --git a/include/asm-i386/page.h b/include/asm-i386/page.h
index 818ac8bf01e2..99cf5d3692a9 100644
--- a/include/asm-i386/page.h
+++ b/include/asm-i386/page.h
@@ -34,7 +34,8 @@
34#define clear_user_page(page, vaddr, pg) clear_page(page) 34#define clear_user_page(page, vaddr, pg) clear_page(page)
35#define copy_user_page(to, from, vaddr, pg) copy_page(to, from) 35#define copy_user_page(to, from, vaddr, pg) copy_page(to, from)
36 36
37#define alloc_zeroed_user_highpage(vma, vaddr) alloc_page_vma(GFP_HIGHUSER | __GFP_ZERO, vma, vaddr) 37#define __alloc_zeroed_user_highpage(movableflags, vma, vaddr) \
38 alloc_page_vma(GFP_HIGHUSER | __GFP_ZERO | movableflags, vma, vaddr)
38#define __HAVE_ARCH_ALLOC_ZEROED_USER_HIGHPAGE 39#define __HAVE_ARCH_ALLOC_ZEROED_USER_HIGHPAGE
39 40
40/* 41/*
diff --git a/include/asm-i386/pgtable.h b/include/asm-i386/pgtable.h
index 01734e05e63b..c7fefa6b12fd 100644
--- a/include/asm-i386/pgtable.h
+++ b/include/asm-i386/pgtable.h
@@ -289,17 +289,6 @@ static inline pte_t native_local_ptep_get_and_clear(pte_t *ptep)
289 __changed; \ 289 __changed; \
290}) 290})
291 291
292#define __HAVE_ARCH_PTEP_TEST_AND_CLEAR_DIRTY
293#define ptep_test_and_clear_dirty(vma, addr, ptep) ({ \
294 int __ret = 0; \
295 if (pte_dirty(*(ptep))) \
296 __ret = test_and_clear_bit(_PAGE_BIT_DIRTY, \
297 &(ptep)->pte_low); \
298 if (__ret) \
299 pte_update((vma)->vm_mm, addr, ptep); \
300 __ret; \
301})
302
303#define __HAVE_ARCH_PTEP_TEST_AND_CLEAR_YOUNG 292#define __HAVE_ARCH_PTEP_TEST_AND_CLEAR_YOUNG
304#define ptep_test_and_clear_young(vma, addr, ptep) ({ \ 293#define ptep_test_and_clear_young(vma, addr, ptep) ({ \
305 int __ret = 0; \ 294 int __ret = 0; \
@@ -311,27 +300,6 @@ static inline pte_t native_local_ptep_get_and_clear(pte_t *ptep)
311 __ret; \ 300 __ret; \
312}) 301})
313 302
314/*
315 * Rules for using ptep_establish: the pte MUST be a user pte, and
316 * must be a present->present transition.
317 */
318#define __HAVE_ARCH_PTEP_ESTABLISH
319#define ptep_establish(vma, address, ptep, pteval) \
320do { \
321 set_pte_present((vma)->vm_mm, address, ptep, pteval); \
322 flush_tlb_page(vma, address); \
323} while (0)
324
325#define __HAVE_ARCH_PTEP_CLEAR_DIRTY_FLUSH
326#define ptep_clear_flush_dirty(vma, address, ptep) \
327({ \
328 int __dirty; \
329 __dirty = ptep_test_and_clear_dirty((vma), (address), (ptep)); \
330 if (__dirty) \
331 flush_tlb_page(vma, address); \
332 __dirty; \
333})
334
335#define __HAVE_ARCH_PTEP_CLEAR_YOUNG_FLUSH 303#define __HAVE_ARCH_PTEP_CLEAR_YOUNG_FLUSH
336#define ptep_clear_flush_young(vma, address, ptep) \ 304#define ptep_clear_flush_young(vma, address, ptep) \
337({ \ 305({ \
diff --git a/include/asm-ia64/fb.h b/include/asm-ia64/fb.h
new file mode 100644
index 000000000000..89a397cee90a
--- /dev/null
+++ b/include/asm-ia64/fb.h
@@ -0,0 +1,23 @@
1#ifndef _ASM_FB_H_
2#define _ASM_FB_H_
3
4#include <linux/fb.h>
5#include <linux/fs.h>
6#include <linux/efi.h>
7#include <asm/page.h>
8
9static inline void fb_pgprotect(struct file *file, struct vm_area_struct *vma,
10 unsigned long off)
11{
12 if (efi_range_is_wc(vma->vm_start, vma->vm_end - vma->vm_start))
13 vma->vm_page_prot = pgprot_writecombine(vma->vm_page_prot);
14 else
15 vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
16}
17
18static inline int fb_is_primary_device(struct fb_info *info)
19{
20 return 0;
21}
22
23#endif /* _ASM_FB_H_ */
diff --git a/include/asm-ia64/ioctls.h b/include/asm-ia64/ioctls.h
index 31ee521aeb7a..f41b636a0bf6 100644
--- a/include/asm-ia64/ioctls.h
+++ b/include/asm-ia64/ioctls.h
@@ -53,6 +53,10 @@
53#define TIOCSBRK 0x5427 /* BSD compatibility */ 53#define TIOCSBRK 0x5427 /* BSD compatibility */
54#define TIOCCBRK 0x5428 /* BSD compatibility */ 54#define TIOCCBRK 0x5428 /* BSD compatibility */
55#define TIOCGSID 0x5429 /* Return the session ID of FD */ 55#define TIOCGSID 0x5429 /* Return the session ID of FD */
56#define TCGETS2 _IOR('T',0x2A, struct termios2)
57#define TCSETS2 _IOW('T',0x2B, struct termios2)
58#define TCSETSW2 _IOW('T',0x2C, struct termios2)
59#define TCSETSF2 _IOW('T',0x2D, struct termios2)
56#define TIOCGPTN _IOR('T',0x30, unsigned int) /* Get Pty Number (of pty-mux device) */ 60#define TIOCGPTN _IOR('T',0x30, unsigned int) /* Get Pty Number (of pty-mux device) */
57#define TIOCSPTLCK _IOW('T',0x31, int) /* Lock/unlock Pty */ 61#define TIOCSPTLCK _IOW('T',0x31, int) /* Lock/unlock Pty */
58 62
diff --git a/include/asm-ia64/page.h b/include/asm-ia64/page.h
index 485759ba9e36..d6345464a2b3 100644
--- a/include/asm-ia64/page.h
+++ b/include/asm-ia64/page.h
@@ -87,12 +87,13 @@ do { \
87} while (0) 87} while (0)
88 88
89 89
90#define alloc_zeroed_user_highpage(vma, vaddr) \ 90#define __alloc_zeroed_user_highpage(movableflags, vma, vaddr) \
91({ \ 91({ \
92 struct page *page = alloc_page_vma(GFP_HIGHUSER | __GFP_ZERO, vma, vaddr); \ 92 struct page *page = alloc_page_vma( \
93 if (page) \ 93 GFP_HIGHUSER | __GFP_ZERO | movableflags, vma, vaddr); \
94 flush_dcache_page(page); \ 94 if (page) \
95 page; \ 95 flush_dcache_page(page); \
96 page; \
96}) 97})
97 98
98#define __HAVE_ARCH_ALLOC_ZEROED_USER_HIGHPAGE 99#define __HAVE_ARCH_ALLOC_ZEROED_USER_HIGHPAGE
diff --git a/include/asm-ia64/pgtable.h b/include/asm-ia64/pgtable.h
index f923d811c421..de6d01e24dd0 100644
--- a/include/asm-ia64/pgtable.h
+++ b/include/asm-ia64/pgtable.h
@@ -395,22 +395,6 @@ ptep_test_and_clear_young (struct vm_area_struct *vma, unsigned long addr, pte_t
395#endif 395#endif
396} 396}
397 397
398static inline int
399ptep_test_and_clear_dirty (struct vm_area_struct *vma, unsigned long addr, pte_t *ptep)
400{
401#ifdef CONFIG_SMP
402 if (!pte_dirty(*ptep))
403 return 0;
404 return test_and_clear_bit(_PAGE_D_BIT, ptep);
405#else
406 pte_t pte = *ptep;
407 if (!pte_dirty(pte))
408 return 0;
409 set_pte_at(vma->vm_mm, addr, ptep, pte_mkclean(pte));
410 return 1;
411#endif
412}
413
414static inline pte_t 398static inline pte_t
415ptep_get_and_clear(struct mm_struct *mm, unsigned long addr, pte_t *ptep) 399ptep_get_and_clear(struct mm_struct *mm, unsigned long addr, pte_t *ptep)
416{ 400{
@@ -543,8 +527,10 @@ extern void lazy_mmu_prot_update (pte_t pte);
543# define ptep_set_access_flags(__vma, __addr, __ptep, __entry, __safely_writable) \ 527# define ptep_set_access_flags(__vma, __addr, __ptep, __entry, __safely_writable) \
544({ \ 528({ \
545 int __changed = !pte_same(*(__ptep), __entry); \ 529 int __changed = !pte_same(*(__ptep), __entry); \
546 if (__changed) \ 530 if (__changed) { \
547 ptep_establish(__vma, __addr, __ptep, __entry); \ 531 set_pte_at((__vma)->vm_mm, (__addr), __ptep, __entry); \
532 flush_tlb_page(__vma, __addr); \
533 } \
548 __changed; \ 534 __changed; \
549}) 535})
550#endif 536#endif
@@ -588,7 +574,6 @@ extern void lazy_mmu_prot_update (pte_t pte);
588#endif 574#endif
589 575
590#define __HAVE_ARCH_PTEP_TEST_AND_CLEAR_YOUNG 576#define __HAVE_ARCH_PTEP_TEST_AND_CLEAR_YOUNG
591#define __HAVE_ARCH_PTEP_TEST_AND_CLEAR_DIRTY
592#define __HAVE_ARCH_PTEP_GET_AND_CLEAR 577#define __HAVE_ARCH_PTEP_GET_AND_CLEAR
593#define __HAVE_ARCH_PTEP_SET_WRPROTECT 578#define __HAVE_ARCH_PTEP_SET_WRPROTECT
594#define __HAVE_ARCH_PTE_SAME 579#define __HAVE_ARCH_PTE_SAME
diff --git a/include/asm-ia64/termbits.h b/include/asm-ia64/termbits.h
index 7fae3109ef47..9f162e0089ad 100644
--- a/include/asm-ia64/termbits.h
+++ b/include/asm-ia64/termbits.h
@@ -149,6 +149,7 @@ struct ktermios {
149#define HUPCL 0002000 149#define HUPCL 0002000
150#define CLOCAL 0004000 150#define CLOCAL 0004000
151#define CBAUDEX 0010000 151#define CBAUDEX 0010000
152#define BOTHER 0010000
152#define B57600 0010001 153#define B57600 0010001
153#define B115200 0010002 154#define B115200 0010002
154#define B230400 0010003 155#define B230400 0010003
@@ -164,10 +165,12 @@ struct ktermios {
164#define B3000000 0010015 165#define B3000000 0010015
165#define B3500000 0010016 166#define B3500000 0010016
166#define B4000000 0010017 167#define B4000000 0010017
167#define CIBAUD 002003600000 /* input baud rate (not used) */ 168#define CIBAUD 002003600000 /* input baud rate */
168#define CMSPAR 010000000000 /* mark or space (stick) parity */ 169#define CMSPAR 010000000000 /* mark or space (stick) parity */
169#define CRTSCTS 020000000000 /* flow control */ 170#define CRTSCTS 020000000000 /* flow control */
170 171
172#define IBSHIFT 16 /* Shift from CBAUD to CIBAUD */
173
171/* c_lflag bits */ 174/* c_lflag bits */
172#define ISIG 0000001 175#define ISIG 0000001
173#define ICANON 0000002 176#define ICANON 0000002
diff --git a/include/asm-ia64/termios.h b/include/asm-ia64/termios.h
index 08750c2d3607..689d218c0c28 100644
--- a/include/asm-ia64/termios.h
+++ b/include/asm-ia64/termios.h
@@ -87,8 +87,10 @@ struct termio {
87 copy_to_user((termio)->c_cc, (termios)->c_cc, NCC); \ 87 copy_to_user((termio)->c_cc, (termios)->c_cc, NCC); \
88}) 88})
89 89
90#define user_termios_to_kernel_termios(k, u) copy_from_user(k, u, sizeof(struct termios)) 90#define user_termios_to_kernel_termios(k, u) copy_from_user(k, u, sizeof(struct termios2))
91#define kernel_termios_to_user_termios(u, k) copy_to_user(u, k, sizeof(struct termios)) 91#define kernel_termios_to_user_termios(u, k) copy_to_user(u, k, sizeof(struct termios2))
92#define user_termios_to_kernel_termios_1(k, u) copy_from_user(k, u, sizeof(struct termios))
93#define kernel_termios_to_user_termios_1(u, k) copy_to_user(u, k, sizeof(struct termios))
92 94
93# endif /* __KERNEL__ */ 95# endif /* __KERNEL__ */
94 96
diff --git a/include/asm-m32r/fb.h b/include/asm-m32r/fb.h
new file mode 100644
index 000000000000..d92e99cd8c8a
--- /dev/null
+++ b/include/asm-m32r/fb.h
@@ -0,0 +1,19 @@
1#ifndef _ASM_FB_H_
2#define _ASM_FB_H_
3
4#include <linux/fb.h>
5#include <linux/fs.h>
6#include <asm/page.h>
7
8static inline void fb_pgprotect(struct file *file, struct vm_area_struct *vma,
9 unsigned long off)
10{
11 vma->vm_page_prot = pgprot_writecombine(vma->vm_page_prot);
12}
13
14static inline int fb_is_primary_device(struct fb_info *info)
15{
16 return 0;
17}
18
19#endif /* _ASM_FB_H_ */
diff --git a/include/asm-m32r/page.h b/include/asm-m32r/page.h
index 6f6ecf7d14a3..04fd183a2c58 100644
--- a/include/asm-m32r/page.h
+++ b/include/asm-m32r/page.h
@@ -15,7 +15,8 @@ extern void copy_page(void *to, void *from);
15#define clear_user_page(page, vaddr, pg) clear_page(page) 15#define clear_user_page(page, vaddr, pg) clear_page(page)
16#define copy_user_page(to, from, vaddr, pg) copy_page(to, from) 16#define copy_user_page(to, from, vaddr, pg) copy_page(to, from)
17 17
18#define alloc_zeroed_user_highpage(vma, vaddr) alloc_page_vma(GFP_HIGHUSER | __GFP_ZERO, vma, vaddr) 18#define __alloc_zeroed_user_highpage(movableflags, vma, vaddr) \
19 alloc_page_vma(GFP_HIGHUSER | __GFP_ZERO | movableflags, vma, vaddr)
19#define __HAVE_ARCH_ALLOC_ZEROED_USER_HIGHPAGE 20#define __HAVE_ARCH_ALLOC_ZEROED_USER_HIGHPAGE
20 21
21/* 22/*
diff --git a/include/asm-m32r/pgtable.h b/include/asm-m32r/pgtable.h
index 35af58c6b812..92d7266783fd 100644
--- a/include/asm-m32r/pgtable.h
+++ b/include/asm-m32r/pgtable.h
@@ -250,11 +250,6 @@ static inline pte_t pte_mkwrite(pte_t pte)
250 return pte; 250 return pte;
251} 251}
252 252
253static inline int ptep_test_and_clear_dirty(struct vm_area_struct *vma, unsigned long addr, pte_t *ptep)
254{
255 return test_and_clear_bit(_PAGE_BIT_DIRTY, ptep);
256}
257
258static inline int ptep_test_and_clear_young(struct vm_area_struct *vma, unsigned long addr, pte_t *ptep) 253static inline int ptep_test_and_clear_young(struct vm_area_struct *vma, unsigned long addr, pte_t *ptep)
259{ 254{
260 return test_and_clear_bit(_PAGE_BIT_ACCESSED, ptep); 255 return test_and_clear_bit(_PAGE_BIT_ACCESSED, ptep);
@@ -348,7 +343,6 @@ static inline void pmd_set(pmd_t * pmdp, pte_t * ptep)
348 remap_pfn_range(vma, vaddr, pfn, size, prot) 343 remap_pfn_range(vma, vaddr, pfn, size, prot)
349 344
350#define __HAVE_ARCH_PTEP_TEST_AND_CLEAR_YOUNG 345#define __HAVE_ARCH_PTEP_TEST_AND_CLEAR_YOUNG
351#define __HAVE_ARCH_PTEP_TEST_AND_CLEAR_DIRTY
352#define __HAVE_ARCH_PTEP_GET_AND_CLEAR 346#define __HAVE_ARCH_PTEP_GET_AND_CLEAR
353#define __HAVE_ARCH_PTEP_SET_WRPROTECT 347#define __HAVE_ARCH_PTEP_SET_WRPROTECT
354#define __HAVE_ARCH_PTE_SAME 348#define __HAVE_ARCH_PTE_SAME
diff --git a/include/asm-m68k/fb.h b/include/asm-m68k/fb.h
new file mode 100644
index 000000000000..380b97ae8157
--- /dev/null
+++ b/include/asm-m68k/fb.h
@@ -0,0 +1,34 @@
1#ifndef _ASM_FB_H_
2#define _ASM_FB_H_
3
4#include <linux/fb.h>
5#include <linux/fs.h>
6#include <asm/page.h>
7#include <asm/setup.h>
8
9#ifdef CONFIG_SUN3
10static inline void fb_pgprotect(struct file *file, struct vm_area_struct *vma,
11 unsigned long off)
12{
13 pgprot_val(vma->vm_page_prot) |= SUN3_PAGE_NOCACHE;
14}
15#else
16static inline void fb_pgprotect(struct file *file, struct vm_area_struct *vma,
17 unsigned long off)
18{
19 if (CPU_IS_020_OR_030)
20 pgprot_val(vma->vm_page_prot) |= _PAGE_NOCACHE030;
21 if (CPU_IS_040_OR_060) {
22 pgprot_val(vma->vm_page_prot) &= _CACHEMASK040;
23 /* Use no-cache mode, serialized */
24 pgprot_val(vma->vm_page_prot) |= _PAGE_NOCACHE_S;
25 }
26}
27#endif /* CONFIG_SUN3 */
28
29static inline int fb_is_primary_device(struct fb_info *info)
30{
31 return 0;
32}
33
34#endif /* _ASM_FB_H_ */
diff --git a/include/asm-m68knommu/fb.h b/include/asm-m68knommu/fb.h
new file mode 100644
index 000000000000..c7df38030992
--- /dev/null
+++ b/include/asm-m68knommu/fb.h
@@ -0,0 +1,12 @@
1#ifndef _ASM_FB_H_
2#define _ASM_FB_H_
3#include <linux/fb.h>
4
5#define fb_pgprotect(...) do {} while (0)
6
7static inline int fb_is_primary_device(struct fb_info *info)
8{
9 return 0;
10}
11
12#endif /* _ASM_FB_H_ */
diff --git a/include/asm-m68knommu/page.h b/include/asm-m68knommu/page.h
index 2a1b8bdcb29c..9efa0a9851b1 100644
--- a/include/asm-m68knommu/page.h
+++ b/include/asm-m68knommu/page.h
@@ -22,7 +22,8 @@
22#define clear_user_page(page, vaddr, pg) clear_page(page) 22#define clear_user_page(page, vaddr, pg) clear_page(page)
23#define copy_user_page(to, from, vaddr, pg) copy_page(to, from) 23#define copy_user_page(to, from, vaddr, pg) copy_page(to, from)
24 24
25#define alloc_zeroed_user_highpage(vma, vaddr) alloc_page_vma(GFP_HIGHUSER | __GFP_ZERO, vma, vaddr) 25#define __alloc_zeroed_user_highpage(movableflags, vma, vaddr) \
26 alloc_page_vma(GFP_HIGHUSER | __GFP_ZERO | movableflags, vma, vaddr)
26#define __HAVE_ARCH_ALLOC_ZEROED_USER_HIGHPAGE 27#define __HAVE_ARCH_ALLOC_ZEROED_USER_HIGHPAGE
27 28
28/* 29/*
diff --git a/include/asm-mips/fb.h b/include/asm-mips/fb.h
new file mode 100644
index 000000000000..bd3f68c9ddfc
--- /dev/null
+++ b/include/asm-mips/fb.h
@@ -0,0 +1,19 @@
1#ifndef _ASM_FB_H_
2#define _ASM_FB_H_
3
4#include <linux/fb.h>
5#include <linux/fs.h>
6#include <asm/page.h>
7
8static inline void fb_pgprotect(struct file *file, struct vm_area_struct *vma,
9 unsigned long off)
10{
11 vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
12}
13
14static inline int fb_is_primary_device(struct fb_info *info)
15{
16 return 0;
17}
18
19#endif /* _ASM_FB_H_ */
diff --git a/include/asm-mips/sibyte/bcm1480_regs.h b/include/asm-mips/sibyte/bcm1480_regs.h
index bda391d3af85..2738c1366f66 100644
--- a/include/asm-mips/sibyte/bcm1480_regs.h
+++ b/include/asm-mips/sibyte/bcm1480_regs.h
@@ -220,17 +220,25 @@
220#define A_BCM1480_DUART(chan) ((((chan)&2) == 0)? A_BCM1480_DUART0 : A_BCM1480_DUART1) 220#define A_BCM1480_DUART(chan) ((((chan)&2) == 0)? A_BCM1480_DUART0 : A_BCM1480_DUART1)
221 221
222#define BCM1480_DUART_CHANREG_SPACING 0x100 222#define BCM1480_DUART_CHANREG_SPACING 0x100
223#define A_BCM1480_DUART_CHANREG(chan,reg) (A_BCM1480_DUART(chan) \ 223#define A_BCM1480_DUART_CHANREG(chan, reg) \
224 + BCM1480_DUART_CHANREG_SPACING*((chan)&1) \ 224 (A_BCM1480_DUART(chan) + \
225 + (reg)) 225 BCM1480_DUART_CHANREG_SPACING * (((chan) & 1) + 1) + (reg))
226#define R_BCM1480_DUART_CHANREG(chan,reg) (BCM1480_DUART_CHANREG_SPACING*((chan)&1) + (reg)) 226#define A_BCM1480_DUART_CTRLREG(chan, reg) \
227 227 (A_BCM1480_DUART(chan) + \
228#define R_BCM1480_DUART_IMRREG(chan) (R_DUART_IMR_A + ((chan)&1)*DUART_IMRISR_SPACING) 228 BCM1480_DUART_CHANREG_SPACING * 3 + (reg))
229#define R_BCM1480_DUART_ISRREG(chan) (R_DUART_ISR_A + ((chan)&1)*DUART_IMRISR_SPACING) 229
230 230#define R_BCM1480_DUART_IMRREG(chan) \
231#define A_BCM1480_DUART_IMRREG(chan) (A_BCM1480_DUART(chan) + R_BCM1480_DUART_IMRREG(chan)) 231 (R_DUART_IMR_A + ((chan) & 1) * DUART_IMRISR_SPACING)
232#define A_BCM1480_DUART_ISRREG(chan) (A_BCM1480_DUART(chan) + R_BCM1480_DUART_ISRREG(chan)) 232#define R_BCM1480_DUART_ISRREG(chan) \
233#define A_BCM1480_DUART_IN_PORT(chan) (A_BCM1480_DUART(chan) + R_DUART_INP_ORT) 233 (R_DUART_ISR_A + ((chan) & 1) * DUART_IMRISR_SPACING)
234
235#define A_BCM1480_DUART_IMRREG(chan) \
236 (A_BCM1480_DUART_CTRLREG((chan), R_BCM1480_DUART_IMRREG(chan)))
237#define A_BCM1480_DUART_ISRREG(chan) \
238 (A_BCM1480_DUART_CTRLREG((chan), R_BCM1480_DUART_ISRREG(chan)))
239
240#define A_BCM1480_DUART_IN_PORT(chan) \
241 (A_BCM1480_DUART_CTRLREG((chan), R_DUART_IN_PORT))
234 242
235/* 243/*
236 * These constants are the absolute addresses. 244 * These constants are the absolute addresses.
diff --git a/include/asm-mips/sibyte/sb1250_regs.h b/include/asm-mips/sibyte/sb1250_regs.h
index da7c188993c9..220b7e94f1bf 100644
--- a/include/asm-mips/sibyte/sb1250_regs.h
+++ b/include/asm-mips/sibyte/sb1250_regs.h
@@ -272,59 +272,69 @@
272 ********************************************************************* */ 272 ********************************************************************* */
273 273
274 274
275#if SIBYTE_HDR_FEATURE_1250_112x /* This MC only on 1250 & 112x */ 275#if SIBYTE_HDR_FEATURE_1250_112x /* This MC only on 1250 & 112x */
276#define R_DUART_NUM_PORTS 2 276#define R_DUART_NUM_PORTS 2
277 277
278#define A_DUART 0x0010060000 278#define A_DUART 0x0010060000
279 279
280#define DUART_CHANREG_SPACING 0x100 280#define DUART_CHANREG_SPACING 0x100
281#define A_DUART_CHANREG(chan,reg) (A_DUART + DUART_CHANREG_SPACING*(chan) + (reg)) 281
282#define R_DUART_CHANREG(chan,reg) (DUART_CHANREG_SPACING*(chan) + (reg)) 282#define A_DUART_CHANREG(chan, reg) \
283 (A_DUART + DUART_CHANREG_SPACING * ((chan) + 1) + (reg))
283#endif /* 1250 & 112x */ 284#endif /* 1250 & 112x */
284 285
285#define R_DUART_MODE_REG_1 0x100 286#define R_DUART_MODE_REG_1 0x000
286#define R_DUART_MODE_REG_2 0x110 287#define R_DUART_MODE_REG_2 0x010
287#define R_DUART_STATUS 0x120 288#define R_DUART_STATUS 0x020
288#define R_DUART_CLK_SEL 0x130 289#define R_DUART_CLK_SEL 0x030
289#define R_DUART_CMD 0x150 290#define R_DUART_CMD 0x050
290#define R_DUART_RX_HOLD 0x160 291#define R_DUART_RX_HOLD 0x060
291#define R_DUART_TX_HOLD 0x170 292#define R_DUART_TX_HOLD 0x070
292 293
293#if SIBYTE_HDR_FEATURE(1250, PASS2) || SIBYTE_HDR_FEATURE(112x, PASS1) || SIBYTE_HDR_FEATURE_CHIP(1480) 294#if SIBYTE_HDR_FEATURE(1250, PASS2) || SIBYTE_HDR_FEATURE(112x, PASS1) || SIBYTE_HDR_FEATURE_CHIP(1480)
294#define R_DUART_FULL_CTL 0x140 295#define R_DUART_FULL_CTL 0x040
295#define R_DUART_OPCR_X 0x180 296#define R_DUART_OPCR_X 0x080
296#define R_DUART_AUXCTL_X 0x190 297#define R_DUART_AUXCTL_X 0x090
297#endif /* 1250 PASS2 || 112x PASS1 || 1480*/ 298#endif /* 1250 PASS2 || 112x PASS1 || 1480 */
298 299
299 300
300/* 301/*
301 * The IMR and ISR can't be addressed with A_DUART_CHANREG, 302 * The IMR and ISR can't be addressed with A_DUART_CHANREG,
302 * so use this macro instead. 303 * so use these macros instead.
303 */ 304 */
304 305
305#define R_DUART_AUX_CTRL 0x310 306#if SIBYTE_HDR_FEATURE_1250_112x /* This MC only on 1250 & 112x */
306#define R_DUART_ISR_A 0x320 307#define DUART_IMRISR_SPACING 0x20
307#define R_DUART_IMR_A 0x330 308#define DUART_INCHNG_SPACING 0x10
308#define R_DUART_ISR_B 0x340
309#define R_DUART_IMR_B 0x350
310#define R_DUART_OUT_PORT 0x360
311#define R_DUART_OPCR 0x370
312#define R_DUART_IN_PORT 0x380
313 309
314#define R_DUART_SET_OPR 0x3B0 310#define A_DUART_CTRLREG(reg) \
315#define R_DUART_CLEAR_OPR 0x3C0 311 (A_DUART + DUART_CHANREG_SPACING * 3 + (reg))
316 312
317#define DUART_IMRISR_SPACING 0x20 313#define R_DUART_IMRREG(chan) \
314 (R_DUART_IMR_A + (chan) * DUART_IMRISR_SPACING)
315#define R_DUART_ISRREG(chan) \
316 (R_DUART_ISR_A + (chan) * DUART_IMRISR_SPACING)
317#define R_DUART_INCHREG(chan) \
318 (R_DUART_IN_CHNG_A + (chan) * DUART_INCHNG_SPACING)
318 319
319#if SIBYTE_HDR_FEATURE_1250_112x /* This MC only on 1250 & 112x */ 320#define A_DUART_IMRREG(chan) A_DUART_CTRLREG(R_DUART_IMRREG(chan))
320#define R_DUART_IMRREG(chan) (R_DUART_IMR_A + (chan)*DUART_IMRISR_SPACING) 321#define A_DUART_ISRREG(chan) A_DUART_CTRLREG(R_DUART_ISRREG(chan))
321#define R_DUART_ISRREG(chan) (R_DUART_ISR_A + (chan)*DUART_IMRISR_SPACING) 322#define A_DUART_INCHREG(chan) A_DUART_CTRLREG(R_DUART_INCHREG(chan))
322
323#define A_DUART_IMRREG(chan) (A_DUART + R_DUART_IMRREG(chan))
324#define A_DUART_ISRREG(chan) (A_DUART + R_DUART_ISRREG(chan))
325#endif /* 1250 & 112x */ 323#endif /* 1250 & 112x */
326 324
327 325#define R_DUART_AUX_CTRL 0x010
326#define R_DUART_ISR_A 0x020
327#define R_DUART_IMR_A 0x030
328#define R_DUART_ISR_B 0x040
329#define R_DUART_IMR_B 0x050
330#define R_DUART_OUT_PORT 0x060
331#define R_DUART_OPCR 0x070
332#define R_DUART_IN_PORT 0x080
333
334#define R_DUART_SET_OPR 0x0B0
335#define R_DUART_CLEAR_OPR 0x0C0
336#define R_DUART_IN_CHNG_A 0x0D0
337#define R_DUART_IN_CHNG_B 0x0E0
328 338
329 339
330/* 340/*
diff --git a/include/asm-mips/sibyte/sb1250_uart.h b/include/asm-mips/sibyte/sb1250_uart.h
index e87045e62bf0..cf74fedcbef1 100644
--- a/include/asm-mips/sibyte/sb1250_uart.h
+++ b/include/asm-mips/sibyte/sb1250_uart.h
@@ -75,7 +75,8 @@
75#define V_DUART_PARITY_MODE_ADD_FIXED V_DUART_PARITY_MODE(K_DUART_PARITY_MODE_ADD_FIXED) 75#define V_DUART_PARITY_MODE_ADD_FIXED V_DUART_PARITY_MODE(K_DUART_PARITY_MODE_ADD_FIXED)
76#define V_DUART_PARITY_MODE_NONE V_DUART_PARITY_MODE(K_DUART_PARITY_MODE_NONE) 76#define V_DUART_PARITY_MODE_NONE V_DUART_PARITY_MODE(K_DUART_PARITY_MODE_NONE)
77 77
78#define M_DUART_ERR_MODE _SB_MAKEMASK1(5) /* must be zero */ 78#define M_DUART_TX_IRQ_SEL_TXRDY 0
79#define M_DUART_TX_IRQ_SEL_TXEMPT _SB_MAKEMASK1(5)
79 80
80#define M_DUART_RX_IRQ_SEL_RXRDY 0 81#define M_DUART_RX_IRQ_SEL_RXRDY 0
81#define M_DUART_RX_IRQ_SEL_RXFULL _SB_MAKEMASK1(6) 82#define M_DUART_RX_IRQ_SEL_RXFULL _SB_MAKEMASK1(6)
@@ -246,10 +247,13 @@
246 247
247#define M_DUART_ISR_BRK_A _SB_MAKEMASK1(2) 248#define M_DUART_ISR_BRK_A _SB_MAKEMASK1(2)
248#define M_DUART_ISR_IN_A _SB_MAKEMASK1(3) 249#define M_DUART_ISR_IN_A _SB_MAKEMASK1(3)
250#define M_DUART_ISR_ALL_A _SB_MAKEMASK(4,0)
251
249#define M_DUART_ISR_TX_B _SB_MAKEMASK1(4) 252#define M_DUART_ISR_TX_B _SB_MAKEMASK1(4)
250#define M_DUART_ISR_RX_B _SB_MAKEMASK1(5) 253#define M_DUART_ISR_RX_B _SB_MAKEMASK1(5)
251#define M_DUART_ISR_BRK_B _SB_MAKEMASK1(6) 254#define M_DUART_ISR_BRK_B _SB_MAKEMASK1(6)
252#define M_DUART_ISR_IN_B _SB_MAKEMASK1(7) 255#define M_DUART_ISR_IN_B _SB_MAKEMASK1(7)
256#define M_DUART_ISR_ALL_B _SB_MAKEMASK(4,4)
253 257
254/* 258/*
255 * DUART Channel A Interrupt Status Register (Table 10-17) 259 * DUART Channel A Interrupt Status Register (Table 10-17)
@@ -262,6 +266,7 @@
262#define M_DUART_ISR_RX _SB_MAKEMASK1(1) 266#define M_DUART_ISR_RX _SB_MAKEMASK1(1)
263#define M_DUART_ISR_BRK _SB_MAKEMASK1(2) 267#define M_DUART_ISR_BRK _SB_MAKEMASK1(2)
264#define M_DUART_ISR_IN _SB_MAKEMASK1(3) 268#define M_DUART_ISR_IN _SB_MAKEMASK1(3)
269#define M_DUART_ISR_ALL _SB_MAKEMASK(4,0)
265#define M_DUART_ISR_RESERVED _SB_MAKEMASK(4,4) 270#define M_DUART_ISR_RESERVED _SB_MAKEMASK(4,4)
266 271
267/* 272/*
diff --git a/include/asm-parisc/fb.h b/include/asm-parisc/fb.h
new file mode 100644
index 000000000000..4d503a023ab2
--- /dev/null
+++ b/include/asm-parisc/fb.h
@@ -0,0 +1,19 @@
1#ifndef _ASM_FB_H_
2#define _ASM_FB_H_
3
4#include <linux/fb.h>
5#include <linux/fs.h>
6#include <asm/page.h>
7
8static inline void fb_pgprotect(struct file *file, struct vm_area_struct *vma,
9 unsigned long off)
10{
11 pgprot_val(vma->vm_page_prot) |= _PAGE_NO_CACHE;
12}
13
14static inline int fb_is_primary_device(struct fb_info *info)
15{
16 return 0;
17}
18
19#endif /* _ASM_FB_H_ */
diff --git a/include/asm-parisc/pgtable.h b/include/asm-parisc/pgtable.h
index 7e222c8ba739..e88cacd63724 100644
--- a/include/asm-parisc/pgtable.h
+++ b/include/asm-parisc/pgtable.h
@@ -447,21 +447,6 @@ static inline int ptep_test_and_clear_young(struct vm_area_struct *vma, unsigned
447#endif 447#endif
448} 448}
449 449
450static inline int ptep_test_and_clear_dirty(struct vm_area_struct *vma, unsigned long addr, pte_t *ptep)
451{
452#ifdef CONFIG_SMP
453 if (!pte_dirty(*ptep))
454 return 0;
455 return test_and_clear_bit(xlate_pabit(_PAGE_DIRTY_BIT), &pte_val(*ptep));
456#else
457 pte_t pte = *ptep;
458 if (!pte_dirty(pte))
459 return 0;
460 set_pte_at(vma->vm_mm, addr, ptep, pte_mkclean(pte));
461 return 1;
462#endif
463}
464
465extern spinlock_t pa_dbit_lock; 450extern spinlock_t pa_dbit_lock;
466 451
467struct mm_struct; 452struct mm_struct;
@@ -529,7 +514,6 @@ static inline void ptep_set_wrprotect(struct mm_struct *mm, unsigned long addr,
529#define HAVE_ARCH_UNMAPPED_AREA 514#define HAVE_ARCH_UNMAPPED_AREA
530 515
531#define __HAVE_ARCH_PTEP_TEST_AND_CLEAR_YOUNG 516#define __HAVE_ARCH_PTEP_TEST_AND_CLEAR_YOUNG
532#define __HAVE_ARCH_PTEP_TEST_AND_CLEAR_DIRTY
533#define __HAVE_ARCH_PTEP_GET_AND_CLEAR 517#define __HAVE_ARCH_PTEP_GET_AND_CLEAR
534#define __HAVE_ARCH_PTEP_SET_WRPROTECT 518#define __HAVE_ARCH_PTEP_SET_WRPROTECT
535#define __HAVE_ARCH_PTE_SAME 519#define __HAVE_ARCH_PTE_SAME
diff --git a/include/asm-powerpc/fb.h b/include/asm-powerpc/fb.h
new file mode 100644
index 000000000000..411af8d17a69
--- /dev/null
+++ b/include/asm-powerpc/fb.h
@@ -0,0 +1,21 @@
1#ifndef _ASM_FB_H_
2#define _ASM_FB_H_
3
4#include <linux/fb.h>
5#include <linux/fs.h>
6#include <asm/page.h>
7
8static inline void fb_pgprotect(struct file *file, struct vm_area_struct *vma,
9 unsigned long off)
10{
11 vma->vm_page_prot = phys_mem_access_prot(file, off >> PAGE_SHIFT,
12 vma->vm_end - vma->vm_start,
13 vma->vm_page_prot);
14}
15
16static inline int fb_is_primary_device(struct fb_info *info)
17{
18 return 0;
19}
20
21#endif /* _ASM_FB_H_ */
diff --git a/include/asm-powerpc/kprobes.h b/include/asm-powerpc/kprobes.h
index b0e40ff32ee0..9537fda238b8 100644
--- a/include/asm-powerpc/kprobes.h
+++ b/include/asm-powerpc/kprobes.h
@@ -65,10 +65,10 @@ typedef unsigned int kprobe_opcode_t;
65 } else if (name[0] != '.') \ 65 } else if (name[0] != '.') \
66 addr = *(kprobe_opcode_t **)addr; \ 66 addr = *(kprobe_opcode_t **)addr; \
67 } else { \ 67 } else { \
68 char dot_name[KSYM_NAME_LEN+1]; \ 68 char dot_name[KSYM_NAME_LEN]; \
69 dot_name[0] = '.'; \ 69 dot_name[0] = '.'; \
70 dot_name[1] = '\0'; \ 70 dot_name[1] = '\0'; \
71 strncat(dot_name, name, KSYM_NAME_LEN); \ 71 strncat(dot_name, name, KSYM_NAME_LEN - 2); \
72 addr = (kprobe_opcode_t *)kallsyms_lookup_name(dot_name); \ 72 addr = (kprobe_opcode_t *)kallsyms_lookup_name(dot_name); \
73 } \ 73 } \
74} 74}
diff --git a/include/asm-powerpc/pgtable-ppc32.h b/include/asm-powerpc/pgtable-ppc32.h
index 6c236d4d6262..86a54a4a8a2a 100644
--- a/include/asm-powerpc/pgtable-ppc32.h
+++ b/include/asm-powerpc/pgtable-ppc32.h
@@ -621,13 +621,6 @@ static inline int __ptep_test_and_clear_young(unsigned int context, unsigned lon
621#define ptep_test_and_clear_young(__vma, __addr, __ptep) \ 621#define ptep_test_and_clear_young(__vma, __addr, __ptep) \
622 __ptep_test_and_clear_young((__vma)->vm_mm->context.id, __addr, __ptep) 622 __ptep_test_and_clear_young((__vma)->vm_mm->context.id, __addr, __ptep)
623 623
624#define __HAVE_ARCH_PTEP_TEST_AND_CLEAR_DIRTY
625static inline int ptep_test_and_clear_dirty(struct vm_area_struct *vma,
626 unsigned long addr, pte_t *ptep)
627{
628 return (pte_update(ptep, (_PAGE_DIRTY | _PAGE_HWWRITE), 0) & _PAGE_DIRTY) != 0;
629}
630
631#define __HAVE_ARCH_PTEP_GET_AND_CLEAR 624#define __HAVE_ARCH_PTEP_GET_AND_CLEAR
632static inline pte_t ptep_get_and_clear(struct mm_struct *mm, unsigned long addr, 625static inline pte_t ptep_get_and_clear(struct mm_struct *mm, unsigned long addr,
633 pte_t *ptep) 626 pte_t *ptep)
diff --git a/include/asm-powerpc/pgtable-ppc64.h b/include/asm-powerpc/pgtable-ppc64.h
index 7ca8b5c10019..300f9a199bf2 100644
--- a/include/asm-powerpc/pgtable-ppc64.h
+++ b/include/asm-powerpc/pgtable-ppc64.h
@@ -292,29 +292,6 @@ static inline int __ptep_test_and_clear_young(struct mm_struct *mm,
292 __r; \ 292 __r; \
293}) 293})
294 294
295/*
296 * On RW/DIRTY bit transitions we can avoid flushing the hpte. For the
297 * moment we always flush but we need to fix hpte_update and test if the
298 * optimisation is worth it.
299 */
300static inline int __ptep_test_and_clear_dirty(struct mm_struct *mm,
301 unsigned long addr, pte_t *ptep)
302{
303 unsigned long old;
304
305 if ((pte_val(*ptep) & _PAGE_DIRTY) == 0)
306 return 0;
307 old = pte_update(mm, addr, ptep, _PAGE_DIRTY, 0);
308 return (old & _PAGE_DIRTY) != 0;
309}
310#define __HAVE_ARCH_PTEP_TEST_AND_CLEAR_DIRTY
311#define ptep_test_and_clear_dirty(__vma, __addr, __ptep) \
312({ \
313 int __r; \
314 __r = __ptep_test_and_clear_dirty((__vma)->vm_mm, __addr, __ptep); \
315 __r; \
316})
317
318#define __HAVE_ARCH_PTEP_SET_WRPROTECT 295#define __HAVE_ARCH_PTEP_SET_WRPROTECT
319static inline void ptep_set_wrprotect(struct mm_struct *mm, unsigned long addr, 296static inline void ptep_set_wrprotect(struct mm_struct *mm, unsigned long addr,
320 pte_t *ptep) 297 pte_t *ptep)
@@ -342,14 +319,6 @@ static inline void ptep_set_wrprotect(struct mm_struct *mm, unsigned long addr,
342 __young; \ 319 __young; \
343}) 320})
344 321
345#define __HAVE_ARCH_PTEP_CLEAR_DIRTY_FLUSH
346#define ptep_clear_flush_dirty(__vma, __address, __ptep) \
347({ \
348 int __dirty = __ptep_test_and_clear_dirty((__vma)->vm_mm, __address, \
349 __ptep); \
350 __dirty; \
351})
352
353#define __HAVE_ARCH_PTEP_GET_AND_CLEAR 322#define __HAVE_ARCH_PTEP_GET_AND_CLEAR
354static inline pte_t ptep_get_and_clear(struct mm_struct *mm, 323static inline pte_t ptep_get_and_clear(struct mm_struct *mm,
355 unsigned long addr, pte_t *ptep) 324 unsigned long addr, pte_t *ptep)
diff --git a/include/asm-ppc/pgtable.h b/include/asm-ppc/pgtable.h
index 18aa776313b9..c159315d2c8f 100644
--- a/include/asm-ppc/pgtable.h
+++ b/include/asm-ppc/pgtable.h
@@ -654,13 +654,6 @@ static inline int __ptep_test_and_clear_young(unsigned int context, unsigned lon
654#define ptep_test_and_clear_young(__vma, __addr, __ptep) \ 654#define ptep_test_and_clear_young(__vma, __addr, __ptep) \
655 __ptep_test_and_clear_young((__vma)->vm_mm->context.id, __addr, __ptep) 655 __ptep_test_and_clear_young((__vma)->vm_mm->context.id, __addr, __ptep)
656 656
657#define __HAVE_ARCH_PTEP_TEST_AND_CLEAR_DIRTY
658static inline int ptep_test_and_clear_dirty(struct vm_area_struct *vma,
659 unsigned long addr, pte_t *ptep)
660{
661 return (pte_update(ptep, (_PAGE_DIRTY | _PAGE_HWWRITE), 0) & _PAGE_DIRTY) != 0;
662}
663
664#define __HAVE_ARCH_PTEP_GET_AND_CLEAR 657#define __HAVE_ARCH_PTEP_GET_AND_CLEAR
665static inline pte_t ptep_get_and_clear(struct mm_struct *mm, unsigned long addr, 658static inline pte_t ptep_get_and_clear(struct mm_struct *mm, unsigned long addr,
666 pte_t *ptep) 659 pte_t *ptep)
diff --git a/include/asm-s390/fb.h b/include/asm-s390/fb.h
new file mode 100644
index 000000000000..c7df38030992
--- /dev/null
+++ b/include/asm-s390/fb.h
@@ -0,0 +1,12 @@
1#ifndef _ASM_FB_H_
2#define _ASM_FB_H_
3#include <linux/fb.h>
4
5#define fb_pgprotect(...) do {} while (0)
6
7static inline int fb_is_primary_device(struct fb_info *info)
8{
9 return 0;
10}
11
12#endif /* _ASM_FB_H_ */
diff --git a/include/asm-s390/page.h b/include/asm-s390/page.h
index 05ea6f172786..f326451ed6ec 100644
--- a/include/asm-s390/page.h
+++ b/include/asm-s390/page.h
@@ -64,7 +64,8 @@ static inline void copy_page(void *to, void *from)
64#define clear_user_page(page, vaddr, pg) clear_page(page) 64#define clear_user_page(page, vaddr, pg) clear_page(page)
65#define copy_user_page(to, from, vaddr, pg) copy_page(to, from) 65#define copy_user_page(to, from, vaddr, pg) copy_page(to, from)
66 66
67#define alloc_zeroed_user_highpage(vma, vaddr) alloc_page_vma(GFP_HIGHUSER | __GFP_ZERO, vma, vaddr) 67#define __alloc_zeroed_user_highpage(movableflags, vma, vaddr) \
68 alloc_page_vma(GFP_HIGHUSER | __GFP_ZERO | movableflags, vma, vaddr)
68#define __HAVE_ARCH_ALLOC_ZEROED_USER_HIGHPAGE 69#define __HAVE_ARCH_ALLOC_ZEROED_USER_HIGHPAGE
69 70
70/* 71/*
diff --git a/include/asm-s390/pgtable.h b/include/asm-s390/pgtable.h
index 26215a976127..3208dc6c412c 100644
--- a/include/asm-s390/pgtable.h
+++ b/include/asm-s390/pgtable.h
@@ -669,19 +669,6 @@ ptep_clear_flush_young(struct vm_area_struct *vma,
669 return ptep_test_and_clear_young(vma, address, ptep); 669 return ptep_test_and_clear_young(vma, address, ptep);
670} 670}
671 671
672static inline int ptep_test_and_clear_dirty(struct vm_area_struct *vma, unsigned long addr, pte_t *ptep)
673{
674 return 0;
675}
676
677static inline int
678ptep_clear_flush_dirty(struct vm_area_struct *vma,
679 unsigned long address, pte_t *ptep)
680{
681 /* No need to flush TLB; bits are in storage key */
682 return ptep_test_and_clear_dirty(vma, address, ptep);
683}
684
685static inline pte_t ptep_get_and_clear(struct mm_struct *mm, unsigned long addr, pte_t *ptep) 672static inline pte_t ptep_get_and_clear(struct mm_struct *mm, unsigned long addr, pte_t *ptep)
686{ 673{
687 pte_t pte = *ptep; 674 pte_t pte = *ptep;
@@ -707,16 +694,19 @@ static inline void __ptep_ipte(unsigned long address, pte_t *ptep)
707 pte_val(*ptep) = _PAGE_TYPE_EMPTY; 694 pte_val(*ptep) = _PAGE_TYPE_EMPTY;
708} 695}
709 696
710static inline pte_t 697static inline void ptep_invalidate(unsigned long address, pte_t *ptep)
711ptep_clear_flush(struct vm_area_struct *vma,
712 unsigned long address, pte_t *ptep)
713{ 698{
714 pte_t pte = *ptep;
715 pte_t *shadow_pte = get_shadow_pte(ptep);
716
717 __ptep_ipte(address, ptep); 699 __ptep_ipte(address, ptep);
718 if (shadow_pte) 700 ptep = get_shadow_pte(ptep);
719 __ptep_ipte(address, shadow_pte); 701 if (ptep)
702 __ptep_ipte(address, ptep);
703}
704
705static inline pte_t ptep_clear_flush(struct vm_area_struct *vma,
706 unsigned long address, pte_t *ptep)
707{
708 pte_t pte = *ptep;
709 ptep_invalidate(address, ptep);
720 return pte; 710 return pte;
721} 711}
722 712
@@ -726,21 +716,14 @@ static inline void ptep_set_wrprotect(struct mm_struct *mm, unsigned long addr,
726 set_pte_at(mm, addr, ptep, pte_wrprotect(old_pte)); 716 set_pte_at(mm, addr, ptep, pte_wrprotect(old_pte));
727} 717}
728 718
729static inline void 719#define ptep_set_access_flags(__vma, __addr, __ptep, __entry, __dirty) \
730ptep_establish(struct vm_area_struct *vma, 720({ \
731 unsigned long address, pte_t *ptep, 721 int __changed = !pte_same(*(__ptep), __entry); \
732 pte_t entry) 722 if (__changed) { \
733{ 723 ptep_invalidate(__addr, __ptep); \
734 ptep_clear_flush(vma, address, ptep); 724 set_pte_at((__vma)->vm_mm, __addr, __ptep, __entry); \
735 set_pte(ptep, entry); 725 } \
736} 726 __changed; \
737
738#define ptep_set_access_flags(__vma, __address, __ptep, __entry, __dirty) \
739({ \
740 int __changed = !pte_same(*(__ptep), __entry); \
741 if (__changed) \
742 ptep_establish(__vma, __address, __ptep, __entry); \
743 __changed; \
744}) 727})
745 728
746/* 729/*
@@ -940,12 +923,9 @@ extern int remove_shared_memory(unsigned long start, unsigned long size);
940#define __HAVE_ARCH_MEMMAP_INIT 923#define __HAVE_ARCH_MEMMAP_INIT
941extern void memmap_init(unsigned long, int, unsigned long, unsigned long); 924extern void memmap_init(unsigned long, int, unsigned long, unsigned long);
942 925
943#define __HAVE_ARCH_PTEP_ESTABLISH
944#define __HAVE_ARCH_PTEP_SET_ACCESS_FLAGS 926#define __HAVE_ARCH_PTEP_SET_ACCESS_FLAGS
945#define __HAVE_ARCH_PTEP_TEST_AND_CLEAR_YOUNG 927#define __HAVE_ARCH_PTEP_TEST_AND_CLEAR_YOUNG
946#define __HAVE_ARCH_PTEP_CLEAR_YOUNG_FLUSH 928#define __HAVE_ARCH_PTEP_CLEAR_YOUNG_FLUSH
947#define __HAVE_ARCH_PTEP_TEST_AND_CLEAR_DIRTY
948#define __HAVE_ARCH_PTEP_CLEAR_DIRTY_FLUSH
949#define __HAVE_ARCH_PTEP_GET_AND_CLEAR 929#define __HAVE_ARCH_PTEP_GET_AND_CLEAR
950#define __HAVE_ARCH_PTEP_CLEAR_FLUSH 930#define __HAVE_ARCH_PTEP_CLEAR_FLUSH
951#define __HAVE_ARCH_PTEP_SET_WRPROTECT 931#define __HAVE_ARCH_PTEP_SET_WRPROTECT
diff --git a/include/asm-sh/fb.h b/include/asm-sh/fb.h
new file mode 100644
index 000000000000..d92e99cd8c8a
--- /dev/null
+++ b/include/asm-sh/fb.h
@@ -0,0 +1,19 @@
1#ifndef _ASM_FB_H_
2#define _ASM_FB_H_
3
4#include <linux/fb.h>
5#include <linux/fs.h>
6#include <asm/page.h>
7
8static inline void fb_pgprotect(struct file *file, struct vm_area_struct *vma,
9 unsigned long off)
10{
11 vma->vm_page_prot = pgprot_writecombine(vma->vm_page_prot);
12}
13
14static inline int fb_is_primary_device(struct fb_info *info)
15{
16 return 0;
17}
18
19#endif /* _ASM_FB_H_ */
diff --git a/include/asm-sh64/fb.h b/include/asm-sh64/fb.h
new file mode 100644
index 000000000000..d92e99cd8c8a
--- /dev/null
+++ b/include/asm-sh64/fb.h
@@ -0,0 +1,19 @@
1#ifndef _ASM_FB_H_
2#define _ASM_FB_H_
3
4#include <linux/fb.h>
5#include <linux/fs.h>
6#include <asm/page.h>
7
8static inline void fb_pgprotect(struct file *file, struct vm_area_struct *vma,
9 unsigned long off)
10{
11 vma->vm_page_prot = pgprot_writecombine(vma->vm_page_prot);
12}
13
14static inline int fb_is_primary_device(struct fb_info *info)
15{
16 return 0;
17}
18
19#endif /* _ASM_FB_H_ */
diff --git a/include/asm-sparc/fb.h b/include/asm-sparc/fb.h
new file mode 100644
index 000000000000..c7df38030992
--- /dev/null
+++ b/include/asm-sparc/fb.h
@@ -0,0 +1,12 @@
1#ifndef _ASM_FB_H_
2#define _ASM_FB_H_
3#include <linux/fb.h>
4
5#define fb_pgprotect(...) do {} while (0)
6
7static inline int fb_is_primary_device(struct fb_info *info)
8{
9 return 0;
10}
11
12#endif /* _ASM_FB_H_ */
diff --git a/include/asm-sparc64/fb.h b/include/asm-sparc64/fb.h
new file mode 100644
index 000000000000..d6cd3a175fc3
--- /dev/null
+++ b/include/asm-sparc64/fb.h
@@ -0,0 +1,18 @@
1#ifndef _ASM_FB_H_
2#define _ASM_FB_H_
3#include <linux/fb.h>
4#include <linux/fs.h>
5#include <asm/page.h>
6
7static inline void fb_pgprotect(struct file *file, struct vm_area_struct *vma,
8 unsigned long off)
9{
10 vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
11}
12
13static inline int fb_is_primary_device(struct fb_info *info)
14{
15 return 0;
16}
17
18#endif /* _ASM_FB_H_ */
diff --git a/include/asm-v850/fb.h b/include/asm-v850/fb.h
new file mode 100644
index 000000000000..c7df38030992
--- /dev/null
+++ b/include/asm-v850/fb.h
@@ -0,0 +1,12 @@
1#ifndef _ASM_FB_H_
2#define _ASM_FB_H_
3#include <linux/fb.h>
4
5#define fb_pgprotect(...) do {} while (0)
6
7static inline int fb_is_primary_device(struct fb_info *info)
8{
9 return 0;
10}
11
12#endif /* _ASM_FB_H_ */
diff --git a/include/asm-x86_64/fb.h b/include/asm-x86_64/fb.h
new file mode 100644
index 000000000000..60548e651d12
--- /dev/null
+++ b/include/asm-x86_64/fb.h
@@ -0,0 +1,19 @@
1#ifndef _ASM_FB_H_
2#define _ASM_FB_H_
3#include <linux/fb.h>
4#include <linux/fs.h>
5#include <asm/page.h>
6
7static inline void fb_pgprotect(struct file *file, struct vm_area_struct *vma,
8 unsigned long off)
9{
10 if (boot_cpu_data.x86 > 3)
11 pgprot_val(vma->vm_page_prot) |= _PAGE_PCD;
12}
13
14static inline int fb_is_primary_device(struct fb_info *info)
15{
16 return 0;
17}
18
19#endif /* _ASM_FB_H_ */
diff --git a/include/asm-x86_64/page.h b/include/asm-x86_64/page.h
index e327c830da0c..88adf1afb0a2 100644
--- a/include/asm-x86_64/page.h
+++ b/include/asm-x86_64/page.h
@@ -48,7 +48,8 @@ void copy_page(void *, void *);
48#define clear_user_page(page, vaddr, pg) clear_page(page) 48#define clear_user_page(page, vaddr, pg) clear_page(page)
49#define copy_user_page(to, from, vaddr, pg) copy_page(to, from) 49#define copy_user_page(to, from, vaddr, pg) copy_page(to, from)
50 50
51#define alloc_zeroed_user_highpage(vma, vaddr) alloc_page_vma(GFP_HIGHUSER | __GFP_ZERO, vma, vaddr) 51#define __alloc_zeroed_user_highpage(movableflags, vma, vaddr) \
52 alloc_page_vma(GFP_HIGHUSER | __GFP_ZERO | movableflags, vma, vaddr)
52#define __HAVE_ARCH_ALLOC_ZEROED_USER_HIGHPAGE 53#define __HAVE_ARCH_ALLOC_ZEROED_USER_HIGHPAGE
53/* 54/*
54 * These are used to make use of C type-checking.. 55 * These are used to make use of C type-checking..
diff --git a/include/asm-x86_64/pgtable.h b/include/asm-x86_64/pgtable.h
index 4f169ac6b10a..3ba53099297d 100644
--- a/include/asm-x86_64/pgtable.h
+++ b/include/asm-x86_64/pgtable.h
@@ -284,13 +284,6 @@ static inline pte_t pte_clrhuge(pte_t pte) { set_pte(&pte, __pte(pte_val(pte) &
284 284
285struct vm_area_struct; 285struct vm_area_struct;
286 286
287static inline int ptep_test_and_clear_dirty(struct vm_area_struct *vma, unsigned long addr, pte_t *ptep)
288{
289 if (!pte_dirty(*ptep))
290 return 0;
291 return test_and_clear_bit(_PAGE_BIT_DIRTY, &ptep->pte);
292}
293
294static inline int ptep_test_and_clear_young(struct vm_area_struct *vma, unsigned long addr, pte_t *ptep) 287static inline int ptep_test_and_clear_young(struct vm_area_struct *vma, unsigned long addr, pte_t *ptep)
295{ 288{
296 if (!pte_young(*ptep)) 289 if (!pte_young(*ptep))
@@ -427,7 +420,6 @@ extern int kern_addr_valid(unsigned long addr);
427 (((o) & (1UL << (__VIRTUAL_MASK_SHIFT-1))) ? ((o) | (~__VIRTUAL_MASK)) : (o)) 420 (((o) & (1UL << (__VIRTUAL_MASK_SHIFT-1))) ? ((o) | (~__VIRTUAL_MASK)) : (o))
428 421
429#define __HAVE_ARCH_PTEP_TEST_AND_CLEAR_YOUNG 422#define __HAVE_ARCH_PTEP_TEST_AND_CLEAR_YOUNG
430#define __HAVE_ARCH_PTEP_TEST_AND_CLEAR_DIRTY
431#define __HAVE_ARCH_PTEP_GET_AND_CLEAR 423#define __HAVE_ARCH_PTEP_GET_AND_CLEAR
432#define __HAVE_ARCH_PTEP_GET_AND_CLEAR_FULL 424#define __HAVE_ARCH_PTEP_GET_AND_CLEAR_FULL
433#define __HAVE_ARCH_PTEP_SET_WRPROTECT 425#define __HAVE_ARCH_PTEP_SET_WRPROTECT
diff --git a/include/asm-xtensa/fb.h b/include/asm-xtensa/fb.h
new file mode 100644
index 000000000000..c7df38030992
--- /dev/null
+++ b/include/asm-xtensa/fb.h
@@ -0,0 +1,12 @@
1#ifndef _ASM_FB_H_
2#define _ASM_FB_H_
3#include <linux/fb.h>
4
5#define fb_pgprotect(...) do {} while (0)
6
7static inline int fb_is_primary_device(struct fb_info *info)
8{
9 return 0;
10}
11
12#endif /* _ASM_FB_H_ */
diff --git a/include/asm-xtensa/pgtable.h b/include/asm-xtensa/pgtable.h
index e9fc512cc247..06850f3b26a7 100644
--- a/include/asm-xtensa/pgtable.h
+++ b/include/asm-xtensa/pgtable.h
@@ -267,17 +267,6 @@ ptep_test_and_clear_young(struct vm_area_struct *vma, unsigned long addr,
267 return 1; 267 return 1;
268} 268}
269 269
270static inline int
271ptep_test_and_clear_dirty(struct vm_area_struct *vma, unsigned long addr,
272 pte_t *ptep)
273{
274 pte_t pte = *ptep;
275 if (!pte_dirty(pte))
276 return 0;
277 update_pte(ptep, pte_mkclean(pte));
278 return 1;
279}
280
281static inline pte_t 270static inline pte_t
282ptep_get_and_clear(struct mm_struct *mm, unsigned long addr, pte_t *ptep) 271ptep_get_and_clear(struct mm_struct *mm, unsigned long addr, pte_t *ptep)
283{ 272{
@@ -418,7 +407,6 @@ typedef pte_t *pte_addr_t;
418#endif /* !defined (__ASSEMBLY__) */ 407#endif /* !defined (__ASSEMBLY__) */
419 408
420#define __HAVE_ARCH_PTEP_TEST_AND_CLEAR_YOUNG 409#define __HAVE_ARCH_PTEP_TEST_AND_CLEAR_YOUNG
421#define __HAVE_ARCH_PTEP_TEST_AND_CLEAR_DIRTY
422#define __HAVE_ARCH_PTEP_GET_AND_CLEAR 410#define __HAVE_ARCH_PTEP_GET_AND_CLEAR
423#define __HAVE_ARCH_PTEP_SET_WRPROTECT 411#define __HAVE_ARCH_PTEP_SET_WRPROTECT
424#define __HAVE_ARCH_PTEP_MKDIRTY 412#define __HAVE_ARCH_PTEP_MKDIRTY
diff --git a/include/linux/blkdev.h b/include/linux/blkdev.h
index b32564a1e105..f78965fc6426 100644
--- a/include/linux/blkdev.h
+++ b/include/linux/blkdev.h
@@ -624,7 +624,7 @@ extern unsigned long blk_max_low_pfn, blk_max_pfn;
624 */ 624 */
625#define BLK_DEFAULT_SG_TIMEOUT (60 * HZ) 625#define BLK_DEFAULT_SG_TIMEOUT (60 * HZ)
626 626
627#ifdef CONFIG_MMU 627#ifdef CONFIG_BOUNCE
628extern int init_emergency_isa_pool(void); 628extern int init_emergency_isa_pool(void);
629extern void blk_queue_bounce(request_queue_t *q, struct bio **bio); 629extern void blk_queue_bounce(request_queue_t *q, struct bio **bio);
630#else 630#else
diff --git a/include/linux/crc7.h b/include/linux/crc7.h
new file mode 100644
index 000000000000..1786e772d5c6
--- /dev/null
+++ b/include/linux/crc7.h
@@ -0,0 +1,14 @@
1#ifndef _LINUX_CRC7_H
2#define _LINUX_CRC7_H
3#include <linux/types.h>
4
5extern const u8 crc7_syndrome_table[256];
6
7static inline u8 crc7_byte(u8 crc, u8 data)
8{
9 return crc7_syndrome_table[(crc << 1) ^ data];
10}
11
12extern u8 crc7(u8 crc, const u8 *buffer, size_t len);
13
14#endif
diff --git a/include/linux/efs_fs.h b/include/linux/efs_fs.h
index dfed8009ebff..16cb25cbf7c5 100644
--- a/include/linux/efs_fs.h
+++ b/include/linux/efs_fs.h
@@ -45,6 +45,7 @@ extern efs_block_t efs_map_block(struct inode *, efs_block_t);
45extern int efs_get_block(struct inode *, sector_t, struct buffer_head *, int); 45extern int efs_get_block(struct inode *, sector_t, struct buffer_head *, int);
46 46
47extern struct dentry *efs_lookup(struct inode *, struct dentry *, struct nameidata *); 47extern struct dentry *efs_lookup(struct inode *, struct dentry *, struct nameidata *);
48extern struct dentry *efs_get_dentry(struct super_block *sb, void *vobjp);
48extern struct dentry *efs_get_parent(struct dentry *); 49extern struct dentry *efs_get_parent(struct dentry *);
49extern int efs_bmap(struct inode *, int); 50extern int efs_bmap(struct inode *, int);
50 51
diff --git a/include/linux/exportfs.h b/include/linux/exportfs.h
new file mode 100644
index 000000000000..8872fe8392d6
--- /dev/null
+++ b/include/linux/exportfs.h
@@ -0,0 +1,126 @@
1#ifndef LINUX_EXPORTFS_H
2#define LINUX_EXPORTFS_H 1
3
4#include <linux/types.h>
5
6struct dentry;
7struct super_block;
8struct vfsmount;
9
10
11/**
12 * struct export_operations - for nfsd to communicate with file systems
13 * @decode_fh: decode a file handle fragment and return a &struct dentry
14 * @encode_fh: encode a file handle fragment from a dentry
15 * @get_name: find the name for a given inode in a given directory
16 * @get_parent: find the parent of a given directory
17 * @get_dentry: find a dentry for the inode given a file handle sub-fragment
18 * @find_exported_dentry:
19 * set by the exporting module to a standard helper function.
20 *
21 * Description:
22 * The export_operations structure provides a means for nfsd to communicate
23 * with a particular exported file system - particularly enabling nfsd and
24 * the filesystem to co-operate when dealing with file handles.
25 *
26 * export_operations contains two basic operation for dealing with file
27 * handles, decode_fh() and encode_fh(), and allows for some other
28 * operations to be defined which standard helper routines use to get
29 * specific information from the filesystem.
30 *
31 * nfsd encodes information use to determine which filesystem a filehandle
32 * applies to in the initial part of the file handle. The remainder, termed
33 * a file handle fragment, is controlled completely by the filesystem. The
34 * standard helper routines assume that this fragment will contain one or
35 * two sub-fragments, one which identifies the file, and one which may be
36 * used to identify the (a) directory containing the file.
37 *
38 * In some situations, nfsd needs to get a dentry which is connected into a
39 * specific part of the file tree. To allow for this, it passes the
40 * function acceptable() together with a @context which can be used to see
41 * if the dentry is acceptable. As there can be multiple dentrys for a
42 * given file, the filesystem should check each one for acceptability before
43 * looking for the next. As soon as an acceptable one is found, it should
44 * be returned.
45 *
46 * decode_fh:
47 * @decode_fh is given a &struct super_block (@sb), a file handle fragment
48 * (@fh, @fh_len) and an acceptability testing function (@acceptable,
49 * @context). It should return a &struct dentry which refers to the same
50 * file that the file handle fragment refers to, and which passes the
51 * acceptability test. If it cannot, it should return a %NULL pointer if
52 * the file was found but no acceptable &dentries were available, or a
53 * %ERR_PTR error code indicating why it couldn't be found (e.g. %ENOENT or
54 * %ENOMEM).
55 *
56 * encode_fh:
57 * @encode_fh should store in the file handle fragment @fh (using at most
58 * @max_len bytes) information that can be used by @decode_fh to recover the
59 * file refered to by the &struct dentry @de. If the @connectable flag is
60 * set, the encode_fh() should store sufficient information so that a good
61 * attempt can be made to find not only the file but also it's place in the
62 * filesystem. This typically means storing a reference to de->d_parent in
63 * the filehandle fragment. encode_fh() should return the number of bytes
64 * stored or a negative error code such as %-ENOSPC
65 *
66 * get_name:
67 * @get_name should find a name for the given @child in the given @parent
68 * directory. The name should be stored in the @name (with the
69 * understanding that it is already pointing to a a %NAME_MAX+1 sized
70 * buffer. get_name() should return %0 on success, a negative error code
71 * or error. @get_name will be called without @parent->i_mutex held.
72 *
73 * get_parent:
74 * @get_parent should find the parent directory for the given @child which
75 * is also a directory. In the event that it cannot be found, or storage
76 * space cannot be allocated, a %ERR_PTR should be returned.
77 *
78 * get_dentry:
79 * Given a &super_block (@sb) and a pointer to a file-system specific inode
80 * identifier, possibly an inode number, (@inump) get_dentry() should find
81 * the identified inode and return a dentry for that inode. Any suitable
82 * dentry can be returned including, if necessary, a new dentry created with
83 * d_alloc_root. The caller can then find any other extant dentrys by
84 * following the d_alias links. If a new dentry was created using
85 * d_alloc_root, DCACHE_NFSD_DISCONNECTED should be set, and the dentry
86 * should be d_rehash()ed.
87 *
88 * If the inode cannot be found, either a %NULL pointer or an %ERR_PTR code
89 * can be returned. The @inump will be whatever was passed to
90 * nfsd_find_fh_dentry() in either the @obj or @parent parameters.
91 *
92 * Locking rules:
93 * get_parent is called with child->d_inode->i_mutex down
94 * get_name is not (which is possibly inconsistent)
95 */
96
97struct export_operations {
98 struct dentry *(*decode_fh)(struct super_block *sb, __u32 *fh,
99 int fh_len, int fh_type,
100 int (*acceptable)(void *context, struct dentry *de),
101 void *context);
102 int (*encode_fh)(struct dentry *de, __u32 *fh, int *max_len,
103 int connectable);
104 int (*get_name)(struct dentry *parent, char *name,
105 struct dentry *child);
106 struct dentry * (*get_parent)(struct dentry *child);
107 struct dentry * (*get_dentry)(struct super_block *sb, void *inump);
108
109 /* This is set by the exporting module to a standard helper */
110 struct dentry * (*find_exported_dentry)(
111 struct super_block *sb, void *obj, void *parent,
112 int (*acceptable)(void *context, struct dentry *de),
113 void *context);
114};
115
116extern struct dentry *find_exported_dentry(struct super_block *sb, void *obj,
117 void *parent, int (*acceptable)(void *context, struct dentry *de),
118 void *context);
119
120extern int exportfs_encode_fh(struct dentry *dentry, __u32 *fh, int *max_len,
121 int connectable);
122extern struct dentry *exportfs_decode_fh(struct vfsmount *mnt, __u32 *fh,
123 int fh_len, int fileid_type, int (*acceptable)(void *, struct dentry *),
124 void *context);
125
126#endif /* LINUX_EXPORTFS_H */
diff --git a/include/linux/fb.h b/include/linux/fb.h
index 66226824ab68..cec54106aa87 100644
--- a/include/linux/fb.h
+++ b/include/linux/fb.h
@@ -119,6 +119,7 @@ struct dentry;
119#define FB_ACCEL_NV_40 46 /* nVidia Arch 40 */ 119#define FB_ACCEL_NV_40 46 /* nVidia Arch 40 */
120#define FB_ACCEL_XGI_VOLARI_V 47 /* XGI Volari V3XT, V5, V8 */ 120#define FB_ACCEL_XGI_VOLARI_V 47 /* XGI Volari V3XT, V5, V8 */
121#define FB_ACCEL_XGI_VOLARI_Z 48 /* XGI Volari Z7 */ 121#define FB_ACCEL_XGI_VOLARI_Z 48 /* XGI Volari Z7 */
122#define FB_ACCEL_OMAP1610 49 /* TI OMAP16xx */
122#define FB_ACCEL_NEOMAGIC_NM2070 90 /* NeoMagic NM2070 */ 123#define FB_ACCEL_NEOMAGIC_NM2070 90 /* NeoMagic NM2070 */
123#define FB_ACCEL_NEOMAGIC_NM2090 91 /* NeoMagic NM2090 */ 124#define FB_ACCEL_NEOMAGIC_NM2090 91 /* NeoMagic NM2090 */
124#define FB_ACCEL_NEOMAGIC_NM2093 92 /* NeoMagic NM2093 */ 125#define FB_ACCEL_NEOMAGIC_NM2093 92 /* NeoMagic NM2093 */
@@ -529,6 +530,8 @@ struct fb_cursor_user {
529#define FB_EVENT_CONBLANK 0x0C 530#define FB_EVENT_CONBLANK 0x0C
530/* Get drawing requirements */ 531/* Get drawing requirements */
531#define FB_EVENT_GET_REQ 0x0D 532#define FB_EVENT_GET_REQ 0x0D
533/* Unbind from the console if possible */
534#define FB_EVENT_FB_UNBIND 0x0E
532 535
533struct fb_event { 536struct fb_event {
534 struct fb_info *info; 537 struct fb_info *info;
diff --git a/include/linux/freezer.h b/include/linux/freezer.h
index 4631086f5060..2d38b1a74662 100644
--- a/include/linux/freezer.h
+++ b/include/linux/freezer.h
@@ -1,5 +1,8 @@
1/* Freezer declarations */ 1/* Freezer declarations */
2 2
3#ifndef FREEZER_H_INCLUDED
4#define FREEZER_H_INCLUDED
5
3#include <linux/sched.h> 6#include <linux/sched.h>
4 7
5#ifdef CONFIG_PM 8#ifdef CONFIG_PM
@@ -115,6 +118,14 @@ static inline int freezer_should_skip(struct task_struct *p)
115 return !!(p->flags & PF_FREEZER_SKIP); 118 return !!(p->flags & PF_FREEZER_SKIP);
116} 119}
117 120
121/*
122 * Tell the freezer that the current task should be frozen by it
123 */
124static inline void set_freezable(void)
125{
126 current->flags &= ~PF_NOFREEZE;
127}
128
118#else 129#else
119static inline int frozen(struct task_struct *p) { return 0; } 130static inline int frozen(struct task_struct *p) { return 0; }
120static inline int freezing(struct task_struct *p) { return 0; } 131static inline int freezing(struct task_struct *p) { return 0; }
@@ -130,4 +141,7 @@ static inline int try_to_freeze(void) { return 0; }
130static inline void freezer_do_not_count(void) {} 141static inline void freezer_do_not_count(void) {}
131static inline void freezer_count(void) {} 142static inline void freezer_count(void) {}
132static inline int freezer_should_skip(struct task_struct *p) { return 0; } 143static inline int freezer_should_skip(struct task_struct *p) { return 0; }
144static inline void set_freezable(void) {}
133#endif 145#endif
146
147#endif /* FREEZER_H_INCLUDED */
diff --git a/include/linux/fs.h b/include/linux/fs.h
index e68780810279..98205f680476 100644
--- a/include/linux/fs.h
+++ b/include/linux/fs.h
@@ -283,11 +283,14 @@ extern int dir_notify_enable;
283#include <linux/init.h> 283#include <linux/init.h>
284#include <linux/pid.h> 284#include <linux/pid.h>
285#include <linux/mutex.h> 285#include <linux/mutex.h>
286#include <linux/sysctl.h>
287#include <linux/capability.h>
286 288
287#include <asm/atomic.h> 289#include <asm/atomic.h>
288#include <asm/semaphore.h> 290#include <asm/semaphore.h>
289#include <asm/byteorder.h> 291#include <asm/byteorder.h>
290 292
293struct export_operations;
291struct hd_geometry; 294struct hd_geometry;
292struct iovec; 295struct iovec;
293struct nameidata; 296struct nameidata;
@@ -988,6 +991,9 @@ enum {
988#define put_fs_excl() atomic_dec(&current->fs_excl) 991#define put_fs_excl() atomic_dec(&current->fs_excl)
989#define has_fs_excl() atomic_read(&current->fs_excl) 992#define has_fs_excl() atomic_read(&current->fs_excl)
990 993
994#define is_owner_or_cap(inode) \
995 ((current->fsuid == (inode)->i_uid) || capable(CAP_FOWNER))
996
991/* not quite ready to be deprecated, but... */ 997/* not quite ready to be deprecated, but... */
992extern void lock_super(struct super_block *); 998extern void lock_super(struct super_block *);
993extern void unlock_super(struct super_block *); 999extern void unlock_super(struct super_block *);
@@ -1277,119 +1283,6 @@ static inline void file_accessed(struct file *file)
1277 1283
1278int sync_inode(struct inode *inode, struct writeback_control *wbc); 1284int sync_inode(struct inode *inode, struct writeback_control *wbc);
1279 1285
1280/**
1281 * struct export_operations - for nfsd to communicate with file systems
1282 * @decode_fh: decode a file handle fragment and return a &struct dentry
1283 * @encode_fh: encode a file handle fragment from a dentry
1284 * @get_name: find the name for a given inode in a given directory
1285 * @get_parent: find the parent of a given directory
1286 * @get_dentry: find a dentry for the inode given a file handle sub-fragment
1287 * @find_exported_dentry:
1288 * set by the exporting module to a standard helper function.
1289 *
1290 * Description:
1291 * The export_operations structure provides a means for nfsd to communicate
1292 * with a particular exported file system - particularly enabling nfsd and
1293 * the filesystem to co-operate when dealing with file handles.
1294 *
1295 * export_operations contains two basic operation for dealing with file
1296 * handles, decode_fh() and encode_fh(), and allows for some other
1297 * operations to be defined which standard helper routines use to get
1298 * specific information from the filesystem.
1299 *
1300 * nfsd encodes information use to determine which filesystem a filehandle
1301 * applies to in the initial part of the file handle. The remainder, termed
1302 * a file handle fragment, is controlled completely by the filesystem. The
1303 * standard helper routines assume that this fragment will contain one or
1304 * two sub-fragments, one which identifies the file, and one which may be
1305 * used to identify the (a) directory containing the file.
1306 *
1307 * In some situations, nfsd needs to get a dentry which is connected into a
1308 * specific part of the file tree. To allow for this, it passes the
1309 * function acceptable() together with a @context which can be used to see
1310 * if the dentry is acceptable. As there can be multiple dentrys for a
1311 * given file, the filesystem should check each one for acceptability before
1312 * looking for the next. As soon as an acceptable one is found, it should
1313 * be returned.
1314 *
1315 * decode_fh:
1316 * @decode_fh is given a &struct super_block (@sb), a file handle fragment
1317 * (@fh, @fh_len) and an acceptability testing function (@acceptable,
1318 * @context). It should return a &struct dentry which refers to the same
1319 * file that the file handle fragment refers to, and which passes the
1320 * acceptability test. If it cannot, it should return a %NULL pointer if
1321 * the file was found but no acceptable &dentries were available, or a
1322 * %ERR_PTR error code indicating why it couldn't be found (e.g. %ENOENT or
1323 * %ENOMEM).
1324 *
1325 * encode_fh:
1326 * @encode_fh should store in the file handle fragment @fh (using at most
1327 * @max_len bytes) information that can be used by @decode_fh to recover the
1328 * file refered to by the &struct dentry @de. If the @connectable flag is
1329 * set, the encode_fh() should store sufficient information so that a good
1330 * attempt can be made to find not only the file but also it's place in the
1331 * filesystem. This typically means storing a reference to de->d_parent in
1332 * the filehandle fragment. encode_fh() should return the number of bytes
1333 * stored or a negative error code such as %-ENOSPC
1334 *
1335 * get_name:
1336 * @get_name should find a name for the given @child in the given @parent
1337 * directory. The name should be stored in the @name (with the
1338 * understanding that it is already pointing to a a %NAME_MAX+1 sized
1339 * buffer. get_name() should return %0 on success, a negative error code
1340 * or error. @get_name will be called without @parent->i_mutex held.
1341 *
1342 * get_parent:
1343 * @get_parent should find the parent directory for the given @child which
1344 * is also a directory. In the event that it cannot be found, or storage
1345 * space cannot be allocated, a %ERR_PTR should be returned.
1346 *
1347 * get_dentry:
1348 * Given a &super_block (@sb) and a pointer to a file-system specific inode
1349 * identifier, possibly an inode number, (@inump) get_dentry() should find
1350 * the identified inode and return a dentry for that inode. Any suitable
1351 * dentry can be returned including, if necessary, a new dentry created with
1352 * d_alloc_root. The caller can then find any other extant dentrys by
1353 * following the d_alias links. If a new dentry was created using
1354 * d_alloc_root, DCACHE_NFSD_DISCONNECTED should be set, and the dentry
1355 * should be d_rehash()ed.
1356 *
1357 * If the inode cannot be found, either a %NULL pointer or an %ERR_PTR code
1358 * can be returned. The @inump will be whatever was passed to
1359 * nfsd_find_fh_dentry() in either the @obj or @parent parameters.
1360 *
1361 * Locking rules:
1362 * get_parent is called with child->d_inode->i_mutex down
1363 * get_name is not (which is possibly inconsistent)
1364 */
1365
1366struct export_operations {
1367 struct dentry *(*decode_fh)(struct super_block *sb, __u32 *fh, int fh_len, int fh_type,
1368 int (*acceptable)(void *context, struct dentry *de),
1369 void *context);
1370 int (*encode_fh)(struct dentry *de, __u32 *fh, int *max_len,
1371 int connectable);
1372
1373 /* the following are only called from the filesystem itself */
1374 int (*get_name)(struct dentry *parent, char *name,
1375 struct dentry *child);
1376 struct dentry * (*get_parent)(struct dentry *child);
1377 struct dentry * (*get_dentry)(struct super_block *sb, void *inump);
1378
1379 /* This is set by the exporting module to a standard helper */
1380 struct dentry * (*find_exported_dentry)(
1381 struct super_block *sb, void *obj, void *parent,
1382 int (*acceptable)(void *context, struct dentry *de),
1383 void *context);
1384
1385
1386};
1387
1388extern struct dentry *
1389find_exported_dentry(struct super_block *sb, void *obj, void *parent,
1390 int (*acceptable)(void *context, struct dentry *de),
1391 void *context);
1392
1393struct file_system_type { 1286struct file_system_type {
1394 const char *name; 1287 const char *name;
1395 int fs_flags; 1288 int fs_flags;
@@ -1526,7 +1419,7 @@ extern void putname(const char *name);
1526 1419
1527#ifdef CONFIG_BLOCK 1420#ifdef CONFIG_BLOCK
1528extern int register_blkdev(unsigned int, const char *); 1421extern int register_blkdev(unsigned int, const char *);
1529extern int unregister_blkdev(unsigned int, const char *); 1422extern void unregister_blkdev(unsigned int, const char *);
1530extern struct block_device *bdget(dev_t); 1423extern struct block_device *bdget(dev_t);
1531extern void bd_set_size(struct block_device *, loff_t size); 1424extern void bd_set_size(struct block_device *, loff_t size);
1532extern void bd_forget(struct inode *inode); 1425extern void bd_forget(struct inode *inode);
@@ -2050,5 +1943,9 @@ static inline void free_secdata(void *secdata)
2050{ } 1943{ }
2051#endif /* CONFIG_SECURITY */ 1944#endif /* CONFIG_SECURITY */
2052 1945
1946int proc_nr_files(ctl_table *table, int write, struct file *filp,
1947 void __user *buffer, size_t *lenp, loff_t *ppos);
1948
1949
2053#endif /* __KERNEL__ */ 1950#endif /* __KERNEL__ */
2054#endif /* _LINUX_FS_H */ 1951#endif /* _LINUX_FS_H */
diff --git a/include/linux/fsl_devices.h b/include/linux/fsl_devices.h
index 12e631f0fb77..695741b0e420 100644
--- a/include/linux/fsl_devices.h
+++ b/include/linux/fsl_devices.h
@@ -112,7 +112,7 @@ struct fsl_usb2_platform_data {
112struct fsl_spi_platform_data { 112struct fsl_spi_platform_data {
113 u32 initial_spmode; /* initial SPMODE value */ 113 u32 initial_spmode; /* initial SPMODE value */
114 u16 bus_num; 114 u16 bus_num;
115 115 bool qe_mode;
116 /* board specific information */ 116 /* board specific information */
117 u16 max_chipselect; 117 u16 max_chipselect;
118 void (*activate_cs)(u8 cs, u8 polarity); 118 void (*activate_cs)(u8 cs, u8 polarity);
diff --git a/include/linux/gfp.h b/include/linux/gfp.h
index 0d2ef0b082a6..bc68dd9a6d41 100644
--- a/include/linux/gfp.h
+++ b/include/linux/gfp.h
@@ -30,6 +30,9 @@ struct vm_area_struct;
30 * cannot handle allocation failures. 30 * cannot handle allocation failures.
31 * 31 *
32 * __GFP_NORETRY: The VM implementation must not retry indefinitely. 32 * __GFP_NORETRY: The VM implementation must not retry indefinitely.
33 *
34 * __GFP_MOVABLE: Flag that this page will be movable by the page migration
35 * mechanism or reclaimed
33 */ 36 */
34#define __GFP_WAIT ((__force gfp_t)0x10u) /* Can wait and reschedule? */ 37#define __GFP_WAIT ((__force gfp_t)0x10u) /* Can wait and reschedule? */
35#define __GFP_HIGH ((__force gfp_t)0x20u) /* Should access emergency pools? */ 38#define __GFP_HIGH ((__force gfp_t)0x20u) /* Should access emergency pools? */
@@ -45,6 +48,7 @@ struct vm_area_struct;
45#define __GFP_NOMEMALLOC ((__force gfp_t)0x10000u) /* Don't use emergency reserves */ 48#define __GFP_NOMEMALLOC ((__force gfp_t)0x10000u) /* Don't use emergency reserves */
46#define __GFP_HARDWALL ((__force gfp_t)0x20000u) /* Enforce hardwall cpuset memory allocs */ 49#define __GFP_HARDWALL ((__force gfp_t)0x20000u) /* Enforce hardwall cpuset memory allocs */
47#define __GFP_THISNODE ((__force gfp_t)0x40000u)/* No fallback, no policies */ 50#define __GFP_THISNODE ((__force gfp_t)0x40000u)/* No fallback, no policies */
51#define __GFP_MOVABLE ((__force gfp_t)0x80000u) /* Page is movable */
48 52
49#define __GFP_BITS_SHIFT 20 /* Room for 20 __GFP_FOO bits */ 53#define __GFP_BITS_SHIFT 20 /* Room for 20 __GFP_FOO bits */
50#define __GFP_BITS_MASK ((__force gfp_t)((1 << __GFP_BITS_SHIFT) - 1)) 54#define __GFP_BITS_MASK ((__force gfp_t)((1 << __GFP_BITS_SHIFT) - 1))
@@ -53,7 +57,8 @@ struct vm_area_struct;
53#define GFP_LEVEL_MASK (__GFP_WAIT|__GFP_HIGH|__GFP_IO|__GFP_FS| \ 57#define GFP_LEVEL_MASK (__GFP_WAIT|__GFP_HIGH|__GFP_IO|__GFP_FS| \
54 __GFP_COLD|__GFP_NOWARN|__GFP_REPEAT| \ 58 __GFP_COLD|__GFP_NOWARN|__GFP_REPEAT| \
55 __GFP_NOFAIL|__GFP_NORETRY|__GFP_COMP| \ 59 __GFP_NOFAIL|__GFP_NORETRY|__GFP_COMP| \
56 __GFP_NOMEMALLOC|__GFP_HARDWALL|__GFP_THISNODE) 60 __GFP_NOMEMALLOC|__GFP_HARDWALL|__GFP_THISNODE| \
61 __GFP_MOVABLE)
57 62
58/* This equals 0, but use constants in case they ever change */ 63/* This equals 0, but use constants in case they ever change */
59#define GFP_NOWAIT (GFP_ATOMIC & ~__GFP_HIGH) 64#define GFP_NOWAIT (GFP_ATOMIC & ~__GFP_HIGH)
@@ -65,6 +70,15 @@ struct vm_area_struct;
65#define GFP_USER (__GFP_WAIT | __GFP_IO | __GFP_FS | __GFP_HARDWALL) 70#define GFP_USER (__GFP_WAIT | __GFP_IO | __GFP_FS | __GFP_HARDWALL)
66#define GFP_HIGHUSER (__GFP_WAIT | __GFP_IO | __GFP_FS | __GFP_HARDWALL | \ 71#define GFP_HIGHUSER (__GFP_WAIT | __GFP_IO | __GFP_FS | __GFP_HARDWALL | \
67 __GFP_HIGHMEM) 72 __GFP_HIGHMEM)
73#define GFP_HIGHUSER_MOVABLE (__GFP_WAIT | __GFP_IO | __GFP_FS | \
74 __GFP_HARDWALL | __GFP_HIGHMEM | \
75 __GFP_MOVABLE)
76#define GFP_NOFS_PAGECACHE (__GFP_WAIT | __GFP_IO | __GFP_MOVABLE)
77#define GFP_USER_PAGECACHE (__GFP_WAIT | __GFP_IO | __GFP_FS | \
78 __GFP_HARDWALL | __GFP_MOVABLE)
79#define GFP_HIGHUSER_PAGECACHE (__GFP_WAIT | __GFP_IO | __GFP_FS | \
80 __GFP_HARDWALL | __GFP_HIGHMEM | \
81 __GFP_MOVABLE)
68 82
69#ifdef CONFIG_NUMA 83#ifdef CONFIG_NUMA
70#define GFP_THISNODE (__GFP_THISNODE | __GFP_NOWARN | __GFP_NORETRY) 84#define GFP_THISNODE (__GFP_THISNODE | __GFP_NOWARN | __GFP_NORETRY)
@@ -92,6 +106,9 @@ static inline enum zone_type gfp_zone(gfp_t flags)
92 if (flags & __GFP_DMA32) 106 if (flags & __GFP_DMA32)
93 return ZONE_DMA32; 107 return ZONE_DMA32;
94#endif 108#endif
109 if ((flags & (__GFP_HIGHMEM | __GFP_MOVABLE)) ==
110 (__GFP_HIGHMEM | __GFP_MOVABLE))
111 return ZONE_MOVABLE;
95#ifdef CONFIG_HIGHMEM 112#ifdef CONFIG_HIGHMEM
96 if (flags & __GFP_HIGHMEM) 113 if (flags & __GFP_HIGHMEM)
97 return ZONE_HIGHMEM; 114 return ZONE_HIGHMEM;
diff --git a/include/linux/highmem.h b/include/linux/highmem.h
index 98e2cce996a4..12c5e4e3135a 100644
--- a/include/linux/highmem.h
+++ b/include/linux/highmem.h
@@ -73,10 +73,27 @@ static inline void clear_user_highpage(struct page *page, unsigned long vaddr)
73} 73}
74 74
75#ifndef __HAVE_ARCH_ALLOC_ZEROED_USER_HIGHPAGE 75#ifndef __HAVE_ARCH_ALLOC_ZEROED_USER_HIGHPAGE
76/**
77 * __alloc_zeroed_user_highpage - Allocate a zeroed HIGHMEM page for a VMA with caller-specified movable GFP flags
78 * @movableflags: The GFP flags related to the pages future ability to move like __GFP_MOVABLE
79 * @vma: The VMA the page is to be allocated for
80 * @vaddr: The virtual address the page will be inserted into
81 *
82 * This function will allocate a page for a VMA but the caller is expected
83 * to specify via movableflags whether the page will be movable in the
84 * future or not
85 *
86 * An architecture may override this function by defining
87 * __HAVE_ARCH_ALLOC_ZEROED_USER_HIGHPAGE and providing their own
88 * implementation.
89 */
76static inline struct page * 90static inline struct page *
77alloc_zeroed_user_highpage(struct vm_area_struct *vma, unsigned long vaddr) 91__alloc_zeroed_user_highpage(gfp_t movableflags,
92 struct vm_area_struct *vma,
93 unsigned long vaddr)
78{ 94{
79 struct page *page = alloc_page_vma(GFP_HIGHUSER, vma, vaddr); 95 struct page *page = alloc_page_vma(GFP_HIGHUSER | movableflags,
96 vma, vaddr);
80 97
81 if (page) 98 if (page)
82 clear_user_highpage(page, vaddr); 99 clear_user_highpage(page, vaddr);
@@ -85,6 +102,36 @@ alloc_zeroed_user_highpage(struct vm_area_struct *vma, unsigned long vaddr)
85} 102}
86#endif 103#endif
87 104
105/**
106 * alloc_zeroed_user_highpage - Allocate a zeroed HIGHMEM page for a VMA
107 * @vma: The VMA the page is to be allocated for
108 * @vaddr: The virtual address the page will be inserted into
109 *
110 * This function will allocate a page for a VMA that the caller knows will
111 * not be able to move in the future using move_pages() or reclaim. If it
112 * is known that the page can move, use alloc_zeroed_user_highpage_movable
113 */
114static inline struct page *
115alloc_zeroed_user_highpage(struct vm_area_struct *vma, unsigned long vaddr)
116{
117 return __alloc_zeroed_user_highpage(0, vma, vaddr);
118}
119
120/**
121 * alloc_zeroed_user_highpage_movable - Allocate a zeroed HIGHMEM page for a VMA that the caller knows can move
122 * @vma: The VMA the page is to be allocated for
123 * @vaddr: The virtual address the page will be inserted into
124 *
125 * This function will allocate a page for a VMA that the caller knows will
126 * be able to migrate in the future using move_pages() or reclaimed
127 */
128static inline struct page *
129alloc_zeroed_user_highpage_movable(struct vm_area_struct *vma,
130 unsigned long vaddr)
131{
132 return __alloc_zeroed_user_highpage(__GFP_MOVABLE, vma, vaddr);
133}
134
88static inline void clear_highpage(struct page *page) 135static inline void clear_highpage(struct page *page)
89{ 136{
90 void *kaddr = kmap_atomic(page, KM_USER0); 137 void *kaddr = kmap_atomic(page, KM_USER0);
diff --git a/include/linux/hugetlb.h b/include/linux/hugetlb.h
index 2c13715e9dde..49b7053043ad 100644
--- a/include/linux/hugetlb.h
+++ b/include/linux/hugetlb.h
@@ -15,6 +15,7 @@ static inline int is_vm_hugetlb_page(struct vm_area_struct *vma)
15} 15}
16 16
17int hugetlb_sysctl_handler(struct ctl_table *, int, struct file *, void __user *, size_t *, loff_t *); 17int hugetlb_sysctl_handler(struct ctl_table *, int, struct file *, void __user *, size_t *, loff_t *);
18int hugetlb_treat_movable_handler(struct ctl_table *, int, struct file *, void __user *, size_t *, loff_t *);
18int copy_hugetlb_page_range(struct mm_struct *, struct mm_struct *, struct vm_area_struct *); 19int copy_hugetlb_page_range(struct mm_struct *, struct mm_struct *, struct vm_area_struct *);
19int follow_hugetlb_page(struct mm_struct *, struct vm_area_struct *, struct page **, struct vm_area_struct **, unsigned long *, int *, int); 20int follow_hugetlb_page(struct mm_struct *, struct vm_area_struct *, struct page **, struct vm_area_struct **, unsigned long *, int *, int);
20void unmap_hugepage_range(struct vm_area_struct *, unsigned long, unsigned long); 21void unmap_hugepage_range(struct vm_area_struct *, unsigned long, unsigned long);
@@ -29,6 +30,7 @@ int hugetlb_reserve_pages(struct inode *inode, long from, long to);
29void hugetlb_unreserve_pages(struct inode *inode, long offset, long freed); 30void hugetlb_unreserve_pages(struct inode *inode, long offset, long freed);
30 31
31extern unsigned long max_huge_pages; 32extern unsigned long max_huge_pages;
33extern unsigned long hugepages_treat_as_movable;
32extern const unsigned long hugetlb_zero, hugetlb_infinity; 34extern const unsigned long hugetlb_zero, hugetlb_infinity;
33extern int sysctl_hugetlb_shm_group; 35extern int sysctl_hugetlb_shm_group;
34 36
diff --git a/include/linux/kallsyms.h b/include/linux/kallsyms.h
index 5f06527dca21..f73de6fb5c68 100644
--- a/include/linux/kallsyms.h
+++ b/include/linux/kallsyms.h
@@ -7,9 +7,9 @@
7 7
8#include <linux/errno.h> 8#include <linux/errno.h>
9 9
10#define KSYM_NAME_LEN 127 10#define KSYM_NAME_LEN 128
11#define KSYM_SYMBOL_LEN (sizeof("%s+%#lx/%#lx [%s]") + KSYM_NAME_LEN + \ 11#define KSYM_SYMBOL_LEN (sizeof("%s+%#lx/%#lx [%s]") + (KSYM_NAME_LEN - 1) + \
12 2*(BITS_PER_LONG*3/10) + MODULE_NAME_LEN + 1) 12 2*(BITS_PER_LONG*3/10) + (MODULE_NAME_LEN - 1) + 1)
13 13
14#ifdef CONFIG_KALLSYMS 14#ifdef CONFIG_KALLSYMS
15/* Lookup the address for a symbol. Returns 0 if not found. */ 15/* Lookup the address for a symbol. Returns 0 if not found. */
diff --git a/include/linux/kernel.h b/include/linux/kernel.h
index 7a4852505914..1eb9cde550c4 100644
--- a/include/linux/kernel.h
+++ b/include/linux/kernel.h
@@ -210,6 +210,7 @@ extern enum system_states {
210#define TAINT_MACHINE_CHECK (1<<4) 210#define TAINT_MACHINE_CHECK (1<<4)
211#define TAINT_BAD_PAGE (1<<5) 211#define TAINT_BAD_PAGE (1<<5)
212#define TAINT_USER (1<<6) 212#define TAINT_USER (1<<6)
213#define TAINT_DIE (1<<7)
213 214
214extern void dump_stack(void); 215extern void dump_stack(void);
215 216
diff --git a/include/linux/kernelcapi.h b/include/linux/kernelcapi.h
index aea34e74c496..8c4350a9ed87 100644
--- a/include/linux/kernelcapi.h
+++ b/include/linux/kernelcapi.h
@@ -64,7 +64,7 @@ struct capi20_appl {
64 unsigned long nrecvdatapkt; 64 unsigned long nrecvdatapkt;
65 unsigned long nsentctlpkt; 65 unsigned long nsentctlpkt;
66 unsigned long nsentdatapkt; 66 unsigned long nsentdatapkt;
67 struct semaphore recv_sem; 67 struct mutex recv_mtx;
68 struct sk_buff_head recv_queue; 68 struct sk_buff_head recv_queue;
69 struct work_struct recv_work; 69 struct work_struct recv_work;
70 int release_in_progress; 70 int release_in_progress;
diff --git a/include/linux/limits.h b/include/linux/limits.h
index eaf2e099f125..2d0f94162fb3 100644
--- a/include/linux/limits.h
+++ b/include/linux/limits.h
@@ -5,8 +5,6 @@
5 5
6#define NGROUPS_MAX 65536 /* supplemental group IDs are available */ 6#define NGROUPS_MAX 65536 /* supplemental group IDs are available */
7#define ARG_MAX 131072 /* # bytes of args + environ for exec() */ 7#define ARG_MAX 131072 /* # bytes of args + environ for exec() */
8#define CHILD_MAX 999 /* no limit :-) */
9#define OPEN_MAX 256 /* # open files a process may have */
10#define LINK_MAX 127 /* # links a file may have */ 8#define LINK_MAX 127 /* # links a file may have */
11#define MAX_CANON 255 /* size of the canonical input queue */ 9#define MAX_CANON 255 /* size of the canonical input queue */
12#define MAX_INPUT 255 /* size of the type-ahead buffer */ 10#define MAX_INPUT 255 /* size of the type-ahead buffer */
diff --git a/include/linux/linux_logo.h b/include/linux/linux_logo.h
index 9c01bde5bf1b..08a92969c76e 100644
--- a/include/linux/linux_logo.h
+++ b/include/linux/linux_logo.h
@@ -33,5 +33,13 @@ struct linux_logo {
33}; 33};
34 34
35extern const struct linux_logo *fb_find_logo(int depth); 35extern const struct linux_logo *fb_find_logo(int depth);
36#ifdef CONFIG_FB_LOGO_EXTRA
37extern void fb_append_extra_logo(const struct linux_logo *logo,
38 unsigned int n);
39#else
40static inline void fb_append_extra_logo(const struct linux_logo *logo,
41 unsigned int n)
42{}
43#endif
36 44
37#endif /* _LINUX_LINUX_LOGO_H */ 45#endif /* _LINUX_LINUX_LOGO_H */
diff --git a/include/linux/lockd/bind.h b/include/linux/lockd/bind.h
index 246de1d84a26..6f1637c61e10 100644
--- a/include/linux/lockd/bind.h
+++ b/include/linux/lockd/bind.h
@@ -27,6 +27,7 @@ struct nlmsvc_binding {
27 struct nfs_fh *, 27 struct nfs_fh *,
28 struct file **); 28 struct file **);
29 void (*fclose)(struct file *); 29 void (*fclose)(struct file *);
30 unsigned long (*get_grace_period)(void);
30}; 31};
31 32
32extern struct nlmsvc_binding * nlmsvc_ops; 33extern struct nlmsvc_binding * nlmsvc_ops;
@@ -38,4 +39,12 @@ extern int nlmclnt_proc(struct inode *, int, struct file_lock *);
38extern int lockd_up(int proto); 39extern int lockd_up(int proto);
39extern void lockd_down(void); 40extern void lockd_down(void);
40 41
42unsigned long get_nfs_grace_period(void);
43
44#ifdef CONFIG_NFSD_V4
45unsigned long get_nfs4_grace_period(void);
46#else
47static inline unsigned long get_nfs4_grace_period(void) {return 0;}
48#endif
49
41#endif /* LINUX_LOCKD_BIND_H */ 50#endif /* LINUX_LOCKD_BIND_H */
diff --git a/include/linux/magic.h b/include/linux/magic.h
index 9d713c03e3da..36cc20dfd142 100644
--- a/include/linux/magic.h
+++ b/include/linux/magic.h
@@ -13,7 +13,6 @@
13#define HPFS_SUPER_MAGIC 0xf995e849 13#define HPFS_SUPER_MAGIC 0xf995e849
14#define ISOFS_SUPER_MAGIC 0x9660 14#define ISOFS_SUPER_MAGIC 0x9660
15#define JFFS2_SUPER_MAGIC 0x72b6 15#define JFFS2_SUPER_MAGIC 0x72b6
16#define KVMFS_SUPER_MAGIC 0x19700426
17#define ANON_INODE_FS_MAGIC 0x09041934 16#define ANON_INODE_FS_MAGIC 0x09041934
18 17
19#define MINIX_SUPER_MAGIC 0x137F /* original minix fs */ 18#define MINIX_SUPER_MAGIC 0x137F /* original minix fs */
diff --git a/include/linux/mempolicy.h b/include/linux/mempolicy.h
index daabb3aa1ec6..e147cf50529f 100644
--- a/include/linux/mempolicy.h
+++ b/include/linux/mempolicy.h
@@ -159,7 +159,7 @@ extern void mpol_fix_fork_child_flag(struct task_struct *p);
159 159
160extern struct mempolicy default_policy; 160extern struct mempolicy default_policy;
161extern struct zonelist *huge_zonelist(struct vm_area_struct *vma, 161extern struct zonelist *huge_zonelist(struct vm_area_struct *vma,
162 unsigned long addr); 162 unsigned long addr, gfp_t gfp_flags);
163extern unsigned slab_node(struct mempolicy *policy); 163extern unsigned slab_node(struct mempolicy *policy);
164 164
165extern enum zone_type policy_zone; 165extern enum zone_type policy_zone;
@@ -256,9 +256,9 @@ static inline void mpol_fix_fork_child_flag(struct task_struct *p)
256#define set_cpuset_being_rebound(x) do {} while (0) 256#define set_cpuset_being_rebound(x) do {} while (0)
257 257
258static inline struct zonelist *huge_zonelist(struct vm_area_struct *vma, 258static inline struct zonelist *huge_zonelist(struct vm_area_struct *vma,
259 unsigned long addr) 259 unsigned long addr, gfp_t gfp_flags)
260{ 260{
261 return NODE_DATA(0)->node_zonelists + gfp_zone(GFP_HIGHUSER); 261 return NODE_DATA(0)->node_zonelists + gfp_zone(gfp_flags);
262} 262}
263 263
264static inline int do_migrate_pages(struct mm_struct *mm, 264static inline int do_migrate_pages(struct mm_struct *mm,
diff --git a/include/linux/mm.h b/include/linux/mm.h
index 97d0cddfd223..a5c451816fdc 100644
--- a/include/linux/mm.h
+++ b/include/linux/mm.h
@@ -599,6 +599,7 @@ static inline struct address_space *page_mapping(struct page *page)
599{ 599{
600 struct address_space *mapping = page->mapping; 600 struct address_space *mapping = page->mapping;
601 601
602 VM_BUG_ON(PageSlab(page));
602 if (unlikely(PageSwapCache(page))) 603 if (unlikely(PageSwapCache(page)))
603 mapping = &swapper_space; 604 mapping = &swapper_space;
604#ifdef CONFIG_SLUB 605#ifdef CONFIG_SLUB
@@ -810,27 +811,31 @@ extern unsigned long do_mremap(unsigned long addr,
810 unsigned long flags, unsigned long new_addr); 811 unsigned long flags, unsigned long new_addr);
811 812
812/* 813/*
813 * Prototype to add a shrinker callback for ageable caches. 814 * A callback you can register to apply pressure to ageable caches.
814 *
815 * These functions are passed a count `nr_to_scan' and a gfpmask. They should
816 * scan `nr_to_scan' objects, attempting to free them.
817 * 815 *
818 * The callback must return the number of objects which remain in the cache. 816 * 'shrink' is passed a count 'nr_to_scan' and a 'gfpmask'. It should
817 * look through the least-recently-used 'nr_to_scan' entries and
818 * attempt to free them up. It should return the number of objects
819 * which remain in the cache. If it returns -1, it means it cannot do
820 * any scanning at this time (eg. there is a risk of deadlock).
819 * 821 *
820 * The callback will be passed nr_to_scan == 0 when the VM is querying the 822 * The 'gfpmask' refers to the allocation we are currently trying to
821 * cache size, so a fastpath for that case is appropriate. 823 * fulfil.
822 */ 824 *
823typedef int (*shrinker_t)(int nr_to_scan, gfp_t gfp_mask); 825 * Note that 'shrink' will be passed nr_to_scan == 0 when the VM is
824 826 * querying the cache size, so a fastpath for that case is appropriate.
825/*
826 * Add an aging callback. The int is the number of 'seeks' it takes
827 * to recreate one of the objects that these functions age.
828 */ 827 */
828struct shrinker {
829 int (*shrink)(int nr_to_scan, gfp_t gfp_mask);
830 int seeks; /* seeks to recreate an obj */
829 831
830#define DEFAULT_SEEKS 2 832 /* These are for internal use */
831struct shrinker; 833 struct list_head list;
832extern struct shrinker *set_shrinker(int, shrinker_t); 834 long nr; /* objs pending delete */
833extern void remove_shrinker(struct shrinker *shrinker); 835};
836#define DEFAULT_SEEKS 2 /* A good number if you don't know better. */
837extern void register_shrinker(struct shrinker *);
838extern void unregister_shrinker(struct shrinker *);
834 839
835/* 840/*
836 * Some shared mappigns will want the pages marked read-only 841 * Some shared mappigns will want the pages marked read-only
diff --git a/include/linux/mmzone.h b/include/linux/mmzone.h
index 04b1636a970b..da8eb8ad9e9b 100644
--- a/include/linux/mmzone.h
+++ b/include/linux/mmzone.h
@@ -24,6 +24,14 @@
24#endif 24#endif
25#define MAX_ORDER_NR_PAGES (1 << (MAX_ORDER - 1)) 25#define MAX_ORDER_NR_PAGES (1 << (MAX_ORDER - 1))
26 26
27/*
28 * PAGE_ALLOC_COSTLY_ORDER is the order at which allocations are deemed
29 * costly to service. That is between allocation orders which should
30 * coelesce naturally under reasonable reclaim pressure and those which
31 * will not.
32 */
33#define PAGE_ALLOC_COSTLY_ORDER 3
34
27struct free_area { 35struct free_area {
28 struct list_head free_list; 36 struct list_head free_list;
29 unsigned long nr_free; 37 unsigned long nr_free;
@@ -146,6 +154,7 @@ enum zone_type {
146 */ 154 */
147 ZONE_HIGHMEM, 155 ZONE_HIGHMEM,
148#endif 156#endif
157 ZONE_MOVABLE,
149 MAX_NR_ZONES 158 MAX_NR_ZONES
150}; 159};
151 160
@@ -167,6 +176,7 @@ enum zone_type {
167 + defined(CONFIG_ZONE_DMA32) \ 176 + defined(CONFIG_ZONE_DMA32) \
168 + 1 \ 177 + 1 \
169 + defined(CONFIG_HIGHMEM) \ 178 + defined(CONFIG_HIGHMEM) \
179 + 1 \
170) 180)
171#if __ZONE_COUNT < 2 181#if __ZONE_COUNT < 2
172#define ZONES_SHIFT 0 182#define ZONES_SHIFT 0
@@ -499,10 +509,22 @@ static inline int populated_zone(struct zone *zone)
499 return (!!zone->present_pages); 509 return (!!zone->present_pages);
500} 510}
501 511
512extern int movable_zone;
513
514static inline int zone_movable_is_highmem(void)
515{
516#if defined(CONFIG_HIGHMEM) && defined(CONFIG_ARCH_POPULATES_NODE_MAP)
517 return movable_zone == ZONE_HIGHMEM;
518#else
519 return 0;
520#endif
521}
522
502static inline int is_highmem_idx(enum zone_type idx) 523static inline int is_highmem_idx(enum zone_type idx)
503{ 524{
504#ifdef CONFIG_HIGHMEM 525#ifdef CONFIG_HIGHMEM
505 return (idx == ZONE_HIGHMEM); 526 return (idx == ZONE_HIGHMEM ||
527 (idx == ZONE_MOVABLE && zone_movable_is_highmem()));
506#else 528#else
507 return 0; 529 return 0;
508#endif 530#endif
@@ -522,7 +544,9 @@ static inline int is_normal_idx(enum zone_type idx)
522static inline int is_highmem(struct zone *zone) 544static inline int is_highmem(struct zone *zone)
523{ 545{
524#ifdef CONFIG_HIGHMEM 546#ifdef CONFIG_HIGHMEM
525 return zone == zone->zone_pgdat->node_zones + ZONE_HIGHMEM; 547 int zone_idx = zone - zone->zone_pgdat->node_zones;
548 return zone_idx == ZONE_HIGHMEM ||
549 (zone_idx == ZONE_MOVABLE && zone_movable_is_highmem());
526#else 550#else
527 return 0; 551 return 0;
528#endif 552#endif
diff --git a/include/linux/nfsd/export.h b/include/linux/nfsd/export.h
index 9f62d6182d32..78feb7beff75 100644
--- a/include/linux/nfsd/export.h
+++ b/include/linux/nfsd/export.h
@@ -42,6 +42,9 @@
42#define NFSEXP_NOACL 0x8000 /* reserved for possible ACL related use */ 42#define NFSEXP_NOACL 0x8000 /* reserved for possible ACL related use */
43#define NFSEXP_ALLFLAGS 0xFE3F 43#define NFSEXP_ALLFLAGS 0xFE3F
44 44
45/* The flags that may vary depending on security flavor: */
46#define NFSEXP_SECINFO_FLAGS (NFSEXP_READONLY | NFSEXP_ROOTSQUASH \
47 | NFSEXP_ALLSQUASH)
45 48
46#ifdef __KERNEL__ 49#ifdef __KERNEL__
47 50
@@ -64,6 +67,19 @@ struct nfsd4_fs_locations {
64 int migrated; 67 int migrated;
65}; 68};
66 69
70/*
71 * We keep an array of pseudoflavors with the export, in order from most
72 * to least preferred. For the forseeable future, we don't expect more
73 * than the eight pseudoflavors null, unix, krb5, krb5i, krb5p, skpm3,
74 * spkm3i, and spkm3p (and using all 8 at once should be rare).
75 */
76#define MAX_SECINFO_LIST 8
77
78struct exp_flavor_info {
79 u32 pseudoflavor;
80 u32 flags;
81};
82
67struct svc_export { 83struct svc_export {
68 struct cache_head h; 84 struct cache_head h;
69 struct auth_domain * ex_client; 85 struct auth_domain * ex_client;
@@ -76,6 +92,8 @@ struct svc_export {
76 int ex_fsid; 92 int ex_fsid;
77 unsigned char * ex_uuid; /* 16 byte fsid */ 93 unsigned char * ex_uuid; /* 16 byte fsid */
78 struct nfsd4_fs_locations ex_fslocs; 94 struct nfsd4_fs_locations ex_fslocs;
95 int ex_nflavors;
96 struct exp_flavor_info ex_flavors[MAX_SECINFO_LIST];
79}; 97};
80 98
81/* an "export key" (expkey) maps a filehandlefragement to an 99/* an "export key" (expkey) maps a filehandlefragement to an
@@ -95,10 +113,22 @@ struct svc_expkey {
95 113
96#define EX_SECURE(exp) (!((exp)->ex_flags & NFSEXP_INSECURE_PORT)) 114#define EX_SECURE(exp) (!((exp)->ex_flags & NFSEXP_INSECURE_PORT))
97#define EX_ISSYNC(exp) (!((exp)->ex_flags & NFSEXP_ASYNC)) 115#define EX_ISSYNC(exp) (!((exp)->ex_flags & NFSEXP_ASYNC))
98#define EX_RDONLY(exp) ((exp)->ex_flags & NFSEXP_READONLY)
99#define EX_NOHIDE(exp) ((exp)->ex_flags & NFSEXP_NOHIDE) 116#define EX_NOHIDE(exp) ((exp)->ex_flags & NFSEXP_NOHIDE)
100#define EX_WGATHER(exp) ((exp)->ex_flags & NFSEXP_GATHERED_WRITES) 117#define EX_WGATHER(exp) ((exp)->ex_flags & NFSEXP_GATHERED_WRITES)
101 118
119static inline int EX_RDONLY(struct svc_export *exp, struct svc_rqst *rqstp)
120{
121 struct exp_flavor_info *f;
122 struct exp_flavor_info *end = exp->ex_flavors + exp->ex_nflavors;
123
124 for (f = exp->ex_flavors; f < end; f++) {
125 if (f->pseudoflavor == rqstp->rq_flavor)
126 return f->flags & NFSEXP_READONLY;
127 }
128 return exp->ex_flags & NFSEXP_READONLY;
129}
130
131__be32 check_nfsd_access(struct svc_export *exp, struct svc_rqst *rqstp);
102 132
103/* 133/*
104 * Function declarations 134 * Function declarations
@@ -112,13 +142,19 @@ struct svc_export * exp_get_by_name(struct auth_domain *clp,
112 struct vfsmount *mnt, 142 struct vfsmount *mnt,
113 struct dentry *dentry, 143 struct dentry *dentry,
114 struct cache_req *reqp); 144 struct cache_req *reqp);
145struct svc_export * rqst_exp_get_by_name(struct svc_rqst *,
146 struct vfsmount *,
147 struct dentry *);
115struct svc_export * exp_parent(struct auth_domain *clp, 148struct svc_export * exp_parent(struct auth_domain *clp,
116 struct vfsmount *mnt, 149 struct vfsmount *mnt,
117 struct dentry *dentry, 150 struct dentry *dentry,
118 struct cache_req *reqp); 151 struct cache_req *reqp);
152struct svc_export * rqst_exp_parent(struct svc_rqst *,
153 struct vfsmount *mnt,
154 struct dentry *dentry);
119int exp_rootfh(struct auth_domain *, 155int exp_rootfh(struct auth_domain *,
120 char *path, struct knfsd_fh *, int maxsize); 156 char *path, struct knfsd_fh *, int maxsize);
121__be32 exp_pseudoroot(struct auth_domain *, struct svc_fh *fhp, struct cache_req *creq); 157__be32 exp_pseudoroot(struct svc_rqst *, struct svc_fh *);
122__be32 nfserrno(int errno); 158__be32 nfserrno(int errno);
123 159
124extern struct cache_detail svc_export_cache; 160extern struct cache_detail svc_export_cache;
@@ -135,6 +171,7 @@ static inline void exp_get(struct svc_export *exp)
135extern struct svc_export * 171extern struct svc_export *
136exp_find(struct auth_domain *clp, int fsid_type, u32 *fsidv, 172exp_find(struct auth_domain *clp, int fsid_type, u32 *fsidv,
137 struct cache_req *reqp); 173 struct cache_req *reqp);
174struct svc_export * rqst_exp_find(struct svc_rqst *, int, u32 *);
138 175
139#endif /* __KERNEL__ */ 176#endif /* __KERNEL__ */
140 177
diff --git a/include/linux/nfsd/interface.h b/include/linux/nfsd/interface.h
deleted file mode 100644
index af0979704afb..000000000000
--- a/include/linux/nfsd/interface.h
+++ /dev/null
@@ -1,13 +0,0 @@
1/*
2 * include/linux/nfsd/interface.h
3 *
4 * defines interface between nfsd and other bits of
5 * the kernel. Particularly filesystems (eventually).
6 *
7 * Copyright (C) 2000 Neil Brown <neilb@cse.unsw.edu.au>
8 */
9
10#ifndef LINUX_NFSD_INTERFACE_H
11#define LINUX_NFSD_INTERFACE_H
12
13#endif /* LINUX_NFSD_INTERFACE_H */
diff --git a/include/linux/nfsd/nfsd.h b/include/linux/nfsd/nfsd.h
index 72feac581aa3..e452256d3f72 100644
--- a/include/linux/nfsd/nfsd.h
+++ b/include/linux/nfsd/nfsd.h
@@ -22,7 +22,6 @@
22#include <linux/nfsd/export.h> 22#include <linux/nfsd/export.h>
23#include <linux/nfsd/auth.h> 23#include <linux/nfsd/auth.h>
24#include <linux/nfsd/stats.h> 24#include <linux/nfsd/stats.h>
25#include <linux/nfsd/interface.h>
26/* 25/*
27 * nfsd version 26 * nfsd version
28 */ 27 */
@@ -72,6 +71,9 @@ int nfsd_cross_mnt(struct svc_rqst *rqstp, struct dentry **dpp,
72 struct svc_export **expp); 71 struct svc_export **expp);
73__be32 nfsd_lookup(struct svc_rqst *, struct svc_fh *, 72__be32 nfsd_lookup(struct svc_rqst *, struct svc_fh *,
74 const char *, int, struct svc_fh *); 73 const char *, int, struct svc_fh *);
74__be32 nfsd_lookup_dentry(struct svc_rqst *, struct svc_fh *,
75 const char *, int,
76 struct svc_export **, struct dentry **);
75__be32 nfsd_setattr(struct svc_rqst *, struct svc_fh *, 77__be32 nfsd_setattr(struct svc_rqst *, struct svc_fh *,
76 struct iattr *, int, time_t); 78 struct iattr *, int, time_t);
77#ifdef CONFIG_NFSD_V4 79#ifdef CONFIG_NFSD_V4
@@ -120,7 +122,8 @@ __be32 nfsd_statfs(struct svc_rqst *, struct svc_fh *,
120 struct kstatfs *); 122 struct kstatfs *);
121 123
122int nfsd_notify_change(struct inode *, struct iattr *); 124int nfsd_notify_change(struct inode *, struct iattr *);
123__be32 nfsd_permission(struct svc_export *, struct dentry *, int); 125__be32 nfsd_permission(struct svc_rqst *, struct svc_export *,
126 struct dentry *, int);
124int nfsd_sync_dir(struct dentry *dp); 127int nfsd_sync_dir(struct dentry *dp);
125 128
126#if defined(CONFIG_NFSD_V2_ACL) || defined(CONFIG_NFSD_V3_ACL) 129#if defined(CONFIG_NFSD_V2_ACL) || defined(CONFIG_NFSD_V3_ACL)
@@ -149,6 +152,7 @@ extern int nfsd_max_blksize;
149 * NFSv4 State 152 * NFSv4 State
150 */ 153 */
151#ifdef CONFIG_NFSD_V4 154#ifdef CONFIG_NFSD_V4
155extern unsigned int max_delegations;
152void nfs4_state_init(void); 156void nfs4_state_init(void);
153int nfs4_state_start(void); 157int nfs4_state_start(void);
154void nfs4_state_shutdown(void); 158void nfs4_state_shutdown(void);
@@ -236,6 +240,7 @@ void nfsd_lockd_shutdown(void);
236#define nfserr_badname __constant_htonl(NFSERR_BADNAME) 240#define nfserr_badname __constant_htonl(NFSERR_BADNAME)
237#define nfserr_cb_path_down __constant_htonl(NFSERR_CB_PATH_DOWN) 241#define nfserr_cb_path_down __constant_htonl(NFSERR_CB_PATH_DOWN)
238#define nfserr_locked __constant_htonl(NFSERR_LOCKED) 242#define nfserr_locked __constant_htonl(NFSERR_LOCKED)
243#define nfserr_wrongsec __constant_htonl(NFSERR_WRONGSEC)
239#define nfserr_replay_me __constant_htonl(NFSERR_REPLAY_ME) 244#define nfserr_replay_me __constant_htonl(NFSERR_REPLAY_ME)
240 245
241/* error codes for internal use */ 246/* error codes for internal use */
diff --git a/include/linux/nfsd/state.h b/include/linux/nfsd/state.h
index ab5c236bd9a7..db348f749376 100644
--- a/include/linux/nfsd/state.h
+++ b/include/linux/nfsd/state.h
@@ -67,7 +67,7 @@ struct nfs4_cb_recall {
67 int cbr_trunc; 67 int cbr_trunc;
68 stateid_t cbr_stateid; 68 stateid_t cbr_stateid;
69 u32 cbr_fhlen; 69 u32 cbr_fhlen;
70 u32 cbr_fhval[NFS4_FHSIZE]; 70 char cbr_fhval[NFS4_FHSIZE];
71 struct nfs4_delegation *cbr_dp; 71 struct nfs4_delegation *cbr_dp;
72}; 72};
73 73
@@ -224,6 +224,7 @@ struct nfs4_file {
224 struct inode *fi_inode; 224 struct inode *fi_inode;
225 u32 fi_id; /* used with stateowner->so_id 225 u32 fi_id; /* used with stateowner->so_id
226 * for stateid_hashtbl hash */ 226 * for stateid_hashtbl hash */
227 bool fi_had_conflict;
227}; 228};
228 229
229/* 230/*
diff --git a/include/linux/nfsd/xdr4.h b/include/linux/nfsd/xdr4.h
index 09799bcee0ac..1b653267133a 100644
--- a/include/linux/nfsd/xdr4.h
+++ b/include/linux/nfsd/xdr4.h
@@ -293,6 +293,12 @@ struct nfsd4_rename {
293 struct nfsd4_change_info rn_tinfo; /* response */ 293 struct nfsd4_change_info rn_tinfo; /* response */
294}; 294};
295 295
296struct nfsd4_secinfo {
297 u32 si_namelen; /* request */
298 char *si_name; /* request */
299 struct svc_export *si_exp; /* response */
300};
301
296struct nfsd4_setattr { 302struct nfsd4_setattr {
297 stateid_t sa_stateid; /* request */ 303 stateid_t sa_stateid; /* request */
298 u32 sa_bmval[2]; /* request */ 304 u32 sa_bmval[2]; /* request */
@@ -365,6 +371,7 @@ struct nfsd4_op {
365 struct nfsd4_remove remove; 371 struct nfsd4_remove remove;
366 struct nfsd4_rename rename; 372 struct nfsd4_rename rename;
367 clientid_t renew; 373 clientid_t renew;
374 struct nfsd4_secinfo secinfo;
368 struct nfsd4_setattr setattr; 375 struct nfsd4_setattr setattr;
369 struct nfsd4_setclientid setclientid; 376 struct nfsd4_setclientid setclientid;
370 struct nfsd4_setclientid_confirm setclientid_confirm; 377 struct nfsd4_setclientid_confirm setclientid_confirm;
diff --git a/include/linux/notifier.h b/include/linux/notifier.h
index 9431101bf876..576f2bb34cc8 100644
--- a/include/linux/notifier.h
+++ b/include/linux/notifier.h
@@ -196,6 +196,8 @@ extern int __srcu_notifier_call_chain(struct srcu_notifier_head *nh,
196#define CPU_DEAD 0x0007 /* CPU (unsigned)v dead */ 196#define CPU_DEAD 0x0007 /* CPU (unsigned)v dead */
197#define CPU_LOCK_ACQUIRE 0x0008 /* Acquire all hotcpu locks */ 197#define CPU_LOCK_ACQUIRE 0x0008 /* Acquire all hotcpu locks */
198#define CPU_LOCK_RELEASE 0x0009 /* Release all hotcpu locks */ 198#define CPU_LOCK_RELEASE 0x0009 /* Release all hotcpu locks */
199#define CPU_DYING 0x000A /* CPU (unsigned)v not running any task,
200 * not handling interrupts, soon dead */
199 201
200/* Used for CPU hotplug events occuring while tasks are frozen due to a suspend 202/* Used for CPU hotplug events occuring while tasks are frozen due to a suspend
201 * operation in progress 203 * operation in progress
@@ -208,6 +210,7 @@ extern int __srcu_notifier_call_chain(struct srcu_notifier_head *nh,
208#define CPU_DOWN_PREPARE_FROZEN (CPU_DOWN_PREPARE | CPU_TASKS_FROZEN) 210#define CPU_DOWN_PREPARE_FROZEN (CPU_DOWN_PREPARE | CPU_TASKS_FROZEN)
209#define CPU_DOWN_FAILED_FROZEN (CPU_DOWN_FAILED | CPU_TASKS_FROZEN) 211#define CPU_DOWN_FAILED_FROZEN (CPU_DOWN_FAILED | CPU_TASKS_FROZEN)
210#define CPU_DEAD_FROZEN (CPU_DEAD | CPU_TASKS_FROZEN) 212#define CPU_DEAD_FROZEN (CPU_DEAD | CPU_TASKS_FROZEN)
213#define CPU_DYING_FROZEN (CPU_DYING | CPU_TASKS_FROZEN)
211 214
212#endif /* __KERNEL__ */ 215#endif /* __KERNEL__ */
213#endif /* _LINUX_NOTIFIER_H */ 216#endif /* _LINUX_NOTIFIER_H */
diff --git a/include/linux/ptrace.h b/include/linux/ptrace.h
index eeb1976ef7bf..ae8146abd746 100644
--- a/include/linux/ptrace.h
+++ b/include/linux/ptrace.h
@@ -110,6 +110,8 @@ static inline void ptrace_unlink(struct task_struct *child)
110 __ptrace_unlink(child); 110 __ptrace_unlink(child);
111} 111}
112 112
113int generic_ptrace_peekdata(struct task_struct *tsk, long addr, long data);
114int generic_ptrace_pokedata(struct task_struct *tsk, long addr, long data);
113 115
114#ifndef force_successful_syscall_return 116#ifndef force_successful_syscall_return
115/* 117/*
diff --git a/include/linux/raid/bitmap.h b/include/linux/raid/bitmap.h
index dd5a05d03d4f..75e17a05540e 100644
--- a/include/linux/raid/bitmap.h
+++ b/include/linux/raid/bitmap.h
@@ -262,7 +262,7 @@ int bitmap_active(struct bitmap *bitmap);
262 262
263char *file_path(struct file *file, char *buf, int count); 263char *file_path(struct file *file, char *buf, int count);
264void bitmap_print_sb(struct bitmap *bitmap); 264void bitmap_print_sb(struct bitmap *bitmap);
265int bitmap_update_sb(struct bitmap *bitmap); 265void bitmap_update_sb(struct bitmap *bitmap);
266 266
267int bitmap_setallbits(struct bitmap *bitmap); 267int bitmap_setallbits(struct bitmap *bitmap);
268void bitmap_write_all(struct bitmap *bitmap); 268void bitmap_write_all(struct bitmap *bitmap);
@@ -278,8 +278,8 @@ int bitmap_start_sync(struct bitmap *bitmap, sector_t offset, int *blocks, int d
278void bitmap_end_sync(struct bitmap *bitmap, sector_t offset, int *blocks, int aborted); 278void bitmap_end_sync(struct bitmap *bitmap, sector_t offset, int *blocks, int aborted);
279void bitmap_close_sync(struct bitmap *bitmap); 279void bitmap_close_sync(struct bitmap *bitmap);
280 280
281int bitmap_unplug(struct bitmap *bitmap); 281void bitmap_unplug(struct bitmap *bitmap);
282int bitmap_daemon_work(struct bitmap *bitmap); 282void bitmap_daemon_work(struct bitmap *bitmap);
283#endif 283#endif
284 284
285#endif 285#endif
diff --git a/include/linux/raid/md_k.h b/include/linux/raid/md_k.h
index de72c49747c8..28ac632b42dd 100644
--- a/include/linux/raid/md_k.h
+++ b/include/linux/raid/md_k.h
@@ -51,7 +51,7 @@ struct mdk_rdev_s
51 51
52 sector_t size; /* Device size (in blocks) */ 52 sector_t size; /* Device size (in blocks) */
53 mddev_t *mddev; /* RAID array if running */ 53 mddev_t *mddev; /* RAID array if running */
54 unsigned long last_events; /* IO event timestamp */ 54 long last_events; /* IO event timestamp */
55 55
56 struct block_device *bdev; /* block device handle */ 56 struct block_device *bdev; /* block device handle */
57 57
diff --git a/include/linux/rtc/m48t59.h b/include/linux/rtc/m48t59.h
new file mode 100644
index 000000000000..e8c7c21ceb1f
--- /dev/null
+++ b/include/linux/rtc/m48t59.h
@@ -0,0 +1,57 @@
1/*
2 * include/linux/rtc/m48t59.h
3 *
4 * Definitions for the platform data of m48t59 RTC chip driver.
5 *
6 * Copyright (c) 2007 Wind River Systems, Inc.
7 *
8 * Mark Zhan <rongkai.zhan@windriver.com>
9 *
10 * This program is free software; you can redistribute it and/or modify
11 * it under the terms of the GNU General Public License version 2 as
12 * published by the Free Software Foundation.
13 */
14
15#ifndef _LINUX_RTC_M48T59_H_
16#define _LINUX_RTC_M48T59_H_
17
18/*
19 * M48T59 Register Offset
20 */
21#define M48T59_YEAR 0x1fff
22#define M48T59_MONTH 0x1ffe
23#define M48T59_MDAY 0x1ffd /* Day of Month */
24#define M48T59_WDAY 0x1ffc /* Day of Week */
25#define M48T59_WDAY_CB 0x20 /* Century Bit */
26#define M48T59_WDAY_CEB 0x10 /* Century Enable Bit */
27#define M48T59_HOUR 0x1ffb
28#define M48T59_MIN 0x1ffa
29#define M48T59_SEC 0x1ff9
30#define M48T59_CNTL 0x1ff8
31#define M48T59_CNTL_READ 0x40
32#define M48T59_CNTL_WRITE 0x80
33#define M48T59_WATCHDOG 0x1ff7
34#define M48T59_INTR 0x1ff6
35#define M48T59_INTR_AFE 0x80 /* Alarm Interrupt Enable */
36#define M48T59_INTR_ABE 0x20
37#define M48T59_ALARM_DATE 0x1ff5
38#define M48T59_ALARM_HOUR 0x1ff4
39#define M48T59_ALARM_MIN 0x1ff3
40#define M48T59_ALARM_SEC 0x1ff2
41#define M48T59_UNUSED 0x1ff1
42#define M48T59_FLAGS 0x1ff0
43#define M48T59_FLAGS_WDT 0x80 /* watchdog timer expired */
44#define M48T59_FLAGS_AF 0x40 /* alarm */
45#define M48T59_FLAGS_BF 0x10 /* low battery */
46
47#define M48T59_NVRAM_SIZE 0x1ff0
48
49struct m48t59_plat_data {
50 /* The method to access M48T59 registers,
51 * NOTE: The 'ofs' should be 0x00~0x1fff
52 */
53 void (*write_byte)(struct device *dev, u32 ofs, u8 val);
54 unsigned char (*read_byte)(struct device *dev, u32 ofs);
55};
56
57#endif /* _LINUX_RTC_M48T59_H_ */
diff --git a/include/linux/serial_core.h b/include/linux/serial_core.h
index 7f2c99d66e9d..9c721cd2c9d6 100644
--- a/include/linux/serial_core.h
+++ b/include/linux/serial_core.h
@@ -142,6 +142,9 @@
142/* Micrel KS8695 */ 142/* Micrel KS8695 */
143#define PORT_KS8695 76 143#define PORT_KS8695 76
144 144
145/* Broadcom SB1250, etc. SOC */
146#define PORT_SB1250_DUART 77
147
145 148
146#ifdef __KERNEL__ 149#ifdef __KERNEL__
147 150
diff --git a/include/linux/slab.h b/include/linux/slab.h
index 27402fea9b79..0e1d0daef6a2 100644
--- a/include/linux/slab.h
+++ b/include/linux/slab.h
@@ -31,6 +31,19 @@
31#define SLAB_TRACE 0x00200000UL /* Trace allocations and frees */ 31#define SLAB_TRACE 0x00200000UL /* Trace allocations and frees */
32 32
33/* 33/*
34 * ZERO_SIZE_PTR will be returned for zero sized kmalloc requests.
35 *
36 * Dereferencing ZERO_SIZE_PTR will lead to a distinct access fault.
37 *
38 * ZERO_SIZE_PTR can be passed to kfree though in the same way that NULL can.
39 * Both make kfree a no-op.
40 */
41#define ZERO_SIZE_PTR ((void *)16)
42
43#define ZERO_OR_NULL_PTR(x) ((unsigned long)(x) < \
44 (unsigned long)ZERO_SIZE_PTR)
45
46/*
34 * struct kmem_cache related prototypes 47 * struct kmem_cache related prototypes
35 */ 48 */
36void __init kmem_cache_init(void); 49void __init kmem_cache_init(void);
@@ -42,7 +55,6 @@ struct kmem_cache *kmem_cache_create(const char *, size_t, size_t,
42 void (*)(void *, struct kmem_cache *, unsigned long)); 55 void (*)(void *, struct kmem_cache *, unsigned long));
43void kmem_cache_destroy(struct kmem_cache *); 56void kmem_cache_destroy(struct kmem_cache *);
44int kmem_cache_shrink(struct kmem_cache *); 57int kmem_cache_shrink(struct kmem_cache *);
45void *kmem_cache_zalloc(struct kmem_cache *, gfp_t);
46void kmem_cache_free(struct kmem_cache *, void *); 58void kmem_cache_free(struct kmem_cache *, void *);
47unsigned int kmem_cache_size(struct kmem_cache *); 59unsigned int kmem_cache_size(struct kmem_cache *);
48const char *kmem_cache_name(struct kmem_cache *); 60const char *kmem_cache_name(struct kmem_cache *);
@@ -78,11 +90,37 @@ int kmem_ptr_validate(struct kmem_cache *cachep, const void *ptr);
78/* 90/*
79 * Common kmalloc functions provided by all allocators 91 * Common kmalloc functions provided by all allocators
80 */ 92 */
81void *__kzalloc(size_t, gfp_t);
82void * __must_check krealloc(const void *, size_t, gfp_t); 93void * __must_check krealloc(const void *, size_t, gfp_t);
83void kfree(const void *); 94void kfree(const void *);
84size_t ksize(const void *); 95size_t ksize(const void *);
85 96
97/*
98 * Allocator specific definitions. These are mainly used to establish optimized
99 * ways to convert kmalloc() calls to kmem_cache_alloc() invocations by
100 * selecting the appropriate general cache at compile time.
101 *
102 * Allocators must define at least:
103 *
104 * kmem_cache_alloc()
105 * __kmalloc()
106 * kmalloc()
107 *
108 * Those wishing to support NUMA must also define:
109 *
110 * kmem_cache_alloc_node()
111 * kmalloc_node()
112 *
113 * See each allocator definition file for additional comments and
114 * implementation notes.
115 */
116#ifdef CONFIG_SLUB
117#include <linux/slub_def.h>
118#elif defined(CONFIG_SLOB)
119#include <linux/slob_def.h>
120#else
121#include <linux/slab_def.h>
122#endif
123
86/** 124/**
87 * kcalloc - allocate memory for an array. The memory is set to zero. 125 * kcalloc - allocate memory for an array. The memory is set to zero.
88 * @n: number of elements. 126 * @n: number of elements.
@@ -138,37 +176,9 @@ static inline void *kcalloc(size_t n, size_t size, gfp_t flags)
138{ 176{
139 if (n != 0 && size > ULONG_MAX / n) 177 if (n != 0 && size > ULONG_MAX / n)
140 return NULL; 178 return NULL;
141 return __kzalloc(n * size, flags); 179 return __kmalloc(n * size, flags | __GFP_ZERO);
142} 180}
143 181
144/*
145 * Allocator specific definitions. These are mainly used to establish optimized
146 * ways to convert kmalloc() calls to kmem_cache_alloc() invocations by
147 * selecting the appropriate general cache at compile time.
148 *
149 * Allocators must define at least:
150 *
151 * kmem_cache_alloc()
152 * __kmalloc()
153 * kmalloc()
154 * kzalloc()
155 *
156 * Those wishing to support NUMA must also define:
157 *
158 * kmem_cache_alloc_node()
159 * kmalloc_node()
160 *
161 * See each allocator definition file for additional comments and
162 * implementation notes.
163 */
164#ifdef CONFIG_SLUB
165#include <linux/slub_def.h>
166#elif defined(CONFIG_SLOB)
167#include <linux/slob_def.h>
168#else
169#include <linux/slab_def.h>
170#endif
171
172#if !defined(CONFIG_NUMA) && !defined(CONFIG_SLOB) 182#if !defined(CONFIG_NUMA) && !defined(CONFIG_SLOB)
173/** 183/**
174 * kmalloc_node - allocate memory from a specific node 184 * kmalloc_node - allocate memory from a specific node
@@ -242,5 +252,23 @@ extern void *__kmalloc_node_track_caller(size_t, gfp_t, int, void *);
242 252
243#endif /* DEBUG_SLAB */ 253#endif /* DEBUG_SLAB */
244 254
255/*
256 * Shortcuts
257 */
258static inline void *kmem_cache_zalloc(struct kmem_cache *k, gfp_t flags)
259{
260 return kmem_cache_alloc(k, flags | __GFP_ZERO);
261}
262
263/**
264 * kzalloc - allocate memory. The memory is set to zero.
265 * @size: how many bytes of memory are required.
266 * @flags: the type of memory to allocate (see kmalloc).
267 */
268static inline void *kzalloc(size_t size, gfp_t flags)
269{
270 return kmalloc(size, flags | __GFP_ZERO);
271}
272
245#endif /* __KERNEL__ */ 273#endif /* __KERNEL__ */
246#endif /* _LINUX_SLAB_H */ 274#endif /* _LINUX_SLAB_H */
diff --git a/include/linux/slab_def.h b/include/linux/slab_def.h
index 365d036c454a..32bdc2ffd715 100644
--- a/include/linux/slab_def.h
+++ b/include/linux/slab_def.h
@@ -32,6 +32,10 @@ static inline void *kmalloc(size_t size, gfp_t flags)
32{ 32{
33 if (__builtin_constant_p(size)) { 33 if (__builtin_constant_p(size)) {
34 int i = 0; 34 int i = 0;
35
36 if (!size)
37 return ZERO_SIZE_PTR;
38
35#define CACHE(x) \ 39#define CACHE(x) \
36 if (size <= x) \ 40 if (size <= x) \
37 goto found; \ 41 goto found; \
@@ -54,32 +58,6 @@ found:
54 return __kmalloc(size, flags); 58 return __kmalloc(size, flags);
55} 59}
56 60
57static inline void *kzalloc(size_t size, gfp_t flags)
58{
59 if (__builtin_constant_p(size)) {
60 int i = 0;
61#define CACHE(x) \
62 if (size <= x) \
63 goto found; \
64 else \
65 i++;
66#include "kmalloc_sizes.h"
67#undef CACHE
68 {
69 extern void __you_cannot_kzalloc_that_much(void);
70 __you_cannot_kzalloc_that_much();
71 }
72found:
73#ifdef CONFIG_ZONE_DMA
74 if (flags & GFP_DMA)
75 return kmem_cache_zalloc(malloc_sizes[i].cs_dmacachep,
76 flags);
77#endif
78 return kmem_cache_zalloc(malloc_sizes[i].cs_cachep, flags);
79 }
80 return __kzalloc(size, flags);
81}
82
83#ifdef CONFIG_NUMA 61#ifdef CONFIG_NUMA
84extern void *__kmalloc_node(size_t size, gfp_t flags, int node); 62extern void *__kmalloc_node(size_t size, gfp_t flags, int node);
85extern void *kmem_cache_alloc_node(struct kmem_cache *, gfp_t flags, int node); 63extern void *kmem_cache_alloc_node(struct kmem_cache *, gfp_t flags, int node);
@@ -88,6 +66,10 @@ static inline void *kmalloc_node(size_t size, gfp_t flags, int node)
88{ 66{
89 if (__builtin_constant_p(size)) { 67 if (__builtin_constant_p(size)) {
90 int i = 0; 68 int i = 0;
69
70 if (!size)
71 return ZERO_SIZE_PTR;
72
91#define CACHE(x) \ 73#define CACHE(x) \
92 if (size <= x) \ 74 if (size <= x) \
93 goto found; \ 75 goto found; \
diff --git a/include/linux/slub_def.h b/include/linux/slub_def.h
index a582f6771525..07f7e4cbcee3 100644
--- a/include/linux/slub_def.h
+++ b/include/linux/slub_def.h
@@ -16,7 +16,9 @@ struct kmem_cache_node {
16 unsigned long nr_partial; 16 unsigned long nr_partial;
17 atomic_long_t nr_slabs; 17 atomic_long_t nr_slabs;
18 struct list_head partial; 18 struct list_head partial;
19#ifdef CONFIG_SLUB_DEBUG
19 struct list_head full; 20 struct list_head full;
21#endif
20}; 22};
21 23
22/* 24/*
@@ -44,7 +46,9 @@ struct kmem_cache {
44 int align; /* Alignment */ 46 int align; /* Alignment */
45 const char *name; /* Name (only for display!) */ 47 const char *name; /* Name (only for display!) */
46 struct list_head list; /* List of slab caches */ 48 struct list_head list; /* List of slab caches */
49#ifdef CONFIG_SLUB_DEBUG
47 struct kobject kobj; /* For sysfs */ 50 struct kobject kobj; /* For sysfs */
51#endif
48 52
49#ifdef CONFIG_NUMA 53#ifdef CONFIG_NUMA
50 int defrag_ratio; 54 int defrag_ratio;
@@ -159,18 +163,6 @@ static inline struct kmem_cache *kmalloc_slab(size_t size)
159#define SLUB_DMA 0 163#define SLUB_DMA 0
160#endif 164#endif
161 165
162
163/*
164 * ZERO_SIZE_PTR will be returned for zero sized kmalloc requests.
165 *
166 * Dereferencing ZERO_SIZE_PTR will lead to a distinct access fault.
167 *
168 * ZERO_SIZE_PTR can be passed to kfree though in the same way that NULL can.
169 * Both make kfree a no-op.
170 */
171#define ZERO_SIZE_PTR ((void *)16)
172
173
174void *kmem_cache_alloc(struct kmem_cache *, gfp_t); 166void *kmem_cache_alloc(struct kmem_cache *, gfp_t);
175void *__kmalloc(size_t size, gfp_t flags); 167void *__kmalloc(size_t size, gfp_t flags);
176 168
@@ -187,19 +179,6 @@ static inline void *kmalloc(size_t size, gfp_t flags)
187 return __kmalloc(size, flags); 179 return __kmalloc(size, flags);
188} 180}
189 181
190static inline void *kzalloc(size_t size, gfp_t flags)
191{
192 if (__builtin_constant_p(size) && !(flags & SLUB_DMA)) {
193 struct kmem_cache *s = kmalloc_slab(size);
194
195 if (!s)
196 return ZERO_SIZE_PTR;
197
198 return kmem_cache_zalloc(s, flags);
199 } else
200 return __kzalloc(size, flags);
201}
202
203#ifdef CONFIG_NUMA 182#ifdef CONFIG_NUMA
204void *__kmalloc_node(size_t size, gfp_t flags, int node); 183void *__kmalloc_node(size_t size, gfp_t flags, int node);
205void *kmem_cache_alloc_node(struct kmem_cache *, gfp_t flags, int node); 184void *kmem_cache_alloc_node(struct kmem_cache *, gfp_t flags, int node);
diff --git a/include/linux/smp.h b/include/linux/smp.h
index 96ac21f8dd73..259a13c3bd98 100644
--- a/include/linux/smp.h
+++ b/include/linux/smp.h
@@ -99,11 +99,14 @@ static inline int up_smp_call_function(void)
99static inline void smp_send_reschedule(int cpu) { } 99static inline void smp_send_reschedule(int cpu) { }
100#define num_booting_cpus() 1 100#define num_booting_cpus() 1
101#define smp_prepare_boot_cpu() do {} while (0) 101#define smp_prepare_boot_cpu() do {} while (0)
102static inline int smp_call_function_single(int cpuid, void (*func) (void *info), 102#define smp_call_function_single(cpuid, func, info, retry, wait) \
103 void *info, int retry, int wait) 103({ \
104{ 104 WARN_ON(cpuid != 0); \
105 return -EBUSY; 105 local_irq_disable(); \
106} 106 (func)(info); \
107 local_irq_enable(); \
108 0; \
109})
107 110
108#endif /* !SMP */ 111#endif /* !SMP */
109 112
diff --git a/include/linux/spi/spi.h b/include/linux/spi/spi.h
index 1be5ea059477..302b81d1d117 100644
--- a/include/linux/spi/spi.h
+++ b/include/linux/spi/spi.h
@@ -76,6 +76,7 @@ struct spi_device {
76#define SPI_MODE_3 (SPI_CPOL|SPI_CPHA) 76#define SPI_MODE_3 (SPI_CPOL|SPI_CPHA)
77#define SPI_CS_HIGH 0x04 /* chipselect active high? */ 77#define SPI_CS_HIGH 0x04 /* chipselect active high? */
78#define SPI_LSB_FIRST 0x08 /* per-word bits-on-wire */ 78#define SPI_LSB_FIRST 0x08 /* per-word bits-on-wire */
79#define SPI_3WIRE 0x10 /* SI/SO signals shared */
79 u8 bits_per_word; 80 u8 bits_per_word;
80 int irq; 81 int irq;
81 void *controller_state; 82 void *controller_state;
diff --git a/include/linux/spi/spi_bitbang.h b/include/linux/spi/spi_bitbang.h
index 9dbca629dcfb..b8db32cea1de 100644
--- a/include/linux/spi/spi_bitbang.h
+++ b/include/linux/spi/spi_bitbang.h
@@ -26,6 +26,7 @@ struct spi_bitbang {
26 struct list_head queue; 26 struct list_head queue;
27 u8 busy; 27 u8 busy;
28 u8 use_dma; 28 u8 use_dma;
29 u8 flags; /* extra spi->mode support */
29 30
30 struct spi_master *master; 31 struct spi_master *master;
31 32
diff --git a/include/linux/spi/tle62x0.h b/include/linux/spi/tle62x0.h
new file mode 100644
index 000000000000..60b59187e590
--- /dev/null
+++ b/include/linux/spi/tle62x0.h
@@ -0,0 +1,24 @@
1/*
2 * tle62x0.h - platform glue to Infineon TLE62x0 driver chips
3 *
4 * Copyright 2007 Simtec Electronics
5 * Ben Dooks <ben@simtec.co.uk>
6 *
7 * This program is free software; you can redistribute it and/or modify
8 * it under the terms of the GNU General Public License version 2 as
9 * published by the Free Software Foundation.
10 *
11 * This program is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14 * GNU General Public License for more details.
15 *
16 * You should have received a copy of the GNU General Public License
17 * along with this program; if not, write to the Free Software
18 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
19*/
20
21struct tle62x0_pdata {
22 unsigned int init_state;
23 unsigned int gpio_count;
24};
diff --git a/include/linux/sunrpc/gss_api.h b/include/linux/sunrpc/gss_api.h
index bbac101ac372..459c5fc11d51 100644
--- a/include/linux/sunrpc/gss_api.h
+++ b/include/linux/sunrpc/gss_api.h
@@ -58,6 +58,7 @@ u32 gss_unwrap(
58u32 gss_delete_sec_context( 58u32 gss_delete_sec_context(
59 struct gss_ctx **ctx_id); 59 struct gss_ctx **ctx_id);
60 60
61u32 gss_svc_to_pseudoflavor(struct gss_api_mech *, u32 service);
61u32 gss_pseudoflavor_to_service(struct gss_api_mech *, u32 pseudoflavor); 62u32 gss_pseudoflavor_to_service(struct gss_api_mech *, u32 pseudoflavor);
62char *gss_service_to_auth_domain_name(struct gss_api_mech *, u32 service); 63char *gss_service_to_auth_domain_name(struct gss_api_mech *, u32 service);
63 64
diff --git a/include/linux/sunrpc/svc.h b/include/linux/sunrpc/svc.h
index 129d50f2225c..8531a70da73d 100644
--- a/include/linux/sunrpc/svc.h
+++ b/include/linux/sunrpc/svc.h
@@ -212,6 +212,7 @@ struct svc_rqst {
212 struct svc_pool * rq_pool; /* thread pool */ 212 struct svc_pool * rq_pool; /* thread pool */
213 struct svc_procedure * rq_procinfo; /* procedure info */ 213 struct svc_procedure * rq_procinfo; /* procedure info */
214 struct auth_ops * rq_authop; /* authentication flavour */ 214 struct auth_ops * rq_authop; /* authentication flavour */
215 u32 rq_flavor; /* pseudoflavor */
215 struct svc_cred rq_cred; /* auth info */ 216 struct svc_cred rq_cred; /* auth info */
216 struct sk_buff * rq_skbuff; /* fast recv inet buffer */ 217 struct sk_buff * rq_skbuff; /* fast recv inet buffer */
217 struct svc_deferred_req*rq_deferred; /* deferred request we are replaying */ 218 struct svc_deferred_req*rq_deferred; /* deferred request we are replaying */
@@ -248,6 +249,7 @@ struct svc_rqst {
248 */ 249 */
249 /* Catering to nfsd */ 250 /* Catering to nfsd */
250 struct auth_domain * rq_client; /* RPC peer info */ 251 struct auth_domain * rq_client; /* RPC peer info */
252 struct auth_domain * rq_gssclient; /* "gss/"-style peer info */
251 struct svc_cacherep * rq_cacherep; /* cache info */ 253 struct svc_cacherep * rq_cacherep; /* cache info */
252 struct knfsd_fh * rq_reffh; /* Referrence filehandle, used to 254 struct knfsd_fh * rq_reffh; /* Referrence filehandle, used to
253 * determine what device number 255 * determine what device number
diff --git a/include/linux/sunrpc/svcauth.h b/include/linux/sunrpc/svcauth.h
index de92619b0826..22e1ef8e200e 100644
--- a/include/linux/sunrpc/svcauth.h
+++ b/include/linux/sunrpc/svcauth.h
@@ -127,6 +127,7 @@ extern struct auth_domain *auth_unix_lookup(struct in_addr addr);
127extern int auth_unix_forget_old(struct auth_domain *dom); 127extern int auth_unix_forget_old(struct auth_domain *dom);
128extern void svcauth_unix_purge(void); 128extern void svcauth_unix_purge(void);
129extern void svcauth_unix_info_release(void *); 129extern void svcauth_unix_info_release(void *);
130extern int svcauth_unix_set_client(struct svc_rqst *rqstp);
130 131
131static inline unsigned long hash_str(char *name, int bits) 132static inline unsigned long hash_str(char *name, int bits)
132{ 133{
diff --git a/include/linux/sunrpc/svcauth_gss.h b/include/linux/sunrpc/svcauth_gss.h
index 5a5db16ab660..417a1def56db 100644
--- a/include/linux/sunrpc/svcauth_gss.h
+++ b/include/linux/sunrpc/svcauth_gss.h
@@ -22,6 +22,7 @@
22int gss_svc_init(void); 22int gss_svc_init(void);
23void gss_svc_shutdown(void); 23void gss_svc_shutdown(void);
24int svcauth_gss_register_pseudoflavor(u32 pseudoflavor, char * name); 24int svcauth_gss_register_pseudoflavor(u32 pseudoflavor, char * name);
25u32 svcauth_gss_flavor(struct auth_domain *dom);
25 26
26#endif /* __KERNEL__ */ 27#endif /* __KERNEL__ */
27#endif /* _LINUX_SUNRPC_SVCAUTH_GSS_H */ 28#endif /* _LINUX_SUNRPC_SVCAUTH_GSS_H */
diff --git a/include/linux/swap.h b/include/linux/swap.h
index 006868881346..665f85f2a3af 100644
--- a/include/linux/swap.h
+++ b/include/linux/swap.h
@@ -188,7 +188,8 @@ extern int rotate_reclaimable_page(struct page *page);
188extern void swap_setup(void); 188extern void swap_setup(void);
189 189
190/* linux/mm/vmscan.c */ 190/* linux/mm/vmscan.c */
191extern unsigned long try_to_free_pages(struct zone **, gfp_t); 191extern unsigned long try_to_free_pages(struct zone **zones, int order,
192 gfp_t gfp_mask);
192extern unsigned long shrink_all_memory(unsigned long nr_pages); 193extern unsigned long shrink_all_memory(unsigned long nr_pages);
193extern int vm_swappiness; 194extern int vm_swappiness;
194extern int remove_mapping(struct address_space *mapping, struct page *page); 195extern int remove_mapping(struct address_space *mapping, struct page *page);
diff --git a/include/linux/time.h b/include/linux/time.h
index 4bb05a829be9..ec3b0ced0afe 100644
--- a/include/linux/time.h
+++ b/include/linux/time.h
@@ -36,7 +36,8 @@ struct timezone {
36#define NSEC_PER_SEC 1000000000L 36#define NSEC_PER_SEC 1000000000L
37#define FSEC_PER_SEC 1000000000000000L 37#define FSEC_PER_SEC 1000000000000000L
38 38
39static inline int timespec_equal(struct timespec *a, struct timespec *b) 39static inline int timespec_equal(const struct timespec *a,
40 const struct timespec *b)
40{ 41{
41 return (a->tv_sec == b->tv_sec) && (a->tv_nsec == b->tv_nsec); 42 return (a->tv_sec == b->tv_sec) && (a->tv_nsec == b->tv_nsec);
42} 43}
diff --git a/include/linux/vmstat.h b/include/linux/vmstat.h
index d9325cf8a134..75370ec0923e 100644
--- a/include/linux/vmstat.h
+++ b/include/linux/vmstat.h
@@ -25,7 +25,7 @@
25#define HIGHMEM_ZONE(xx) 25#define HIGHMEM_ZONE(xx)
26#endif 26#endif
27 27
28#define FOR_ALL_ZONES(xx) DMA_ZONE(xx) DMA32_ZONE(xx) xx##_NORMAL HIGHMEM_ZONE(xx) 28#define FOR_ALL_ZONES(xx) DMA_ZONE(xx) DMA32_ZONE(xx) xx##_NORMAL HIGHMEM_ZONE(xx) , xx##_MOVABLE
29 29
30enum vm_event_item { PGPGIN, PGPGOUT, PSWPIN, PSWPOUT, 30enum vm_event_item { PGPGIN, PGPGOUT, PSWPIN, PSWPOUT,
31 FOR_ALL_ZONES(PGALLOC), 31 FOR_ALL_ZONES(PGALLOC),
@@ -170,7 +170,8 @@ static inline unsigned long node_page_state(int node,
170#ifdef CONFIG_HIGHMEM 170#ifdef CONFIG_HIGHMEM
171 zone_page_state(&zones[ZONE_HIGHMEM], item) + 171 zone_page_state(&zones[ZONE_HIGHMEM], item) +
172#endif 172#endif
173 zone_page_state(&zones[ZONE_NORMAL], item); 173 zone_page_state(&zones[ZONE_NORMAL], item) +
174 zone_page_state(&zones[ZONE_MOVABLE], item);
174} 175}
175 176
176extern void zone_statistics(struct zonelist *, struct zone *); 177extern void zone_statistics(struct zonelist *, struct zone *);
diff --git a/include/linux/vt_kern.h b/include/linux/vt_kern.h
index d961635d0e61..699b7e9864fa 100644
--- a/include/linux/vt_kern.h
+++ b/include/linux/vt_kern.h
@@ -75,6 +75,8 @@ int con_copy_unimap(struct vc_data *dst_vc, struct vc_data *src_vc);
75int vt_waitactive(int vt); 75int vt_waitactive(int vt);
76void change_console(struct vc_data *new_vc); 76void change_console(struct vc_data *new_vc);
77void reset_vc(struct vc_data *vc); 77void reset_vc(struct vc_data *vc);
78extern int unbind_con_driver(const struct consw *csw, int first, int last,
79 int deflt);
78 80
79/* 81/*
80 * vc_screen.c shares this temporary buffer with the console write code so that 82 * vc_screen.c shares this temporary buffer with the console write code so that
diff --git a/include/net/scm.h b/include/net/scm.h
index 5637d5e22d5f..423cb1d5ac25 100644
--- a/include/net/scm.h
+++ b/include/net/scm.h
@@ -8,7 +8,7 @@
8/* Well, we should have at least one descriptor open 8/* Well, we should have at least one descriptor open
9 * to accept passed FDs 8) 9 * to accept passed FDs 8)
10 */ 10 */
11#define SCM_MAX_FD (OPEN_MAX-1) 11#define SCM_MAX_FD 255
12 12
13struct scm_fp_list 13struct scm_fp_list
14{ 14{
diff --git a/include/video/tgafb.h b/include/video/tgafb.h
index 03d0dbe293a8..7bc5e2c14826 100644
--- a/include/video/tgafb.h
+++ b/include/video/tgafb.h
@@ -216,6 +216,7 @@ struct tga_par {
216 u32 pll_freq; /* pixclock in mhz */ 216 u32 pll_freq; /* pixclock in mhz */
217 u32 bits_per_pixel; /* bits per pixel */ 217 u32 bits_per_pixel; /* bits per pixel */
218 u32 sync_on_green; /* set if sync is on green */ 218 u32 sync_on_green; /* set if sync is on green */
219 u32 palette[16];
219}; 220};
220 221
221 222
diff --git a/init/Kconfig b/init/Kconfig
index 0b0e29ed82d1..e2056828dc64 100644
--- a/init/Kconfig
+++ b/init/Kconfig
@@ -546,7 +546,7 @@ config SLUB_DEBUG
546 546
547choice 547choice
548 prompt "Choose SLAB allocator" 548 prompt "Choose SLAB allocator"
549 default SLAB 549 default SLUB
550 help 550 help
551 This option allows to select a slab allocator. 551 This option allows to select a slab allocator.
552 552
diff --git a/init/do_mounts_initrd.c b/init/do_mounts_initrd.c
index b222ce9e1c8b..a6b4c0c08e13 100644
--- a/init/do_mounts_initrd.c
+++ b/init/do_mounts_initrd.c
@@ -56,12 +56,9 @@ static void __init handle_initrd(void)
56 sys_chroot("."); 56 sys_chroot(".");
57 57
58 pid = kernel_thread(do_linuxrc, "/linuxrc", SIGCHLD); 58 pid = kernel_thread(do_linuxrc, "/linuxrc", SIGCHLD);
59 if (pid > 0) { 59 if (pid > 0)
60 while (pid != sys_wait4(-1, NULL, 0, NULL)) { 60 while (pid != sys_wait4(-1, NULL, 0, NULL))
61 try_to_freeze();
62 yield(); 61 yield();
63 }
64 }
65 62
66 /* move initrd to rootfs' /old */ 63 /* move initrd to rootfs' /old */
67 sys_fchdir(old_fd); 64 sys_fchdir(old_fd);
diff --git a/ipc/msg.c b/ipc/msg.c
index cbd27e519943..a03fcb522fff 100644
--- a/ipc/msg.c
+++ b/ipc/msg.c
@@ -385,7 +385,7 @@ copy_msqid_from_user(struct msq_setbuf *out, void __user *buf, int version)
385asmlinkage long sys_msgctl(int msqid, int cmd, struct msqid_ds __user *buf) 385asmlinkage long sys_msgctl(int msqid, int cmd, struct msqid_ds __user *buf)
386{ 386{
387 struct kern_ipc_perm *ipcp; 387 struct kern_ipc_perm *ipcp;
388 struct msq_setbuf setbuf; 388 struct msq_setbuf uninitialized_var(setbuf);
389 struct msg_queue *msq; 389 struct msg_queue *msq;
390 int err, version; 390 int err, version;
391 struct ipc_namespace *ns; 391 struct ipc_namespace *ns;
@@ -509,7 +509,7 @@ asmlinkage long sys_msgctl(int msqid, int cmd, struct msqid_ds __user *buf)
509 err = audit_ipc_obj(ipcp); 509 err = audit_ipc_obj(ipcp);
510 if (err) 510 if (err)
511 goto out_unlock_up; 511 goto out_unlock_up;
512 if (cmd==IPC_SET) { 512 if (cmd == IPC_SET) {
513 err = audit_ipc_set_perm(setbuf.qbytes, setbuf.uid, setbuf.gid, 513 err = audit_ipc_set_perm(setbuf.qbytes, setbuf.uid, setbuf.gid,
514 setbuf.mode); 514 setbuf.mode);
515 if (err) 515 if (err)
diff --git a/ipc/sem.c b/ipc/sem.c
index 89bfdffb38d8..b676fef6d208 100644
--- a/ipc/sem.c
+++ b/ipc/sem.c
@@ -856,7 +856,7 @@ static int semctl_down(struct ipc_namespace *ns, int semid, int semnum,
856{ 856{
857 struct sem_array *sma; 857 struct sem_array *sma;
858 int err; 858 int err;
859 struct sem_setbuf setbuf; 859 struct sem_setbuf uninitialized_var(setbuf);
860 struct kern_ipc_perm *ipcp; 860 struct kern_ipc_perm *ipcp;
861 861
862 if(cmd == IPC_SET) { 862 if(cmd == IPC_SET) {
diff --git a/kernel/audit.c b/kernel/audit.c
index 5ce8851facf7..eb0f9165b401 100644
--- a/kernel/audit.c
+++ b/kernel/audit.c
@@ -392,6 +392,7 @@ static int kauditd_thread(void *dummy)
392{ 392{
393 struct sk_buff *skb; 393 struct sk_buff *skb;
394 394
395 set_freezable();
395 while (!kthread_should_stop()) { 396 while (!kthread_should_stop()) {
396 skb = skb_dequeue(&audit_skb_queue); 397 skb = skb_dequeue(&audit_skb_queue);
397 wake_up(&audit_backlog_wait); 398 wake_up(&audit_backlog_wait);
diff --git a/kernel/auditfilter.c b/kernel/auditfilter.c
index ce61f423542c..1bf093dcffe0 100644
--- a/kernel/auditfilter.c
+++ b/kernel/auditfilter.c
@@ -1210,8 +1210,8 @@ static inline int audit_add_rule(struct audit_entry *entry,
1210 struct audit_entry *e; 1210 struct audit_entry *e;
1211 struct audit_field *inode_f = entry->rule.inode_f; 1211 struct audit_field *inode_f = entry->rule.inode_f;
1212 struct audit_watch *watch = entry->rule.watch; 1212 struct audit_watch *watch = entry->rule.watch;
1213 struct nameidata *ndp, *ndw; 1213 struct nameidata *ndp = NULL, *ndw = NULL;
1214 int h, err, putnd_needed = 0; 1214 int h, err;
1215#ifdef CONFIG_AUDITSYSCALL 1215#ifdef CONFIG_AUDITSYSCALL
1216 int dont_count = 0; 1216 int dont_count = 0;
1217 1217
@@ -1239,7 +1239,6 @@ static inline int audit_add_rule(struct audit_entry *entry,
1239 err = audit_get_nd(watch->path, &ndp, &ndw); 1239 err = audit_get_nd(watch->path, &ndp, &ndw);
1240 if (err) 1240 if (err)
1241 goto error; 1241 goto error;
1242 putnd_needed = 1;
1243 } 1242 }
1244 1243
1245 mutex_lock(&audit_filter_mutex); 1244 mutex_lock(&audit_filter_mutex);
@@ -1269,14 +1268,11 @@ static inline int audit_add_rule(struct audit_entry *entry,
1269#endif 1268#endif
1270 mutex_unlock(&audit_filter_mutex); 1269 mutex_unlock(&audit_filter_mutex);
1271 1270
1272 if (putnd_needed) 1271 audit_put_nd(ndp, ndw); /* NULL args OK */
1273 audit_put_nd(ndp, ndw);
1274
1275 return 0; 1272 return 0;
1276 1273
1277error: 1274error:
1278 if (putnd_needed) 1275 audit_put_nd(ndp, ndw); /* NULL args OK */
1279 audit_put_nd(ndp, ndw);
1280 if (watch) 1276 if (watch)
1281 audit_put_watch(watch); /* tmp watch, matches initial get */ 1277 audit_put_watch(watch); /* tmp watch, matches initial get */
1282 return err; 1278 return err;
diff --git a/kernel/cpu.c b/kernel/cpu.c
index 208cf3497c10..181ae7086029 100644
--- a/kernel/cpu.c
+++ b/kernel/cpu.c
@@ -103,11 +103,19 @@ static inline void check_for_tasks(int cpu)
103 write_unlock_irq(&tasklist_lock); 103 write_unlock_irq(&tasklist_lock);
104} 104}
105 105
106struct take_cpu_down_param {
107 unsigned long mod;
108 void *hcpu;
109};
110
106/* Take this CPU down. */ 111/* Take this CPU down. */
107static int take_cpu_down(void *unused) 112static int take_cpu_down(void *_param)
108{ 113{
114 struct take_cpu_down_param *param = _param;
109 int err; 115 int err;
110 116
117 raw_notifier_call_chain(&cpu_chain, CPU_DYING | param->mod,
118 param->hcpu);
111 /* Ensure this CPU doesn't handle any more interrupts. */ 119 /* Ensure this CPU doesn't handle any more interrupts. */
112 err = __cpu_disable(); 120 err = __cpu_disable();
113 if (err < 0) 121 if (err < 0)
@@ -127,6 +135,10 @@ static int _cpu_down(unsigned int cpu, int tasks_frozen)
127 cpumask_t old_allowed, tmp; 135 cpumask_t old_allowed, tmp;
128 void *hcpu = (void *)(long)cpu; 136 void *hcpu = (void *)(long)cpu;
129 unsigned long mod = tasks_frozen ? CPU_TASKS_FROZEN : 0; 137 unsigned long mod = tasks_frozen ? CPU_TASKS_FROZEN : 0;
138 struct take_cpu_down_param tcd_param = {
139 .mod = mod,
140 .hcpu = hcpu,
141 };
130 142
131 if (num_online_cpus() == 1) 143 if (num_online_cpus() == 1)
132 return -EBUSY; 144 return -EBUSY;
@@ -153,7 +165,7 @@ static int _cpu_down(unsigned int cpu, int tasks_frozen)
153 set_cpus_allowed(current, tmp); 165 set_cpus_allowed(current, tmp);
154 166
155 mutex_lock(&cpu_bitmask_lock); 167 mutex_lock(&cpu_bitmask_lock);
156 p = __stop_machine_run(take_cpu_down, NULL, cpu); 168 p = __stop_machine_run(take_cpu_down, &tcd_param, cpu);
157 mutex_unlock(&cpu_bitmask_lock); 169 mutex_unlock(&cpu_bitmask_lock);
158 170
159 if (IS_ERR(p) || cpu_online(cpu)) { 171 if (IS_ERR(p) || cpu_online(cpu)) {
diff --git a/kernel/cpuset.c b/kernel/cpuset.c
index 824b1c01f410..b4796d850140 100644
--- a/kernel/cpuset.c
+++ b/kernel/cpuset.c
@@ -2138,6 +2138,9 @@ static void common_cpu_mem_hotplug_unplug(void)
2138static int cpuset_handle_cpuhp(struct notifier_block *nb, 2138static int cpuset_handle_cpuhp(struct notifier_block *nb,
2139 unsigned long phase, void *cpu) 2139 unsigned long phase, void *cpu)
2140{ 2140{
2141 if (phase == CPU_DYING || phase == CPU_DYING_FROZEN)
2142 return NOTIFY_DONE;
2143
2141 common_cpu_mem_hotplug_unplug(); 2144 common_cpu_mem_hotplug_unplug();
2142 return 0; 2145 return 0;
2143} 2146}
diff --git a/kernel/exit.c b/kernel/exit.c
index 57626692cd90..e8af8d0c2483 100644
--- a/kernel/exit.c
+++ b/kernel/exit.c
@@ -31,6 +31,7 @@
31#include <linux/mempolicy.h> 31#include <linux/mempolicy.h>
32#include <linux/taskstats_kern.h> 32#include <linux/taskstats_kern.h>
33#include <linux/delayacct.h> 33#include <linux/delayacct.h>
34#include <linux/freezer.h>
34#include <linux/cpuset.h> 35#include <linux/cpuset.h>
35#include <linux/syscalls.h> 36#include <linux/syscalls.h>
36#include <linux/signal.h> 37#include <linux/signal.h>
@@ -387,6 +388,11 @@ void daemonize(const char *name, ...)
387 * they would be locked into memory. 388 * they would be locked into memory.
388 */ 389 */
389 exit_mm(current); 390 exit_mm(current);
391 /*
392 * We don't want to have TIF_FREEZE set if the system-wide hibernation
393 * or suspend transition begins right now.
394 */
395 current->flags |= PF_NOFREEZE;
390 396
391 set_special_pids(1, 1); 397 set_special_pids(1, 1);
392 proc_clear_tty(current); 398 proc_clear_tty(current);
diff --git a/kernel/fork.c b/kernel/fork.c
index 7c5c5888e00a..ba39bdb2a7b8 100644
--- a/kernel/fork.c
+++ b/kernel/fork.c
@@ -923,7 +923,7 @@ static inline void copy_flags(unsigned long clone_flags, struct task_struct *p)
923{ 923{
924 unsigned long new_flags = p->flags; 924 unsigned long new_flags = p->flags;
925 925
926 new_flags &= ~(PF_SUPERPRIV | PF_NOFREEZE); 926 new_flags &= ~PF_SUPERPRIV;
927 new_flags |= PF_FORKNOEXEC; 927 new_flags |= PF_FORKNOEXEC;
928 if (!(clone_flags & CLONE_PTRACE)) 928 if (!(clone_flags & CLONE_PTRACE))
929 p->ptrace = 0; 929 p->ptrace = 0;
diff --git a/kernel/kallsyms.c b/kernel/kallsyms.c
index 0d662475dd9f..474219a41929 100644
--- a/kernel/kallsyms.c
+++ b/kernel/kallsyms.c
@@ -152,7 +152,7 @@ static unsigned int get_symbol_offset(unsigned long pos)
152/* Lookup the address for this symbol. Returns 0 if not found. */ 152/* Lookup the address for this symbol. Returns 0 if not found. */
153unsigned long kallsyms_lookup_name(const char *name) 153unsigned long kallsyms_lookup_name(const char *name)
154{ 154{
155 char namebuf[KSYM_NAME_LEN+1]; 155 char namebuf[KSYM_NAME_LEN];
156 unsigned long i; 156 unsigned long i;
157 unsigned int off; 157 unsigned int off;
158 158
@@ -248,7 +248,7 @@ const char *kallsyms_lookup(unsigned long addr,
248{ 248{
249 const char *msym; 249 const char *msym;
250 250
251 namebuf[KSYM_NAME_LEN] = 0; 251 namebuf[KSYM_NAME_LEN - 1] = 0;
252 namebuf[0] = 0; 252 namebuf[0] = 0;
253 253
254 if (is_ksym_addr(addr)) { 254 if (is_ksym_addr(addr)) {
@@ -265,7 +265,7 @@ const char *kallsyms_lookup(unsigned long addr,
265 /* see if it's in a module */ 265 /* see if it's in a module */
266 msym = module_address_lookup(addr, symbolsize, offset, modname); 266 msym = module_address_lookup(addr, symbolsize, offset, modname);
267 if (msym) 267 if (msym)
268 return strncpy(namebuf, msym, KSYM_NAME_LEN); 268 return strncpy(namebuf, msym, KSYM_NAME_LEN - 1);
269 269
270 return NULL; 270 return NULL;
271} 271}
@@ -273,7 +273,7 @@ const char *kallsyms_lookup(unsigned long addr,
273int lookup_symbol_name(unsigned long addr, char *symname) 273int lookup_symbol_name(unsigned long addr, char *symname)
274{ 274{
275 symname[0] = '\0'; 275 symname[0] = '\0';
276 symname[KSYM_NAME_LEN] = '\0'; 276 symname[KSYM_NAME_LEN - 1] = '\0';
277 277
278 if (is_ksym_addr(addr)) { 278 if (is_ksym_addr(addr)) {
279 unsigned long pos; 279 unsigned long pos;
@@ -291,7 +291,7 @@ int lookup_symbol_attrs(unsigned long addr, unsigned long *size,
291 unsigned long *offset, char *modname, char *name) 291 unsigned long *offset, char *modname, char *name)
292{ 292{
293 name[0] = '\0'; 293 name[0] = '\0';
294 name[KSYM_NAME_LEN] = '\0'; 294 name[KSYM_NAME_LEN - 1] = '\0';
295 295
296 if (is_ksym_addr(addr)) { 296 if (is_ksym_addr(addr)) {
297 unsigned long pos; 297 unsigned long pos;
@@ -312,7 +312,7 @@ int sprint_symbol(char *buffer, unsigned long address)
312 char *modname; 312 char *modname;
313 const char *name; 313 const char *name;
314 unsigned long offset, size; 314 unsigned long offset, size;
315 char namebuf[KSYM_NAME_LEN+1]; 315 char namebuf[KSYM_NAME_LEN];
316 316
317 name = kallsyms_lookup(address, &size, &offset, &modname, namebuf); 317 name = kallsyms_lookup(address, &size, &offset, &modname, namebuf);
318 if (!name) 318 if (!name)
@@ -342,8 +342,8 @@ struct kallsym_iter
342 unsigned long value; 342 unsigned long value;
343 unsigned int nameoff; /* If iterating in core kernel symbols */ 343 unsigned int nameoff; /* If iterating in core kernel symbols */
344 char type; 344 char type;
345 char name[KSYM_NAME_LEN+1]; 345 char name[KSYM_NAME_LEN];
346 char module_name[MODULE_NAME_LEN + 1]; 346 char module_name[MODULE_NAME_LEN];
347 int exported; 347 int exported;
348}; 348};
349 349
diff --git a/kernel/lockdep.c b/kernel/lockdep.c
index 1a5ff2211d88..edba2ffb43de 100644
--- a/kernel/lockdep.c
+++ b/kernel/lockdep.c
@@ -379,7 +379,7 @@ get_usage_chars(struct lock_class *class, char *c1, char *c2, char *c3, char *c4
379 379
380static void print_lock_name(struct lock_class *class) 380static void print_lock_name(struct lock_class *class)
381{ 381{
382 char str[KSYM_NAME_LEN + 1], c1, c2, c3, c4; 382 char str[KSYM_NAME_LEN], c1, c2, c3, c4;
383 const char *name; 383 const char *name;
384 384
385 get_usage_chars(class, &c1, &c2, &c3, &c4); 385 get_usage_chars(class, &c1, &c2, &c3, &c4);
@@ -401,7 +401,7 @@ static void print_lock_name(struct lock_class *class)
401static void print_lockdep_cache(struct lockdep_map *lock) 401static void print_lockdep_cache(struct lockdep_map *lock)
402{ 402{
403 const char *name; 403 const char *name;
404 char str[KSYM_NAME_LEN + 1]; 404 char str[KSYM_NAME_LEN];
405 405
406 name = lock->name; 406 name = lock->name;
407 if (!name) 407 if (!name)
diff --git a/kernel/module.c b/kernel/module.c
index 539fed9ac83c..33c04ad51175 100644
--- a/kernel/module.c
+++ b/kernel/module.c
@@ -2133,7 +2133,7 @@ int lookup_module_symbol_name(unsigned long addr, char *symname)
2133 sym = get_ksymbol(mod, addr, NULL, NULL); 2133 sym = get_ksymbol(mod, addr, NULL, NULL);
2134 if (!sym) 2134 if (!sym)
2135 goto out; 2135 goto out;
2136 strlcpy(symname, sym, KSYM_NAME_LEN + 1); 2136 strlcpy(symname, sym, KSYM_NAME_LEN);
2137 mutex_unlock(&module_mutex); 2137 mutex_unlock(&module_mutex);
2138 return 0; 2138 return 0;
2139 } 2139 }
@@ -2158,9 +2158,9 @@ int lookup_module_symbol_attrs(unsigned long addr, unsigned long *size,
2158 if (!sym) 2158 if (!sym)
2159 goto out; 2159 goto out;
2160 if (modname) 2160 if (modname)
2161 strlcpy(modname, mod->name, MODULE_NAME_LEN + 1); 2161 strlcpy(modname, mod->name, MODULE_NAME_LEN);
2162 if (name) 2162 if (name)
2163 strlcpy(name, sym, KSYM_NAME_LEN + 1); 2163 strlcpy(name, sym, KSYM_NAME_LEN);
2164 mutex_unlock(&module_mutex); 2164 mutex_unlock(&module_mutex);
2165 return 0; 2165 return 0;
2166 } 2166 }
@@ -2181,8 +2181,8 @@ int module_get_kallsym(unsigned int symnum, unsigned long *value, char *type,
2181 *value = mod->symtab[symnum].st_value; 2181 *value = mod->symtab[symnum].st_value;
2182 *type = mod->symtab[symnum].st_info; 2182 *type = mod->symtab[symnum].st_info;
2183 strlcpy(name, mod->strtab + mod->symtab[symnum].st_name, 2183 strlcpy(name, mod->strtab + mod->symtab[symnum].st_name,
2184 KSYM_NAME_LEN + 1); 2184 KSYM_NAME_LEN);
2185 strlcpy(module_name, mod->name, MODULE_NAME_LEN + 1); 2185 strlcpy(module_name, mod->name, MODULE_NAME_LEN);
2186 *exported = is_exported(name, mod); 2186 *exported = is_exported(name, mod);
2187 mutex_unlock(&module_mutex); 2187 mutex_unlock(&module_mutex);
2188 return 0; 2188 return 0;
diff --git a/kernel/panic.c b/kernel/panic.c
index 623d1828259a..f64f4c1ac11f 100644
--- a/kernel/panic.c
+++ b/kernel/panic.c
@@ -159,14 +159,15 @@ const char *print_tainted(void)
159{ 159{
160 static char buf[20]; 160 static char buf[20];
161 if (tainted) { 161 if (tainted) {
162 snprintf(buf, sizeof(buf), "Tainted: %c%c%c%c%c%c%c", 162 snprintf(buf, sizeof(buf), "Tainted: %c%c%c%c%c%c%c%c",
163 tainted & TAINT_PROPRIETARY_MODULE ? 'P' : 'G', 163 tainted & TAINT_PROPRIETARY_MODULE ? 'P' : 'G',
164 tainted & TAINT_FORCED_MODULE ? 'F' : ' ', 164 tainted & TAINT_FORCED_MODULE ? 'F' : ' ',
165 tainted & TAINT_UNSAFE_SMP ? 'S' : ' ', 165 tainted & TAINT_UNSAFE_SMP ? 'S' : ' ',
166 tainted & TAINT_FORCED_RMMOD ? 'R' : ' ', 166 tainted & TAINT_FORCED_RMMOD ? 'R' : ' ',
167 tainted & TAINT_MACHINE_CHECK ? 'M' : ' ', 167 tainted & TAINT_MACHINE_CHECK ? 'M' : ' ',
168 tainted & TAINT_BAD_PAGE ? 'B' : ' ', 168 tainted & TAINT_BAD_PAGE ? 'B' : ' ',
169 tainted & TAINT_USER ? 'U' : ' '); 169 tainted & TAINT_USER ? 'U' : ' ',
170 tainted & TAINT_DIE ? 'D' : ' ');
170 } 171 }
171 else 172 else
172 snprintf(buf, sizeof(buf), "Not tainted"); 173 snprintf(buf, sizeof(buf), "Not tainted");
diff --git a/kernel/ptrace.c b/kernel/ptrace.c
index b1d11f1c7cf7..4a1745f1dadf 100644
--- a/kernel/ptrace.c
+++ b/kernel/ptrace.c
@@ -490,3 +490,22 @@ asmlinkage long sys_ptrace(long request, long pid, long addr, long data)
490 return ret; 490 return ret;
491} 491}
492#endif /* __ARCH_SYS_PTRACE */ 492#endif /* __ARCH_SYS_PTRACE */
493
494int generic_ptrace_peekdata(struct task_struct *tsk, long addr, long data)
495{
496 unsigned long tmp;
497 int copied;
498
499 copied = access_process_vm(tsk, addr, &tmp, sizeof(tmp), 0);
500 if (copied != sizeof(tmp))
501 return -EIO;
502 return put_user(tmp, (unsigned long __user *)data);
503}
504
505int generic_ptrace_pokedata(struct task_struct *tsk, long addr, long data)
506{
507 int copied;
508
509 copied = access_process_vm(tsk, addr, &data, sizeof(data), 1);
510 return (copied == sizeof(data)) ? 0 : -EIO;
511}
diff --git a/kernel/rcutorture.c b/kernel/rcutorture.c
index 55ba82a85a66..ddff33247785 100644
--- a/kernel/rcutorture.c
+++ b/kernel/rcutorture.c
@@ -40,6 +40,7 @@
40#include <linux/moduleparam.h> 40#include <linux/moduleparam.h>
41#include <linux/percpu.h> 41#include <linux/percpu.h>
42#include <linux/notifier.h> 42#include <linux/notifier.h>
43#include <linux/freezer.h>
43#include <linux/cpu.h> 44#include <linux/cpu.h>
44#include <linux/random.h> 45#include <linux/random.h>
45#include <linux/delay.h> 46#include <linux/delay.h>
@@ -518,7 +519,6 @@ rcu_torture_writer(void *arg)
518 519
519 VERBOSE_PRINTK_STRING("rcu_torture_writer task started"); 520 VERBOSE_PRINTK_STRING("rcu_torture_writer task started");
520 set_user_nice(current, 19); 521 set_user_nice(current, 19);
521 current->flags |= PF_NOFREEZE;
522 522
523 do { 523 do {
524 schedule_timeout_uninterruptible(1); 524 schedule_timeout_uninterruptible(1);
@@ -558,7 +558,6 @@ rcu_torture_fakewriter(void *arg)
558 558
559 VERBOSE_PRINTK_STRING("rcu_torture_fakewriter task started"); 559 VERBOSE_PRINTK_STRING("rcu_torture_fakewriter task started");
560 set_user_nice(current, 19); 560 set_user_nice(current, 19);
561 current->flags |= PF_NOFREEZE;
562 561
563 do { 562 do {
564 schedule_timeout_uninterruptible(1 + rcu_random(&rand)%10); 563 schedule_timeout_uninterruptible(1 + rcu_random(&rand)%10);
@@ -589,7 +588,6 @@ rcu_torture_reader(void *arg)
589 588
590 VERBOSE_PRINTK_STRING("rcu_torture_reader task started"); 589 VERBOSE_PRINTK_STRING("rcu_torture_reader task started");
591 set_user_nice(current, 19); 590 set_user_nice(current, 19);
592 current->flags |= PF_NOFREEZE;
593 591
594 do { 592 do {
595 idx = cur_ops->readlock(); 593 idx = cur_ops->readlock();
diff --git a/kernel/rtmutex-tester.c b/kernel/rtmutex-tester.c
index 015fc633c96c..e3055ba69159 100644
--- a/kernel/rtmutex-tester.c
+++ b/kernel/rtmutex-tester.c
@@ -260,6 +260,7 @@ static int test_func(void *data)
260 int ret; 260 int ret;
261 261
262 current->flags |= PF_MUTEX_TESTER; 262 current->flags |= PF_MUTEX_TESTER;
263 set_freezable();
263 allow_signal(SIGHUP); 264 allow_signal(SIGHUP);
264 265
265 for(;;) { 266 for(;;) {
diff --git a/kernel/sched.c b/kernel/sched.c
index 1c8076676eb1..cb31fb4a1379 100644
--- a/kernel/sched.c
+++ b/kernel/sched.c
@@ -4912,8 +4912,6 @@ static int migration_thread(void *data)
4912 struct migration_req *req; 4912 struct migration_req *req;
4913 struct list_head *head; 4913 struct list_head *head;
4914 4914
4915 try_to_freeze();
4916
4917 spin_lock_irq(&rq->lock); 4915 spin_lock_irq(&rq->lock);
4918 4916
4919 if (cpu_is_offline(cpu)) { 4917 if (cpu_is_offline(cpu)) {
@@ -5147,7 +5145,6 @@ migration_call(struct notifier_block *nfb, unsigned long action, void *hcpu)
5147 p = kthread_create(migration_thread, hcpu, "migration/%d", cpu); 5145 p = kthread_create(migration_thread, hcpu, "migration/%d", cpu);
5148 if (IS_ERR(p)) 5146 if (IS_ERR(p))
5149 return NOTIFY_BAD; 5147 return NOTIFY_BAD;
5150 p->flags |= PF_NOFREEZE;
5151 kthread_bind(p, cpu); 5148 kthread_bind(p, cpu);
5152 /* Must be high prio: stop_machine expects to yield to it. */ 5149 /* Must be high prio: stop_machine expects to yield to it. */
5153 rq = task_rq_lock(p, &flags); 5150 rq = task_rq_lock(p, &flags);
diff --git a/kernel/softirq.c b/kernel/softirq.c
index 8de267790166..0f546ddea43d 100644
--- a/kernel/softirq.c
+++ b/kernel/softirq.c
@@ -14,6 +14,7 @@
14#include <linux/notifier.h> 14#include <linux/notifier.h>
15#include <linux/percpu.h> 15#include <linux/percpu.h>
16#include <linux/cpu.h> 16#include <linux/cpu.h>
17#include <linux/freezer.h>
17#include <linux/kthread.h> 18#include <linux/kthread.h>
18#include <linux/rcupdate.h> 19#include <linux/rcupdate.h>
19#include <linux/smp.h> 20#include <linux/smp.h>
@@ -488,8 +489,6 @@ void __init softirq_init(void)
488 489
489static int ksoftirqd(void * __bind_cpu) 490static int ksoftirqd(void * __bind_cpu)
490{ 491{
491 current->flags |= PF_NOFREEZE;
492
493 set_current_state(TASK_INTERRUPTIBLE); 492 set_current_state(TASK_INTERRUPTIBLE);
494 493
495 while (!kthread_should_stop()) { 494 while (!kthread_should_stop()) {
diff --git a/kernel/softlockup.c b/kernel/softlockup.c
index 0131e296ffb4..708d4882c0c3 100644
--- a/kernel/softlockup.c
+++ b/kernel/softlockup.c
@@ -10,6 +10,7 @@
10#include <linux/cpu.h> 10#include <linux/cpu.h>
11#include <linux/init.h> 11#include <linux/init.h>
12#include <linux/delay.h> 12#include <linux/delay.h>
13#include <linux/freezer.h>
13#include <linux/kthread.h> 14#include <linux/kthread.h>
14#include <linux/notifier.h> 15#include <linux/notifier.h>
15#include <linux/module.h> 16#include <linux/module.h>
@@ -116,7 +117,6 @@ static int watchdog(void * __bind_cpu)
116 struct sched_param param = { .sched_priority = MAX_RT_PRIO-1 }; 117 struct sched_param param = { .sched_priority = MAX_RT_PRIO-1 };
117 118
118 sched_setscheduler(current, SCHED_FIFO, &param); 119 sched_setscheduler(current, SCHED_FIFO, &param);
119 current->flags |= PF_NOFREEZE;
120 120
121 /* initialize timestamp */ 121 /* initialize timestamp */
122 touch_softlockup_watchdog(); 122 touch_softlockup_watchdog();
diff --git a/kernel/sysctl.c b/kernel/sysctl.c
index 2ce7acf841ae..7063ebc6db05 100644
--- a/kernel/sysctl.c
+++ b/kernel/sysctl.c
@@ -29,6 +29,7 @@
29#include <linux/utsname.h> 29#include <linux/utsname.h>
30#include <linux/capability.h> 30#include <linux/capability.h>
31#include <linux/smp_lock.h> 31#include <linux/smp_lock.h>
32#include <linux/fs.h>
32#include <linux/init.h> 33#include <linux/init.h>
33#include <linux/kernel.h> 34#include <linux/kernel.h>
34#include <linux/kobject.h> 35#include <linux/kobject.h>
@@ -49,9 +50,6 @@
49#include <asm/uaccess.h> 50#include <asm/uaccess.h>
50#include <asm/processor.h> 51#include <asm/processor.h>
51 52
52extern int proc_nr_files(ctl_table *table, int write, struct file *filp,
53 void __user *buffer, size_t *lenp, loff_t *ppos);
54
55#ifdef CONFIG_X86 53#ifdef CONFIG_X86
56#include <asm/nmi.h> 54#include <asm/nmi.h>
57#include <asm/stacktrace.h> 55#include <asm/stacktrace.h>
@@ -826,6 +824,14 @@ static ctl_table vm_table[] = {
826 .mode = 0644, 824 .mode = 0644,
827 .proc_handler = &proc_dointvec, 825 .proc_handler = &proc_dointvec,
828 }, 826 },
827 {
828 .ctl_name = CTL_UNNUMBERED,
829 .procname = "hugepages_treat_as_movable",
830 .data = &hugepages_treat_as_movable,
831 .maxlen = sizeof(int),
832 .mode = 0644,
833 .proc_handler = &hugetlb_treat_movable_handler,
834 },
829#endif 835#endif
830 { 836 {
831 .ctl_name = VM_LOWMEM_RESERVE_RATIO, 837 .ctl_name = VM_LOWMEM_RESERVE_RATIO,
diff --git a/kernel/time/timer_list.c b/kernel/time/timer_list.c
index 8bbcfb77f7d2..e5edc3a22a08 100644
--- a/kernel/time/timer_list.c
+++ b/kernel/time/timer_list.c
@@ -38,7 +38,7 @@ DECLARE_PER_CPU(struct hrtimer_cpu_base, hrtimer_bases);
38 38
39static void print_name_offset(struct seq_file *m, void *sym) 39static void print_name_offset(struct seq_file *m, void *sym)
40{ 40{
41 char symname[KSYM_NAME_LEN+1]; 41 char symname[KSYM_NAME_LEN];
42 42
43 if (lookup_symbol_name((unsigned long)sym, symname) < 0) 43 if (lookup_symbol_name((unsigned long)sym, symname) < 0)
44 SEQ_printf(m, "<%p>", sym); 44 SEQ_printf(m, "<%p>", sym);
diff --git a/kernel/time/timer_stats.c b/kernel/time/timer_stats.c
index 9b8a826236dd..8ed62fda16c6 100644
--- a/kernel/time/timer_stats.c
+++ b/kernel/time/timer_stats.c
@@ -269,7 +269,7 @@ void timer_stats_update_stats(void *timer, pid_t pid, void *startf,
269 269
270static void print_name_offset(struct seq_file *m, unsigned long addr) 270static void print_name_offset(struct seq_file *m, unsigned long addr)
271{ 271{
272 char symname[KSYM_NAME_LEN+1]; 272 char symname[KSYM_NAME_LEN];
273 273
274 if (lookup_symbol_name(addr, symname) < 0) 274 if (lookup_symbol_name(addr, symname) < 0)
275 seq_printf(m, "<%p>", (void *)addr); 275 seq_printf(m, "<%p>", (void *)addr);
diff --git a/kernel/timer.c b/kernel/timer.c
index 1258371e0d2b..b7792fb03387 100644
--- a/kernel/timer.c
+++ b/kernel/timer.c
@@ -1221,7 +1221,8 @@ static int __devinit init_timers_cpu(int cpu)
1221 /* 1221 /*
1222 * The APs use this path later in boot 1222 * The APs use this path later in boot
1223 */ 1223 */
1224 base = kmalloc_node(sizeof(*base), GFP_KERNEL, 1224 base = kmalloc_node(sizeof(*base),
1225 GFP_KERNEL | __GFP_ZERO,
1225 cpu_to_node(cpu)); 1226 cpu_to_node(cpu));
1226 if (!base) 1227 if (!base)
1227 return -ENOMEM; 1228 return -ENOMEM;
@@ -1232,7 +1233,6 @@ static int __devinit init_timers_cpu(int cpu)
1232 kfree(base); 1233 kfree(base);
1233 return -ENOMEM; 1234 return -ENOMEM;
1234 } 1235 }
1235 memset(base, 0, sizeof(*base));
1236 per_cpu(tvec_bases, cpu) = base; 1236 per_cpu(tvec_bases, cpu) = base;
1237 } else { 1237 } else {
1238 /* 1238 /*
diff --git a/kernel/workqueue.c b/kernel/workqueue.c
index d7d3fa3072e5..58e5c152a6bb 100644
--- a/kernel/workqueue.c
+++ b/kernel/workqueue.c
@@ -282,8 +282,8 @@ static int worker_thread(void *__cwq)
282 struct cpu_workqueue_struct *cwq = __cwq; 282 struct cpu_workqueue_struct *cwq = __cwq;
283 DEFINE_WAIT(wait); 283 DEFINE_WAIT(wait);
284 284
285 if (!cwq->wq->freezeable) 285 if (cwq->wq->freezeable)
286 current->flags |= PF_NOFREEZE; 286 set_freezable();
287 287
288 set_user_nice(current, -5); 288 set_user_nice(current, -5);
289 289
@@ -752,18 +752,17 @@ static void cleanup_workqueue_thread(struct cpu_workqueue_struct *cwq, int cpu)
752 if (cwq->thread == NULL) 752 if (cwq->thread == NULL)
753 return; 753 return;
754 754
755 flush_cpu_workqueue(cwq);
755 /* 756 /*
756 * If the caller is CPU_DEAD the single flush_cpu_workqueue() 757 * If the caller is CPU_DEAD and cwq->worklist was not empty,
757 * is not enough, a concurrent flush_workqueue() can insert a 758 * a concurrent flush_workqueue() can insert a barrier after us.
758 * barrier after us. 759 * However, in that case run_workqueue() won't return and check
760 * kthread_should_stop() until it flushes all work_struct's.
759 * When ->worklist becomes empty it is safe to exit because no 761 * When ->worklist becomes empty it is safe to exit because no
760 * more work_structs can be queued on this cwq: flush_workqueue 762 * more work_structs can be queued on this cwq: flush_workqueue
761 * checks list_empty(), and a "normal" queue_work() can't use 763 * checks list_empty(), and a "normal" queue_work() can't use
762 * a dead CPU. 764 * a dead CPU.
763 */ 765 */
764 while (flush_cpu_workqueue(cwq))
765 ;
766
767 kthread_stop(cwq->thread); 766 kthread_stop(cwq->thread);
768 cwq->thread = NULL; 767 cwq->thread = NULL;
769} 768}
diff --git a/lib/Kconfig b/lib/Kconfig
index 3eb29d5dc4f5..e5c2c514174a 100644
--- a/lib/Kconfig
+++ b/lib/Kconfig
@@ -41,6 +41,14 @@ config CRC32
41 kernel tree does. Such modules that use library CRC32 functions 41 kernel tree does. Such modules that use library CRC32 functions
42 require M here. 42 require M here.
43 43
44config CRC7
45 tristate "CRC7 functions"
46 help
47 This option is provided for the case where no in-kernel-tree
48 modules require CRC7 functions, but a module built outside
49 the kernel tree does. Such modules that use library CRC7
50 functions require M here.
51
44config LIBCRC32C 52config LIBCRC32C
45 tristate "CRC32c (Castagnoli, et al) Cyclic Redundancy-Check" 53 tristate "CRC32c (Castagnoli, et al) Cyclic Redundancy-Check"
46 help 54 help
diff --git a/lib/Makefile b/lib/Makefile
index 8363b60be9dd..da68b2ca0606 100644
--- a/lib/Makefile
+++ b/lib/Makefile
@@ -43,6 +43,7 @@ obj-$(CONFIG_CRC_CCITT) += crc-ccitt.o
43obj-$(CONFIG_CRC16) += crc16.o 43obj-$(CONFIG_CRC16) += crc16.o
44obj-$(CONFIG_CRC_ITU_T) += crc-itu-t.o 44obj-$(CONFIG_CRC_ITU_T) += crc-itu-t.o
45obj-$(CONFIG_CRC32) += crc32.o 45obj-$(CONFIG_CRC32) += crc32.o
46obj-$(CONFIG_CRC7) += crc7.o
46obj-$(CONFIG_LIBCRC32C) += libcrc32c.o 47obj-$(CONFIG_LIBCRC32C) += libcrc32c.o
47obj-$(CONFIG_GENERIC_ALLOCATOR) += genalloc.o 48obj-$(CONFIG_GENERIC_ALLOCATOR) += genalloc.o
48 49
diff --git a/lib/crc7.c b/lib/crc7.c
new file mode 100644
index 000000000000..f1c3a144cec1
--- /dev/null
+++ b/lib/crc7.c
@@ -0,0 +1,68 @@
1/*
2 * crc7.c
3 *
4 * This source code is licensed under the GNU General Public License,
5 * Version 2. See the file COPYING for more details.
6 */
7
8#include <linux/types.h>
9#include <linux/module.h>
10#include <linux/crc7.h>
11
12
13/* Table for CRC-7 (polynomial x^7 + x^3 + 1) */
14const u8 crc7_syndrome_table[256] = {
15 0x00, 0x09, 0x12, 0x1b, 0x24, 0x2d, 0x36, 0x3f,
16 0x48, 0x41, 0x5a, 0x53, 0x6c, 0x65, 0x7e, 0x77,
17 0x19, 0x10, 0x0b, 0x02, 0x3d, 0x34, 0x2f, 0x26,
18 0x51, 0x58, 0x43, 0x4a, 0x75, 0x7c, 0x67, 0x6e,
19 0x32, 0x3b, 0x20, 0x29, 0x16, 0x1f, 0x04, 0x0d,
20 0x7a, 0x73, 0x68, 0x61, 0x5e, 0x57, 0x4c, 0x45,
21 0x2b, 0x22, 0x39, 0x30, 0x0f, 0x06, 0x1d, 0x14,
22 0x63, 0x6a, 0x71, 0x78, 0x47, 0x4e, 0x55, 0x5c,
23 0x64, 0x6d, 0x76, 0x7f, 0x40, 0x49, 0x52, 0x5b,
24 0x2c, 0x25, 0x3e, 0x37, 0x08, 0x01, 0x1a, 0x13,
25 0x7d, 0x74, 0x6f, 0x66, 0x59, 0x50, 0x4b, 0x42,
26 0x35, 0x3c, 0x27, 0x2e, 0x11, 0x18, 0x03, 0x0a,
27 0x56, 0x5f, 0x44, 0x4d, 0x72, 0x7b, 0x60, 0x69,
28 0x1e, 0x17, 0x0c, 0x05, 0x3a, 0x33, 0x28, 0x21,
29 0x4f, 0x46, 0x5d, 0x54, 0x6b, 0x62, 0x79, 0x70,
30 0x07, 0x0e, 0x15, 0x1c, 0x23, 0x2a, 0x31, 0x38,
31 0x41, 0x48, 0x53, 0x5a, 0x65, 0x6c, 0x77, 0x7e,
32 0x09, 0x00, 0x1b, 0x12, 0x2d, 0x24, 0x3f, 0x36,
33 0x58, 0x51, 0x4a, 0x43, 0x7c, 0x75, 0x6e, 0x67,
34 0x10, 0x19, 0x02, 0x0b, 0x34, 0x3d, 0x26, 0x2f,
35 0x73, 0x7a, 0x61, 0x68, 0x57, 0x5e, 0x45, 0x4c,
36 0x3b, 0x32, 0x29, 0x20, 0x1f, 0x16, 0x0d, 0x04,
37 0x6a, 0x63, 0x78, 0x71, 0x4e, 0x47, 0x5c, 0x55,
38 0x22, 0x2b, 0x30, 0x39, 0x06, 0x0f, 0x14, 0x1d,
39 0x25, 0x2c, 0x37, 0x3e, 0x01, 0x08, 0x13, 0x1a,
40 0x6d, 0x64, 0x7f, 0x76, 0x49, 0x40, 0x5b, 0x52,
41 0x3c, 0x35, 0x2e, 0x27, 0x18, 0x11, 0x0a, 0x03,
42 0x74, 0x7d, 0x66, 0x6f, 0x50, 0x59, 0x42, 0x4b,
43 0x17, 0x1e, 0x05, 0x0c, 0x33, 0x3a, 0x21, 0x28,
44 0x5f, 0x56, 0x4d, 0x44, 0x7b, 0x72, 0x69, 0x60,
45 0x0e, 0x07, 0x1c, 0x15, 0x2a, 0x23, 0x38, 0x31,
46 0x46, 0x4f, 0x54, 0x5d, 0x62, 0x6b, 0x70, 0x79
47};
48EXPORT_SYMBOL(crc7_syndrome_table);
49
50/**
51 * crc7 - update the CRC7 for the data buffer
52 * @crc: previous CRC7 value
53 * @buffer: data pointer
54 * @len: number of bytes in the buffer
55 * Context: any
56 *
57 * Returns the updated CRC7 value.
58 */
59u8 crc7(u8 crc, const u8 *buffer, size_t len)
60{
61 while (len--)
62 crc = crc7_byte(crc, *buffer++);
63 return crc;
64}
65EXPORT_SYMBOL(crc7);
66
67MODULE_DESCRIPTION("CRC7 calculations");
68MODULE_LICENSE("GPL");
diff --git a/lib/genalloc.c b/lib/genalloc.c
index eb7c2bab9ebf..f6d276db2d58 100644
--- a/lib/genalloc.c
+++ b/lib/genalloc.c
@@ -54,11 +54,10 @@ int gen_pool_add(struct gen_pool *pool, unsigned long addr, size_t size,
54 int nbytes = sizeof(struct gen_pool_chunk) + 54 int nbytes = sizeof(struct gen_pool_chunk) +
55 (nbits + BITS_PER_BYTE - 1) / BITS_PER_BYTE; 55 (nbits + BITS_PER_BYTE - 1) / BITS_PER_BYTE;
56 56
57 chunk = kmalloc_node(nbytes, GFP_KERNEL, nid); 57 chunk = kmalloc_node(nbytes, GFP_KERNEL | __GFP_ZERO, nid);
58 if (unlikely(chunk == NULL)) 58 if (unlikely(chunk == NULL))
59 return -1; 59 return -1;
60 60
61 memset(chunk, 0, nbytes);
62 spin_lock_init(&chunk->lock); 61 spin_lock_init(&chunk->lock);
63 chunk->start_addr = addr; 62 chunk->start_addr = addr;
64 chunk->end_addr = addr + size; 63 chunk->end_addr = addr + size;
diff --git a/mm/Kconfig b/mm/Kconfig
index 086af703da43..86187221e78f 100644
--- a/mm/Kconfig
+++ b/mm/Kconfig
@@ -163,6 +163,10 @@ config ZONE_DMA_FLAG
163 default "0" if !ZONE_DMA 163 default "0" if !ZONE_DMA
164 default "1" 164 default "1"
165 165
166config BOUNCE
167 def_bool y
168 depends on BLOCK && MMU && (ZONE_DMA || HIGHMEM)
169
166config NR_QUICK 170config NR_QUICK
167 int 171 int
168 depends on QUICKLIST 172 depends on QUICKLIST
diff --git a/mm/Makefile b/mm/Makefile
index a9148ea329aa..245e33ab00c4 100644
--- a/mm/Makefile
+++ b/mm/Makefile
@@ -13,9 +13,7 @@ obj-y := bootmem.o filemap.o mempool.o oom_kill.o fadvise.o \
13 prio_tree.o util.o mmzone.o vmstat.o backing-dev.o \ 13 prio_tree.o util.o mmzone.o vmstat.o backing-dev.o \
14 $(mmu-y) 14 $(mmu-y)
15 15
16ifeq ($(CONFIG_MMU)$(CONFIG_BLOCK),yy) 16obj-$(CONFIG_BOUNCE) += bounce.o
17obj-y += bounce.o
18endif
19obj-$(CONFIG_SWAP) += page_io.o swap_state.o swapfile.o thrash.o 17obj-$(CONFIG_SWAP) += page_io.o swap_state.o swapfile.o thrash.o
20obj-$(CONFIG_HUGETLBFS) += hugetlb.o 18obj-$(CONFIG_HUGETLBFS) += hugetlb.o
21obj-$(CONFIG_NUMA) += mempolicy.o 19obj-$(CONFIG_NUMA) += mempolicy.o
diff --git a/mm/allocpercpu.c b/mm/allocpercpu.c
index b2486cf887a0..00b02623f008 100644
--- a/mm/allocpercpu.c
+++ b/mm/allocpercpu.c
@@ -53,12 +53,9 @@ void *percpu_populate(void *__pdata, size_t size, gfp_t gfp, int cpu)
53 int node = cpu_to_node(cpu); 53 int node = cpu_to_node(cpu);
54 54
55 BUG_ON(pdata->ptrs[cpu]); 55 BUG_ON(pdata->ptrs[cpu]);
56 if (node_online(node)) { 56 if (node_online(node))
57 /* FIXME: kzalloc_node(size, gfp, node) */ 57 pdata->ptrs[cpu] = kmalloc_node(size, gfp|__GFP_ZERO, node);
58 pdata->ptrs[cpu] = kmalloc_node(size, gfp, node); 58 else
59 if (pdata->ptrs[cpu])
60 memset(pdata->ptrs[cpu], 0, size);
61 } else
62 pdata->ptrs[cpu] = kzalloc(size, gfp); 59 pdata->ptrs[cpu] = kzalloc(size, gfp);
63 return pdata->ptrs[cpu]; 60 return pdata->ptrs[cpu];
64} 61}
diff --git a/mm/filemap.c b/mm/filemap.c
index 100b99c2d504..5d5449f3d41c 100644
--- a/mm/filemap.c
+++ b/mm/filemap.c
@@ -867,13 +867,11 @@ void do_generic_mapping_read(struct address_space *mapping,
867{ 867{
868 struct inode *inode = mapping->host; 868 struct inode *inode = mapping->host;
869 unsigned long index; 869 unsigned long index;
870 unsigned long end_index;
871 unsigned long offset; 870 unsigned long offset;
872 unsigned long last_index; 871 unsigned long last_index;
873 unsigned long next_index; 872 unsigned long next_index;
874 unsigned long prev_index; 873 unsigned long prev_index;
875 unsigned int prev_offset; 874 unsigned int prev_offset;
876 loff_t isize;
877 struct page *cached_page; 875 struct page *cached_page;
878 int error; 876 int error;
879 struct file_ra_state ra = *_ra; 877 struct file_ra_state ra = *_ra;
@@ -886,27 +884,12 @@ void do_generic_mapping_read(struct address_space *mapping,
886 last_index = (*ppos + desc->count + PAGE_CACHE_SIZE-1) >> PAGE_CACHE_SHIFT; 884 last_index = (*ppos + desc->count + PAGE_CACHE_SIZE-1) >> PAGE_CACHE_SHIFT;
887 offset = *ppos & ~PAGE_CACHE_MASK; 885 offset = *ppos & ~PAGE_CACHE_MASK;
888 886
889 isize = i_size_read(inode);
890 if (!isize)
891 goto out;
892
893 end_index = (isize - 1) >> PAGE_CACHE_SHIFT;
894 for (;;) { 887 for (;;) {
895 struct page *page; 888 struct page *page;
889 unsigned long end_index;
890 loff_t isize;
896 unsigned long nr, ret; 891 unsigned long nr, ret;
897 892
898 /* nr is the maximum number of bytes to copy from this page */
899 nr = PAGE_CACHE_SIZE;
900 if (index >= end_index) {
901 if (index > end_index)
902 goto out;
903 nr = ((isize - 1) & ~PAGE_CACHE_MASK) + 1;
904 if (nr <= offset) {
905 goto out;
906 }
907 }
908 nr = nr - offset;
909
910 cond_resched(); 893 cond_resched();
911 if (index == next_index) 894 if (index == next_index)
912 next_index = page_cache_readahead(mapping, &ra, filp, 895 next_index = page_cache_readahead(mapping, &ra, filp,
@@ -921,6 +904,32 @@ find_page:
921 if (!PageUptodate(page)) 904 if (!PageUptodate(page))
922 goto page_not_up_to_date; 905 goto page_not_up_to_date;
923page_ok: 906page_ok:
907 /*
908 * i_size must be checked after we know the page is Uptodate.
909 *
910 * Checking i_size after the check allows us to calculate
911 * the correct value for "nr", which means the zero-filled
912 * part of the page is not copied back to userspace (unless
913 * another truncate extends the file - this is desired though).
914 */
915
916 isize = i_size_read(inode);
917 end_index = (isize - 1) >> PAGE_CACHE_SHIFT;
918 if (unlikely(!isize || index > end_index)) {
919 page_cache_release(page);
920 goto out;
921 }
922
923 /* nr is the maximum number of bytes to copy from this page */
924 nr = PAGE_CACHE_SIZE;
925 if (index == end_index) {
926 nr = ((isize - 1) & ~PAGE_CACHE_MASK) + 1;
927 if (nr <= offset) {
928 page_cache_release(page);
929 goto out;
930 }
931 }
932 nr = nr - offset;
924 933
925 /* If users can be writing to this page using arbitrary 934 /* If users can be writing to this page using arbitrary
926 * virtual addresses, take care about potential aliasing 935 * virtual addresses, take care about potential aliasing
@@ -1007,31 +1016,6 @@ readpage:
1007 unlock_page(page); 1016 unlock_page(page);
1008 } 1017 }
1009 1018
1010 /*
1011 * i_size must be checked after we have done ->readpage.
1012 *
1013 * Checking i_size after the readpage allows us to calculate
1014 * the correct value for "nr", which means the zero-filled
1015 * part of the page is not copied back to userspace (unless
1016 * another truncate extends the file - this is desired though).
1017 */
1018 isize = i_size_read(inode);
1019 end_index = (isize - 1) >> PAGE_CACHE_SHIFT;
1020 if (unlikely(!isize || index > end_index)) {
1021 page_cache_release(page);
1022 goto out;
1023 }
1024
1025 /* nr is the maximum number of bytes to copy from this page */
1026 nr = PAGE_CACHE_SIZE;
1027 if (index == end_index) {
1028 nr = ((isize - 1) & ~PAGE_CACHE_MASK) + 1;
1029 if (nr <= offset) {
1030 page_cache_release(page);
1031 goto out;
1032 }
1033 }
1034 nr = nr - offset;
1035 goto page_ok; 1019 goto page_ok;
1036 1020
1037readpage_error: 1021readpage_error:
diff --git a/mm/highmem.c b/mm/highmem.c
index be8f8d36a8b9..7a967bc35152 100644
--- a/mm/highmem.c
+++ b/mm/highmem.c
@@ -46,9 +46,14 @@ unsigned int nr_free_highpages (void)
46 pg_data_t *pgdat; 46 pg_data_t *pgdat;
47 unsigned int pages = 0; 47 unsigned int pages = 0;
48 48
49 for_each_online_pgdat(pgdat) 49 for_each_online_pgdat(pgdat) {
50 pages += zone_page_state(&pgdat->node_zones[ZONE_HIGHMEM], 50 pages += zone_page_state(&pgdat->node_zones[ZONE_HIGHMEM],
51 NR_FREE_PAGES); 51 NR_FREE_PAGES);
52 if (zone_movable_is_highmem())
53 pages += zone_page_state(
54 &pgdat->node_zones[ZONE_MOVABLE],
55 NR_FREE_PAGES);
56 }
52 57
53 return pages; 58 return pages;
54} 59}
diff --git a/mm/hugetlb.c b/mm/hugetlb.c
index acc0fb3cf067..6912bbf33faa 100644
--- a/mm/hugetlb.c
+++ b/mm/hugetlb.c
@@ -27,6 +27,9 @@ unsigned long max_huge_pages;
27static struct list_head hugepage_freelists[MAX_NUMNODES]; 27static struct list_head hugepage_freelists[MAX_NUMNODES];
28static unsigned int nr_huge_pages_node[MAX_NUMNODES]; 28static unsigned int nr_huge_pages_node[MAX_NUMNODES];
29static unsigned int free_huge_pages_node[MAX_NUMNODES]; 29static unsigned int free_huge_pages_node[MAX_NUMNODES];
30static gfp_t htlb_alloc_mask = GFP_HIGHUSER;
31unsigned long hugepages_treat_as_movable;
32
30/* 33/*
31 * Protects updates to hugepage_freelists, nr_huge_pages, and free_huge_pages 34 * Protects updates to hugepage_freelists, nr_huge_pages, and free_huge_pages
32 */ 35 */
@@ -68,12 +71,13 @@ static struct page *dequeue_huge_page(struct vm_area_struct *vma,
68{ 71{
69 int nid; 72 int nid;
70 struct page *page = NULL; 73 struct page *page = NULL;
71 struct zonelist *zonelist = huge_zonelist(vma, address); 74 struct zonelist *zonelist = huge_zonelist(vma, address,
75 htlb_alloc_mask);
72 struct zone **z; 76 struct zone **z;
73 77
74 for (z = zonelist->zones; *z; z++) { 78 for (z = zonelist->zones; *z; z++) {
75 nid = zone_to_nid(*z); 79 nid = zone_to_nid(*z);
76 if (cpuset_zone_allowed_softwall(*z, GFP_HIGHUSER) && 80 if (cpuset_zone_allowed_softwall(*z, htlb_alloc_mask) &&
77 !list_empty(&hugepage_freelists[nid])) 81 !list_empty(&hugepage_freelists[nid]))
78 break; 82 break;
79 } 83 }
@@ -113,7 +117,7 @@ static int alloc_fresh_huge_page(void)
113 prev_nid = nid; 117 prev_nid = nid;
114 spin_unlock(&nid_lock); 118 spin_unlock(&nid_lock);
115 119
116 page = alloc_pages_node(nid, GFP_HIGHUSER|__GFP_COMP|__GFP_NOWARN, 120 page = alloc_pages_node(nid, htlb_alloc_mask|__GFP_COMP|__GFP_NOWARN,
117 HUGETLB_PAGE_ORDER); 121 HUGETLB_PAGE_ORDER);
118 if (page) { 122 if (page) {
119 set_compound_page_dtor(page, free_huge_page); 123 set_compound_page_dtor(page, free_huge_page);
@@ -263,6 +267,19 @@ int hugetlb_sysctl_handler(struct ctl_table *table, int write,
263 max_huge_pages = set_max_huge_pages(max_huge_pages); 267 max_huge_pages = set_max_huge_pages(max_huge_pages);
264 return 0; 268 return 0;
265} 269}
270
271int hugetlb_treat_movable_handler(struct ctl_table *table, int write,
272 struct file *file, void __user *buffer,
273 size_t *length, loff_t *ppos)
274{
275 proc_dointvec(table, write, file, buffer, length, ppos);
276 if (hugepages_treat_as_movable)
277 htlb_alloc_mask = GFP_HIGHUSER_MOVABLE;
278 else
279 htlb_alloc_mask = GFP_HIGHUSER;
280 return 0;
281}
282
266#endif /* CONFIG_SYSCTL */ 283#endif /* CONFIG_SYSCTL */
267 284
268int hugetlb_report_meminfo(char *buf) 285int hugetlb_report_meminfo(char *buf)
@@ -481,7 +498,7 @@ static int hugetlb_cow(struct mm_struct *mm, struct vm_area_struct *vma,
481 return VM_FAULT_MINOR; 498 return VM_FAULT_MINOR;
482} 499}
483 500
484int hugetlb_no_page(struct mm_struct *mm, struct vm_area_struct *vma, 501static int hugetlb_no_page(struct mm_struct *mm, struct vm_area_struct *vma,
485 unsigned long address, pte_t *ptep, int write_access) 502 unsigned long address, pte_t *ptep, int write_access)
486{ 503{
487 int ret = VM_FAULT_SIGBUS; 504 int ret = VM_FAULT_SIGBUS;
diff --git a/mm/memory.c b/mm/memory.c
index b3d73bb1f680..9c6ff7fffdc8 100644
--- a/mm/memory.c
+++ b/mm/memory.c
@@ -1715,11 +1715,11 @@ gotten:
1715 if (unlikely(anon_vma_prepare(vma))) 1715 if (unlikely(anon_vma_prepare(vma)))
1716 goto oom; 1716 goto oom;
1717 if (old_page == ZERO_PAGE(address)) { 1717 if (old_page == ZERO_PAGE(address)) {
1718 new_page = alloc_zeroed_user_highpage(vma, address); 1718 new_page = alloc_zeroed_user_highpage_movable(vma, address);
1719 if (!new_page) 1719 if (!new_page)
1720 goto oom; 1720 goto oom;
1721 } else { 1721 } else {
1722 new_page = alloc_page_vma(GFP_HIGHUSER, vma, address); 1722 new_page = alloc_page_vma(GFP_HIGHUSER_MOVABLE, vma, address);
1723 if (!new_page) 1723 if (!new_page)
1724 goto oom; 1724 goto oom;
1725 cow_user_page(new_page, old_page, address, vma); 1725 cow_user_page(new_page, old_page, address, vma);
@@ -2237,7 +2237,7 @@ static int do_anonymous_page(struct mm_struct *mm, struct vm_area_struct *vma,
2237 2237
2238 if (unlikely(anon_vma_prepare(vma))) 2238 if (unlikely(anon_vma_prepare(vma)))
2239 goto oom; 2239 goto oom;
2240 page = alloc_zeroed_user_highpage(vma, address); 2240 page = alloc_zeroed_user_highpage_movable(vma, address);
2241 if (!page) 2241 if (!page)
2242 goto oom; 2242 goto oom;
2243 2243
@@ -2340,7 +2340,8 @@ retry:
2340 2340
2341 if (unlikely(anon_vma_prepare(vma))) 2341 if (unlikely(anon_vma_prepare(vma)))
2342 goto oom; 2342 goto oom;
2343 page = alloc_page_vma(GFP_HIGHUSER, vma, address); 2343 page = alloc_page_vma(GFP_HIGHUSER_MOVABLE,
2344 vma, address);
2344 if (!page) 2345 if (!page)
2345 goto oom; 2346 goto oom;
2346 copy_user_highpage(page, new_page, address, vma); 2347 copy_user_highpage(page, new_page, address, vma);
diff --git a/mm/mempolicy.c b/mm/mempolicy.c
index 188f8d9c4aed..9f4e9b95e8f2 100644
--- a/mm/mempolicy.c
+++ b/mm/mempolicy.c
@@ -594,7 +594,7 @@ static void migrate_page_add(struct page *page, struct list_head *pagelist,
594 594
595static struct page *new_node_page(struct page *page, unsigned long node, int **x) 595static struct page *new_node_page(struct page *page, unsigned long node, int **x)
596{ 596{
597 return alloc_pages_node(node, GFP_HIGHUSER, 0); 597 return alloc_pages_node(node, GFP_HIGHUSER_MOVABLE, 0);
598} 598}
599 599
600/* 600/*
@@ -710,7 +710,8 @@ static struct page *new_vma_page(struct page *page, unsigned long private, int *
710{ 710{
711 struct vm_area_struct *vma = (struct vm_area_struct *)private; 711 struct vm_area_struct *vma = (struct vm_area_struct *)private;
712 712
713 return alloc_page_vma(GFP_HIGHUSER, vma, page_address_in_vma(page, vma)); 713 return alloc_page_vma(GFP_HIGHUSER_MOVABLE, vma,
714 page_address_in_vma(page, vma));
714} 715}
715#else 716#else
716 717
@@ -1202,7 +1203,8 @@ static inline unsigned interleave_nid(struct mempolicy *pol,
1202 1203
1203#ifdef CONFIG_HUGETLBFS 1204#ifdef CONFIG_HUGETLBFS
1204/* Return a zonelist suitable for a huge page allocation. */ 1205/* Return a zonelist suitable for a huge page allocation. */
1205struct zonelist *huge_zonelist(struct vm_area_struct *vma, unsigned long addr) 1206struct zonelist *huge_zonelist(struct vm_area_struct *vma, unsigned long addr,
1207 gfp_t gfp_flags)
1206{ 1208{
1207 struct mempolicy *pol = get_vma_policy(current, vma, addr); 1209 struct mempolicy *pol = get_vma_policy(current, vma, addr);
1208 1210
@@ -1210,7 +1212,7 @@ struct zonelist *huge_zonelist(struct vm_area_struct *vma, unsigned long addr)
1210 unsigned nid; 1212 unsigned nid;
1211 1213
1212 nid = interleave_nid(pol, vma, addr, HPAGE_SHIFT); 1214 nid = interleave_nid(pol, vma, addr, HPAGE_SHIFT);
1213 return NODE_DATA(nid)->node_zonelists + gfp_zone(GFP_HIGHUSER); 1215 return NODE_DATA(nid)->node_zonelists + gfp_zone(gfp_flags);
1214 } 1216 }
1215 return zonelist_policy(GFP_HIGHUSER, pol); 1217 return zonelist_policy(GFP_HIGHUSER, pol);
1216} 1218}
diff --git a/mm/mempool.c b/mm/mempool.c
index 3e8f1fed0e1f..02d5ec3feabc 100644
--- a/mm/mempool.c
+++ b/mm/mempool.c
@@ -62,10 +62,9 @@ mempool_t *mempool_create_node(int min_nr, mempool_alloc_t *alloc_fn,
62 mempool_free_t *free_fn, void *pool_data, int node_id) 62 mempool_free_t *free_fn, void *pool_data, int node_id)
63{ 63{
64 mempool_t *pool; 64 mempool_t *pool;
65 pool = kmalloc_node(sizeof(*pool), GFP_KERNEL, node_id); 65 pool = kmalloc_node(sizeof(*pool), GFP_KERNEL | __GFP_ZERO, node_id);
66 if (!pool) 66 if (!pool)
67 return NULL; 67 return NULL;
68 memset(pool, 0, sizeof(*pool));
69 pool->elements = kmalloc_node(min_nr * sizeof(void *), 68 pool->elements = kmalloc_node(min_nr * sizeof(void *),
70 GFP_KERNEL, node_id); 69 GFP_KERNEL, node_id);
71 if (!pool->elements) { 70 if (!pool->elements) {
diff --git a/mm/migrate.c b/mm/migrate.c
index a91ca00abebe..34d8ada053e4 100644
--- a/mm/migrate.c
+++ b/mm/migrate.c
@@ -761,7 +761,8 @@ static struct page *new_page_node(struct page *p, unsigned long private,
761 761
762 *result = &pm->status; 762 *result = &pm->status;
763 763
764 return alloc_pages_node(pm->node, GFP_HIGHUSER | GFP_THISNODE, 0); 764 return alloc_pages_node(pm->node,
765 GFP_HIGHUSER_MOVABLE | GFP_THISNODE, 0);
765} 766}
766 767
767/* 768/*
diff --git a/mm/page-writeback.c b/mm/page-writeback.c
index ea9da3bed3e9..886ea0d5a136 100644
--- a/mm/page-writeback.c
+++ b/mm/page-writeback.c
@@ -824,6 +824,7 @@ int __set_page_dirty_nobuffers(struct page *page)
824 mapping2 = page_mapping(page); 824 mapping2 = page_mapping(page);
825 if (mapping2) { /* Race with truncate? */ 825 if (mapping2) { /* Race with truncate? */
826 BUG_ON(mapping2 != mapping); 826 BUG_ON(mapping2 != mapping);
827 WARN_ON_ONCE(!PagePrivate(page) && !PageUptodate(page));
827 if (mapping_cap_account_dirty(mapping)) { 828 if (mapping_cap_account_dirty(mapping)) {
828 __inc_zone_page_state(page, NR_FILE_DIRTY); 829 __inc_zone_page_state(page, NR_FILE_DIRTY);
829 task_io_account_write(PAGE_CACHE_SIZE); 830 task_io_account_write(PAGE_CACHE_SIZE);
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index f9e4e647d7e8..e2a10b957f23 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -80,8 +80,9 @@ int sysctl_lowmem_reserve_ratio[MAX_NR_ZONES-1] = {
80 256, 80 256,
81#endif 81#endif
82#ifdef CONFIG_HIGHMEM 82#ifdef CONFIG_HIGHMEM
83 32 83 32,
84#endif 84#endif
85 32,
85}; 86};
86 87
87EXPORT_SYMBOL(totalram_pages); 88EXPORT_SYMBOL(totalram_pages);
@@ -95,8 +96,9 @@ static char * const zone_names[MAX_NR_ZONES] = {
95#endif 96#endif
96 "Normal", 97 "Normal",
97#ifdef CONFIG_HIGHMEM 98#ifdef CONFIG_HIGHMEM
98 "HighMem" 99 "HighMem",
99#endif 100#endif
101 "Movable",
100}; 102};
101 103
102int min_free_kbytes = 1024; 104int min_free_kbytes = 1024;
@@ -134,6 +136,13 @@ static unsigned long __meminitdata dma_reserve;
134 static unsigned long __meminitdata node_boundary_start_pfn[MAX_NUMNODES]; 136 static unsigned long __meminitdata node_boundary_start_pfn[MAX_NUMNODES];
135 static unsigned long __meminitdata node_boundary_end_pfn[MAX_NUMNODES]; 137 static unsigned long __meminitdata node_boundary_end_pfn[MAX_NUMNODES];
136#endif /* CONFIG_MEMORY_HOTPLUG_RESERVE */ 138#endif /* CONFIG_MEMORY_HOTPLUG_RESERVE */
139 unsigned long __initdata required_kernelcore;
140 unsigned long __initdata required_movablecore;
141 unsigned long __initdata zone_movable_pfn[MAX_NUMNODES];
142
143 /* movable_zone is the "real" zone pages in ZONE_MOVABLE are taken from */
144 int movable_zone;
145 EXPORT_SYMBOL(movable_zone);
137#endif /* CONFIG_ARCH_POPULATES_NODE_MAP */ 146#endif /* CONFIG_ARCH_POPULATES_NODE_MAP */
138 147
139#if MAX_NUMNODES > 1 148#if MAX_NUMNODES > 1
@@ -1324,7 +1333,7 @@ nofail_alloc:
1324 reclaim_state.reclaimed_slab = 0; 1333 reclaim_state.reclaimed_slab = 0;
1325 p->reclaim_state = &reclaim_state; 1334 p->reclaim_state = &reclaim_state;
1326 1335
1327 did_some_progress = try_to_free_pages(zonelist->zones, gfp_mask); 1336 did_some_progress = try_to_free_pages(zonelist->zones, order, gfp_mask);
1328 1337
1329 p->reclaim_state = NULL; 1338 p->reclaim_state = NULL;
1330 p->flags &= ~PF_MEMALLOC; 1339 p->flags &= ~PF_MEMALLOC;
@@ -1361,7 +1370,8 @@ nofail_alloc:
1361 */ 1370 */
1362 do_retry = 0; 1371 do_retry = 0;
1363 if (!(gfp_mask & __GFP_NORETRY)) { 1372 if (!(gfp_mask & __GFP_NORETRY)) {
1364 if ((order <= 3) || (gfp_mask & __GFP_REPEAT)) 1373 if ((order <= PAGE_ALLOC_COSTLY_ORDER) ||
1374 (gfp_mask & __GFP_REPEAT))
1365 do_retry = 1; 1375 do_retry = 1;
1366 if (gfp_mask & __GFP_NOFAIL) 1376 if (gfp_mask & __GFP_NOFAIL)
1367 do_retry = 1; 1377 do_retry = 1;
@@ -1474,13 +1484,14 @@ unsigned int nr_free_buffer_pages(void)
1474{ 1484{
1475 return nr_free_zone_pages(gfp_zone(GFP_USER)); 1485 return nr_free_zone_pages(gfp_zone(GFP_USER));
1476} 1486}
1487EXPORT_SYMBOL_GPL(nr_free_buffer_pages);
1477 1488
1478/* 1489/*
1479 * Amount of free RAM allocatable within all zones 1490 * Amount of free RAM allocatable within all zones
1480 */ 1491 */
1481unsigned int nr_free_pagecache_pages(void) 1492unsigned int nr_free_pagecache_pages(void)
1482{ 1493{
1483 return nr_free_zone_pages(gfp_zone(GFP_HIGHUSER)); 1494 return nr_free_zone_pages(gfp_zone(GFP_HIGHUSER_MOVABLE));
1484} 1495}
1485 1496
1486static inline void show_node(struct zone *zone) 1497static inline void show_node(struct zone *zone)
@@ -2667,6 +2678,63 @@ void __meminit get_pfn_range_for_nid(unsigned int nid,
2667} 2678}
2668 2679
2669/* 2680/*
2681 * This finds a zone that can be used for ZONE_MOVABLE pages. The
2682 * assumption is made that zones within a node are ordered in monotonic
2683 * increasing memory addresses so that the "highest" populated zone is used
2684 */
2685void __init find_usable_zone_for_movable(void)
2686{
2687 int zone_index;
2688 for (zone_index = MAX_NR_ZONES - 1; zone_index >= 0; zone_index--) {
2689 if (zone_index == ZONE_MOVABLE)
2690 continue;
2691
2692 if (arch_zone_highest_possible_pfn[zone_index] >
2693 arch_zone_lowest_possible_pfn[zone_index])
2694 break;
2695 }
2696
2697 VM_BUG_ON(zone_index == -1);
2698 movable_zone = zone_index;
2699}
2700
2701/*
2702 * The zone ranges provided by the architecture do not include ZONE_MOVABLE
2703 * because it is sized independant of architecture. Unlike the other zones,
2704 * the starting point for ZONE_MOVABLE is not fixed. It may be different
2705 * in each node depending on the size of each node and how evenly kernelcore
2706 * is distributed. This helper function adjusts the zone ranges
2707 * provided by the architecture for a given node by using the end of the
2708 * highest usable zone for ZONE_MOVABLE. This preserves the assumption that
2709 * zones within a node are in order of monotonic increases memory addresses
2710 */
2711void __meminit adjust_zone_range_for_zone_movable(int nid,
2712 unsigned long zone_type,
2713 unsigned long node_start_pfn,
2714 unsigned long node_end_pfn,
2715 unsigned long *zone_start_pfn,
2716 unsigned long *zone_end_pfn)
2717{
2718 /* Only adjust if ZONE_MOVABLE is on this node */
2719 if (zone_movable_pfn[nid]) {
2720 /* Size ZONE_MOVABLE */
2721 if (zone_type == ZONE_MOVABLE) {
2722 *zone_start_pfn = zone_movable_pfn[nid];
2723 *zone_end_pfn = min(node_end_pfn,
2724 arch_zone_highest_possible_pfn[movable_zone]);
2725
2726 /* Adjust for ZONE_MOVABLE starting within this range */
2727 } else if (*zone_start_pfn < zone_movable_pfn[nid] &&
2728 *zone_end_pfn > zone_movable_pfn[nid]) {
2729 *zone_end_pfn = zone_movable_pfn[nid];
2730
2731 /* Check if this whole range is within ZONE_MOVABLE */
2732 } else if (*zone_start_pfn >= zone_movable_pfn[nid])
2733 *zone_start_pfn = *zone_end_pfn;
2734 }
2735}
2736
2737/*
2670 * Return the number of pages a zone spans in a node, including holes 2738 * Return the number of pages a zone spans in a node, including holes
2671 * present_pages = zone_spanned_pages_in_node() - zone_absent_pages_in_node() 2739 * present_pages = zone_spanned_pages_in_node() - zone_absent_pages_in_node()
2672 */ 2740 */
@@ -2681,6 +2749,9 @@ static unsigned long __meminit zone_spanned_pages_in_node(int nid,
2681 get_pfn_range_for_nid(nid, &node_start_pfn, &node_end_pfn); 2749 get_pfn_range_for_nid(nid, &node_start_pfn, &node_end_pfn);
2682 zone_start_pfn = arch_zone_lowest_possible_pfn[zone_type]; 2750 zone_start_pfn = arch_zone_lowest_possible_pfn[zone_type];
2683 zone_end_pfn = arch_zone_highest_possible_pfn[zone_type]; 2751 zone_end_pfn = arch_zone_highest_possible_pfn[zone_type];
2752 adjust_zone_range_for_zone_movable(nid, zone_type,
2753 node_start_pfn, node_end_pfn,
2754 &zone_start_pfn, &zone_end_pfn);
2684 2755
2685 /* Check that this node has pages within the zone's required range */ 2756 /* Check that this node has pages within the zone's required range */
2686 if (zone_end_pfn < node_start_pfn || zone_start_pfn > node_end_pfn) 2757 if (zone_end_pfn < node_start_pfn || zone_start_pfn > node_end_pfn)
@@ -2771,6 +2842,9 @@ static unsigned long __meminit zone_absent_pages_in_node(int nid,
2771 zone_end_pfn = min(arch_zone_highest_possible_pfn[zone_type], 2842 zone_end_pfn = min(arch_zone_highest_possible_pfn[zone_type],
2772 node_end_pfn); 2843 node_end_pfn);
2773 2844
2845 adjust_zone_range_for_zone_movable(nid, zone_type,
2846 node_start_pfn, node_end_pfn,
2847 &zone_start_pfn, &zone_end_pfn);
2774 return __absent_pages_in_range(nid, zone_start_pfn, zone_end_pfn); 2848 return __absent_pages_in_range(nid, zone_start_pfn, zone_end_pfn);
2775} 2849}
2776 2850
@@ -3148,6 +3222,157 @@ unsigned long __init find_max_pfn_with_active_regions(void)
3148 return max_pfn; 3222 return max_pfn;
3149} 3223}
3150 3224
3225unsigned long __init early_calculate_totalpages(void)
3226{
3227 int i;
3228 unsigned long totalpages = 0;
3229
3230 for (i = 0; i < nr_nodemap_entries; i++)
3231 totalpages += early_node_map[i].end_pfn -
3232 early_node_map[i].start_pfn;
3233
3234 return totalpages;
3235}
3236
3237/*
3238 * Find the PFN the Movable zone begins in each node. Kernel memory
3239 * is spread evenly between nodes as long as the nodes have enough
3240 * memory. When they don't, some nodes will have more kernelcore than
3241 * others
3242 */
3243void __init find_zone_movable_pfns_for_nodes(unsigned long *movable_pfn)
3244{
3245 int i, nid;
3246 unsigned long usable_startpfn;
3247 unsigned long kernelcore_node, kernelcore_remaining;
3248 int usable_nodes = num_online_nodes();
3249
3250 /*
3251 * If movablecore was specified, calculate what size of
3252 * kernelcore that corresponds so that memory usable for
3253 * any allocation type is evenly spread. If both kernelcore
3254 * and movablecore are specified, then the value of kernelcore
3255 * will be used for required_kernelcore if it's greater than
3256 * what movablecore would have allowed.
3257 */
3258 if (required_movablecore) {
3259 unsigned long totalpages = early_calculate_totalpages();
3260 unsigned long corepages;
3261
3262 /*
3263 * Round-up so that ZONE_MOVABLE is at least as large as what
3264 * was requested by the user
3265 */
3266 required_movablecore =
3267 roundup(required_movablecore, MAX_ORDER_NR_PAGES);
3268 corepages = totalpages - required_movablecore;
3269
3270 required_kernelcore = max(required_kernelcore, corepages);
3271 }
3272
3273 /* If kernelcore was not specified, there is no ZONE_MOVABLE */
3274 if (!required_kernelcore)
3275 return;
3276
3277 /* usable_startpfn is the lowest possible pfn ZONE_MOVABLE can be at */
3278 find_usable_zone_for_movable();
3279 usable_startpfn = arch_zone_lowest_possible_pfn[movable_zone];
3280
3281restart:
3282 /* Spread kernelcore memory as evenly as possible throughout nodes */
3283 kernelcore_node = required_kernelcore / usable_nodes;
3284 for_each_online_node(nid) {
3285 /*
3286 * Recalculate kernelcore_node if the division per node
3287 * now exceeds what is necessary to satisfy the requested
3288 * amount of memory for the kernel
3289 */
3290 if (required_kernelcore < kernelcore_node)
3291 kernelcore_node = required_kernelcore / usable_nodes;
3292
3293 /*
3294 * As the map is walked, we track how much memory is usable
3295 * by the kernel using kernelcore_remaining. When it is
3296 * 0, the rest of the node is usable by ZONE_MOVABLE
3297 */
3298 kernelcore_remaining = kernelcore_node;
3299
3300 /* Go through each range of PFNs within this node */
3301 for_each_active_range_index_in_nid(i, nid) {
3302 unsigned long start_pfn, end_pfn;
3303 unsigned long size_pages;
3304
3305 start_pfn = max(early_node_map[i].start_pfn,
3306 zone_movable_pfn[nid]);
3307 end_pfn = early_node_map[i].end_pfn;
3308 if (start_pfn >= end_pfn)
3309 continue;
3310
3311 /* Account for what is only usable for kernelcore */
3312 if (start_pfn < usable_startpfn) {
3313 unsigned long kernel_pages;
3314 kernel_pages = min(end_pfn, usable_startpfn)
3315 - start_pfn;
3316
3317 kernelcore_remaining -= min(kernel_pages,
3318 kernelcore_remaining);
3319 required_kernelcore -= min(kernel_pages,
3320 required_kernelcore);
3321
3322 /* Continue if range is now fully accounted */
3323 if (end_pfn <= usable_startpfn) {
3324
3325 /*
3326 * Push zone_movable_pfn to the end so
3327 * that if we have to rebalance
3328 * kernelcore across nodes, we will
3329 * not double account here
3330 */
3331 zone_movable_pfn[nid] = end_pfn;
3332 continue;
3333 }
3334 start_pfn = usable_startpfn;
3335 }
3336
3337 /*
3338 * The usable PFN range for ZONE_MOVABLE is from
3339 * start_pfn->end_pfn. Calculate size_pages as the
3340 * number of pages used as kernelcore
3341 */
3342 size_pages = end_pfn - start_pfn;
3343 if (size_pages > kernelcore_remaining)
3344 size_pages = kernelcore_remaining;
3345 zone_movable_pfn[nid] = start_pfn + size_pages;
3346
3347 /*
3348 * Some kernelcore has been met, update counts and
3349 * break if the kernelcore for this node has been
3350 * satisified
3351 */
3352 required_kernelcore -= min(required_kernelcore,
3353 size_pages);
3354 kernelcore_remaining -= size_pages;
3355 if (!kernelcore_remaining)
3356 break;
3357 }
3358 }
3359
3360 /*
3361 * If there is still required_kernelcore, we do another pass with one
3362 * less node in the count. This will push zone_movable_pfn[nid] further
3363 * along on the nodes that still have memory until kernelcore is
3364 * satisified
3365 */
3366 usable_nodes--;
3367 if (usable_nodes && required_kernelcore > usable_nodes)
3368 goto restart;
3369
3370 /* Align start of ZONE_MOVABLE on all nids to MAX_ORDER_NR_PAGES */
3371 for (nid = 0; nid < MAX_NUMNODES; nid++)
3372 zone_movable_pfn[nid] =
3373 roundup(zone_movable_pfn[nid], MAX_ORDER_NR_PAGES);
3374}
3375
3151/** 3376/**
3152 * free_area_init_nodes - Initialise all pg_data_t and zone data 3377 * free_area_init_nodes - Initialise all pg_data_t and zone data
3153 * @max_zone_pfn: an array of max PFNs for each zone 3378 * @max_zone_pfn: an array of max PFNs for each zone
@@ -3177,19 +3402,37 @@ void __init free_area_init_nodes(unsigned long *max_zone_pfn)
3177 arch_zone_lowest_possible_pfn[0] = find_min_pfn_with_active_regions(); 3402 arch_zone_lowest_possible_pfn[0] = find_min_pfn_with_active_regions();
3178 arch_zone_highest_possible_pfn[0] = max_zone_pfn[0]; 3403 arch_zone_highest_possible_pfn[0] = max_zone_pfn[0];
3179 for (i = 1; i < MAX_NR_ZONES; i++) { 3404 for (i = 1; i < MAX_NR_ZONES; i++) {
3405 if (i == ZONE_MOVABLE)
3406 continue;
3180 arch_zone_lowest_possible_pfn[i] = 3407 arch_zone_lowest_possible_pfn[i] =
3181 arch_zone_highest_possible_pfn[i-1]; 3408 arch_zone_highest_possible_pfn[i-1];
3182 arch_zone_highest_possible_pfn[i] = 3409 arch_zone_highest_possible_pfn[i] =
3183 max(max_zone_pfn[i], arch_zone_lowest_possible_pfn[i]); 3410 max(max_zone_pfn[i], arch_zone_lowest_possible_pfn[i]);
3184 } 3411 }
3412 arch_zone_lowest_possible_pfn[ZONE_MOVABLE] = 0;
3413 arch_zone_highest_possible_pfn[ZONE_MOVABLE] = 0;
3414
3415 /* Find the PFNs that ZONE_MOVABLE begins at in each node */
3416 memset(zone_movable_pfn, 0, sizeof(zone_movable_pfn));
3417 find_zone_movable_pfns_for_nodes(zone_movable_pfn);
3185 3418
3186 /* Print out the zone ranges */ 3419 /* Print out the zone ranges */
3187 printk("Zone PFN ranges:\n"); 3420 printk("Zone PFN ranges:\n");
3188 for (i = 0; i < MAX_NR_ZONES; i++) 3421 for (i = 0; i < MAX_NR_ZONES; i++) {
3422 if (i == ZONE_MOVABLE)
3423 continue;
3189 printk(" %-8s %8lu -> %8lu\n", 3424 printk(" %-8s %8lu -> %8lu\n",
3190 zone_names[i], 3425 zone_names[i],
3191 arch_zone_lowest_possible_pfn[i], 3426 arch_zone_lowest_possible_pfn[i],
3192 arch_zone_highest_possible_pfn[i]); 3427 arch_zone_highest_possible_pfn[i]);
3428 }
3429
3430 /* Print out the PFNs ZONE_MOVABLE begins at in each node */
3431 printk("Movable zone start PFN for each node\n");
3432 for (i = 0; i < MAX_NUMNODES; i++) {
3433 if (zone_movable_pfn[i])
3434 printk(" Node %d: %lu\n", i, zone_movable_pfn[i]);
3435 }
3193 3436
3194 /* Print out the early_node_map[] */ 3437 /* Print out the early_node_map[] */
3195 printk("early_node_map[%d] active PFN ranges\n", nr_nodemap_entries); 3438 printk("early_node_map[%d] active PFN ranges\n", nr_nodemap_entries);
@@ -3206,6 +3449,43 @@ void __init free_area_init_nodes(unsigned long *max_zone_pfn)
3206 find_min_pfn_for_node(nid), NULL); 3449 find_min_pfn_for_node(nid), NULL);
3207 } 3450 }
3208} 3451}
3452
3453static int __init cmdline_parse_core(char *p, unsigned long *core)
3454{
3455 unsigned long long coremem;
3456 if (!p)
3457 return -EINVAL;
3458
3459 coremem = memparse(p, &p);
3460 *core = coremem >> PAGE_SHIFT;
3461
3462 /* Paranoid check that UL is enough for the coremem value */
3463 WARN_ON((coremem >> PAGE_SHIFT) > ULONG_MAX);
3464
3465 return 0;
3466}
3467
3468/*
3469 * kernelcore=size sets the amount of memory for use for allocations that
3470 * cannot be reclaimed or migrated.
3471 */
3472static int __init cmdline_parse_kernelcore(char *p)
3473{
3474 return cmdline_parse_core(p, &required_kernelcore);
3475}
3476
3477/*
3478 * movablecore=size sets the amount of memory for use for allocations that
3479 * can be reclaimed or migrated.
3480 */
3481static int __init cmdline_parse_movablecore(char *p)
3482{
3483 return cmdline_parse_core(p, &required_movablecore);
3484}
3485
3486early_param("kernelcore", cmdline_parse_kernelcore);
3487early_param("movablecore", cmdline_parse_movablecore);
3488
3209#endif /* CONFIG_ARCH_POPULATES_NODE_MAP */ 3489#endif /* CONFIG_ARCH_POPULATES_NODE_MAP */
3210 3490
3211/** 3491/**
diff --git a/mm/pdflush.c b/mm/pdflush.c
index 8ce0900dc95c..8f6ee073c0e3 100644
--- a/mm/pdflush.c
+++ b/mm/pdflush.c
@@ -92,6 +92,7 @@ struct pdflush_work {
92static int __pdflush(struct pdflush_work *my_work) 92static int __pdflush(struct pdflush_work *my_work)
93{ 93{
94 current->flags |= PF_FLUSHER | PF_SWAPWRITE; 94 current->flags |= PF_FLUSHER | PF_SWAPWRITE;
95 set_freezable();
95 my_work->fn = NULL; 96 my_work->fn = NULL;
96 my_work->who = current; 97 my_work->who = current;
97 INIT_LIST_HEAD(&my_work->list); 98 INIT_LIST_HEAD(&my_work->list);
diff --git a/mm/shmem.c b/mm/shmem.c
index 0493e4d0bcaa..96fa79fb6ad3 100644
--- a/mm/shmem.c
+++ b/mm/shmem.c
@@ -27,6 +27,7 @@
27#include <linux/init.h> 27#include <linux/init.h>
28#include <linux/fs.h> 28#include <linux/fs.h>
29#include <linux/xattr.h> 29#include <linux/xattr.h>
30#include <linux/exportfs.h>
30#include <linux/generic_acl.h> 31#include <linux/generic_acl.h>
31#include <linux/mm.h> 32#include <linux/mm.h>
32#include <linux/mman.h> 33#include <linux/mman.h>
@@ -93,8 +94,11 @@ static inline struct page *shmem_dir_alloc(gfp_t gfp_mask)
93 * The above definition of ENTRIES_PER_PAGE, and the use of 94 * The above definition of ENTRIES_PER_PAGE, and the use of
94 * BLOCKS_PER_PAGE on indirect pages, assume PAGE_CACHE_SIZE: 95 * BLOCKS_PER_PAGE on indirect pages, assume PAGE_CACHE_SIZE:
95 * might be reconsidered if it ever diverges from PAGE_SIZE. 96 * might be reconsidered if it ever diverges from PAGE_SIZE.
97 *
98 * __GFP_MOVABLE is masked out as swap vectors cannot move
96 */ 99 */
97 return alloc_pages(gfp_mask, PAGE_CACHE_SHIFT-PAGE_SHIFT); 100 return alloc_pages((gfp_mask & ~__GFP_MOVABLE) | __GFP_ZERO,
101 PAGE_CACHE_SHIFT-PAGE_SHIFT);
98} 102}
99 103
100static inline void shmem_dir_free(struct page *page) 104static inline void shmem_dir_free(struct page *page)
@@ -372,7 +376,7 @@ static swp_entry_t *shmem_swp_alloc(struct shmem_inode_info *info, unsigned long
372 } 376 }
373 377
374 spin_unlock(&info->lock); 378 spin_unlock(&info->lock);
375 page = shmem_dir_alloc(mapping_gfp_mask(inode->i_mapping) | __GFP_ZERO); 379 page = shmem_dir_alloc(mapping_gfp_mask(inode->i_mapping));
376 if (page) 380 if (page)
377 set_page_private(page, 0); 381 set_page_private(page, 0);
378 spin_lock(&info->lock); 382 spin_lock(&info->lock);
diff --git a/mm/slab.c b/mm/slab.c
index a453383333fc..96d30ee256ef 100644
--- a/mm/slab.c
+++ b/mm/slab.c
@@ -775,6 +775,9 @@ static inline struct kmem_cache *__find_general_cachep(size_t size,
775 */ 775 */
776 BUG_ON(malloc_sizes[INDEX_AC].cs_cachep == NULL); 776 BUG_ON(malloc_sizes[INDEX_AC].cs_cachep == NULL);
777#endif 777#endif
778 if (!size)
779 return ZERO_SIZE_PTR;
780
778 while (size > csizep->cs_size) 781 while (size > csizep->cs_size)
779 csizep++; 782 csizep++;
780 783
@@ -2351,7 +2354,7 @@ kmem_cache_create (const char *name, size_t size, size_t align,
2351 * this should not happen at all. 2354 * this should not happen at all.
2352 * But leave a BUG_ON for some lucky dude. 2355 * But leave a BUG_ON for some lucky dude.
2353 */ 2356 */
2354 BUG_ON(!cachep->slabp_cache); 2357 BUG_ON(ZERO_OR_NULL_PTR(cachep->slabp_cache));
2355 } 2358 }
2356 cachep->ctor = ctor; 2359 cachep->ctor = ctor;
2357 cachep->name = name; 2360 cachep->name = name;
@@ -2743,7 +2746,7 @@ static int cache_grow(struct kmem_cache *cachep,
2743 * Be lazy and only check for valid flags here, keeping it out of the 2746 * Be lazy and only check for valid flags here, keeping it out of the
2744 * critical path in kmem_cache_alloc(). 2747 * critical path in kmem_cache_alloc().
2745 */ 2748 */
2746 BUG_ON(flags & ~(GFP_DMA | GFP_LEVEL_MASK)); 2749 BUG_ON(flags & ~(GFP_DMA | __GFP_ZERO | GFP_LEVEL_MASK));
2747 2750
2748 local_flags = (flags & GFP_LEVEL_MASK); 2751 local_flags = (flags & GFP_LEVEL_MASK);
2749 /* Take the l3 list lock to change the colour_next on this node */ 2752 /* Take the l3 list lock to change the colour_next on this node */
@@ -3389,6 +3392,9 @@ __cache_alloc_node(struct kmem_cache *cachep, gfp_t flags, int nodeid,
3389 local_irq_restore(save_flags); 3392 local_irq_restore(save_flags);
3390 ptr = cache_alloc_debugcheck_after(cachep, flags, ptr, caller); 3393 ptr = cache_alloc_debugcheck_after(cachep, flags, ptr, caller);
3391 3394
3395 if (unlikely((flags & __GFP_ZERO) && ptr))
3396 memset(ptr, 0, obj_size(cachep));
3397
3392 return ptr; 3398 return ptr;
3393} 3399}
3394 3400
@@ -3440,6 +3446,9 @@ __cache_alloc(struct kmem_cache *cachep, gfp_t flags, void *caller)
3440 objp = cache_alloc_debugcheck_after(cachep, flags, objp, caller); 3446 objp = cache_alloc_debugcheck_after(cachep, flags, objp, caller);
3441 prefetchw(objp); 3447 prefetchw(objp);
3442 3448
3449 if (unlikely((flags & __GFP_ZERO) && objp))
3450 memset(objp, 0, obj_size(cachep));
3451
3443 return objp; 3452 return objp;
3444} 3453}
3445 3454
@@ -3581,23 +3590,6 @@ void *kmem_cache_alloc(struct kmem_cache *cachep, gfp_t flags)
3581EXPORT_SYMBOL(kmem_cache_alloc); 3590EXPORT_SYMBOL(kmem_cache_alloc);
3582 3591
3583/** 3592/**
3584 * kmem_cache_zalloc - Allocate an object. The memory is set to zero.
3585 * @cache: The cache to allocate from.
3586 * @flags: See kmalloc().
3587 *
3588 * Allocate an object from this cache and set the allocated memory to zero.
3589 * The flags are only relevant if the cache has no available objects.
3590 */
3591void *kmem_cache_zalloc(struct kmem_cache *cache, gfp_t flags)
3592{
3593 void *ret = __cache_alloc(cache, flags, __builtin_return_address(0));
3594 if (ret)
3595 memset(ret, 0, obj_size(cache));
3596 return ret;
3597}
3598EXPORT_SYMBOL(kmem_cache_zalloc);
3599
3600/**
3601 * kmem_ptr_validate - check if an untrusted pointer might 3593 * kmem_ptr_validate - check if an untrusted pointer might
3602 * be a slab entry. 3594 * be a slab entry.
3603 * @cachep: the cache we're checking against 3595 * @cachep: the cache we're checking against
@@ -3653,8 +3645,8 @@ __do_kmalloc_node(size_t size, gfp_t flags, int node, void *caller)
3653 struct kmem_cache *cachep; 3645 struct kmem_cache *cachep;
3654 3646
3655 cachep = kmem_find_general_cachep(size, flags); 3647 cachep = kmem_find_general_cachep(size, flags);
3656 if (unlikely(cachep == NULL)) 3648 if (unlikely(ZERO_OR_NULL_PTR(cachep)))
3657 return NULL; 3649 return cachep;
3658 return kmem_cache_alloc_node(cachep, flags, node); 3650 return kmem_cache_alloc_node(cachep, flags, node);
3659} 3651}
3660 3652
@@ -3726,52 +3718,6 @@ EXPORT_SYMBOL(__kmalloc);
3726#endif 3718#endif
3727 3719
3728/** 3720/**
3729 * krealloc - reallocate memory. The contents will remain unchanged.
3730 * @p: object to reallocate memory for.
3731 * @new_size: how many bytes of memory are required.
3732 * @flags: the type of memory to allocate.
3733 *
3734 * The contents of the object pointed to are preserved up to the
3735 * lesser of the new and old sizes. If @p is %NULL, krealloc()
3736 * behaves exactly like kmalloc(). If @size is 0 and @p is not a
3737 * %NULL pointer, the object pointed to is freed.
3738 */
3739void *krealloc(const void *p, size_t new_size, gfp_t flags)
3740{
3741 struct kmem_cache *cache, *new_cache;
3742 void *ret;
3743
3744 if (unlikely(!p))
3745 return kmalloc_track_caller(new_size, flags);
3746
3747 if (unlikely(!new_size)) {
3748 kfree(p);
3749 return NULL;
3750 }
3751
3752 cache = virt_to_cache(p);
3753 new_cache = __find_general_cachep(new_size, flags);
3754
3755 /*
3756 * If new size fits in the current cache, bail out.
3757 */
3758 if (likely(cache == new_cache))
3759 return (void *)p;
3760
3761 /*
3762 * We are on the slow-path here so do not use __cache_alloc
3763 * because it bloats kernel text.
3764 */
3765 ret = kmalloc_track_caller(new_size, flags);
3766 if (ret) {
3767 memcpy(ret, p, min(new_size, ksize(p)));
3768 kfree(p);
3769 }
3770 return ret;
3771}
3772EXPORT_SYMBOL(krealloc);
3773
3774/**
3775 * kmem_cache_free - Deallocate an object 3721 * kmem_cache_free - Deallocate an object
3776 * @cachep: The cache the allocation was from. 3722 * @cachep: The cache the allocation was from.
3777 * @objp: The previously allocated object. 3723 * @objp: The previously allocated object.
@@ -3806,7 +3752,7 @@ void kfree(const void *objp)
3806 struct kmem_cache *c; 3752 struct kmem_cache *c;
3807 unsigned long flags; 3753 unsigned long flags;
3808 3754
3809 if (unlikely(!objp)) 3755 if (unlikely(ZERO_OR_NULL_PTR(objp)))
3810 return; 3756 return;
3811 local_irq_save(flags); 3757 local_irq_save(flags);
3812 kfree_debugcheck(objp); 3758 kfree_debugcheck(objp);
@@ -4398,7 +4344,7 @@ static void show_symbol(struct seq_file *m, unsigned long address)
4398{ 4344{
4399#ifdef CONFIG_KALLSYMS 4345#ifdef CONFIG_KALLSYMS
4400 unsigned long offset, size; 4346 unsigned long offset, size;
4401 char modname[MODULE_NAME_LEN + 1], name[KSYM_NAME_LEN + 1]; 4347 char modname[MODULE_NAME_LEN], name[KSYM_NAME_LEN];
4402 4348
4403 if (lookup_symbol_attrs(address, &size, &offset, modname, name) == 0) { 4349 if (lookup_symbol_attrs(address, &size, &offset, modname, name) == 0) {
4404 seq_printf(m, "%s+%#lx/%#lx", name, offset, size); 4350 seq_printf(m, "%s+%#lx/%#lx", name, offset, size);
@@ -4493,7 +4439,7 @@ const struct seq_operations slabstats_op = {
4493 */ 4439 */
4494size_t ksize(const void *objp) 4440size_t ksize(const void *objp)
4495{ 4441{
4496 if (unlikely(objp == NULL)) 4442 if (unlikely(ZERO_OR_NULL_PTR(objp)))
4497 return 0; 4443 return 0;
4498 4444
4499 return obj_size(virt_to_cache(objp)); 4445 return obj_size(virt_to_cache(objp));
diff --git a/mm/slob.c b/mm/slob.c
index b4899079d8b0..c89ef116d7aa 100644
--- a/mm/slob.c
+++ b/mm/slob.c
@@ -334,6 +334,8 @@ static void *slob_alloc(size_t size, gfp_t gfp, int align, int node)
334 BUG_ON(!b); 334 BUG_ON(!b);
335 spin_unlock_irqrestore(&slob_lock, flags); 335 spin_unlock_irqrestore(&slob_lock, flags);
336 } 336 }
337 if (unlikely((gfp & __GFP_ZERO) && b))
338 memset(b, 0, size);
337 return b; 339 return b;
338} 340}
339 341
@@ -347,7 +349,7 @@ static void slob_free(void *block, int size)
347 slobidx_t units; 349 slobidx_t units;
348 unsigned long flags; 350 unsigned long flags;
349 351
350 if (!block) 352 if (ZERO_OR_NULL_PTR(block))
351 return; 353 return;
352 BUG_ON(!size); 354 BUG_ON(!size);
353 355
@@ -424,10 +426,13 @@ out:
424 426
425void *__kmalloc_node(size_t size, gfp_t gfp, int node) 427void *__kmalloc_node(size_t size, gfp_t gfp, int node)
426{ 428{
429 unsigned int *m;
427 int align = max(ARCH_KMALLOC_MINALIGN, ARCH_SLAB_MINALIGN); 430 int align = max(ARCH_KMALLOC_MINALIGN, ARCH_SLAB_MINALIGN);
428 431
429 if (size < PAGE_SIZE - align) { 432 if (size < PAGE_SIZE - align) {
430 unsigned int *m; 433 if (!size)
434 return ZERO_SIZE_PTR;
435
431 m = slob_alloc(size + align, gfp, align, node); 436 m = slob_alloc(size + align, gfp, align, node);
432 if (m) 437 if (m)
433 *m = size; 438 *m = size;
@@ -446,44 +451,11 @@ void *__kmalloc_node(size_t size, gfp_t gfp, int node)
446} 451}
447EXPORT_SYMBOL(__kmalloc_node); 452EXPORT_SYMBOL(__kmalloc_node);
448 453
449/**
450 * krealloc - reallocate memory. The contents will remain unchanged.
451 *
452 * @p: object to reallocate memory for.
453 * @new_size: how many bytes of memory are required.
454 * @flags: the type of memory to allocate.
455 *
456 * The contents of the object pointed to are preserved up to the
457 * lesser of the new and old sizes. If @p is %NULL, krealloc()
458 * behaves exactly like kmalloc(). If @size is 0 and @p is not a
459 * %NULL pointer, the object pointed to is freed.
460 */
461void *krealloc(const void *p, size_t new_size, gfp_t flags)
462{
463 void *ret;
464
465 if (unlikely(!p))
466 return kmalloc_track_caller(new_size, flags);
467
468 if (unlikely(!new_size)) {
469 kfree(p);
470 return NULL;
471 }
472
473 ret = kmalloc_track_caller(new_size, flags);
474 if (ret) {
475 memcpy(ret, p, min(new_size, ksize(p)));
476 kfree(p);
477 }
478 return ret;
479}
480EXPORT_SYMBOL(krealloc);
481
482void kfree(const void *block) 454void kfree(const void *block)
483{ 455{
484 struct slob_page *sp; 456 struct slob_page *sp;
485 457
486 if (!block) 458 if (ZERO_OR_NULL_PTR(block))
487 return; 459 return;
488 460
489 sp = (struct slob_page *)virt_to_page(block); 461 sp = (struct slob_page *)virt_to_page(block);
@@ -501,7 +473,7 @@ size_t ksize(const void *block)
501{ 473{
502 struct slob_page *sp; 474 struct slob_page *sp;
503 475
504 if (!block) 476 if (ZERO_OR_NULL_PTR(block))
505 return 0; 477 return 0;
506 478
507 sp = (struct slob_page *)virt_to_page(block); 479 sp = (struct slob_page *)virt_to_page(block);
@@ -571,16 +543,6 @@ void *kmem_cache_alloc_node(struct kmem_cache *c, gfp_t flags, int node)
571} 543}
572EXPORT_SYMBOL(kmem_cache_alloc_node); 544EXPORT_SYMBOL(kmem_cache_alloc_node);
573 545
574void *kmem_cache_zalloc(struct kmem_cache *c, gfp_t flags)
575{
576 void *ret = kmem_cache_alloc(c, flags);
577 if (ret)
578 memset(ret, 0, c->size);
579
580 return ret;
581}
582EXPORT_SYMBOL(kmem_cache_zalloc);
583
584static void __kmem_cache_free(void *b, int size) 546static void __kmem_cache_free(void *b, int size)
585{ 547{
586 if (size < PAGE_SIZE) 548 if (size < PAGE_SIZE)
diff --git a/mm/slub.c b/mm/slub.c
index 6aea48942c29..52a4f44be394 100644
--- a/mm/slub.c
+++ b/mm/slub.c
@@ -205,6 +205,11 @@ static inline void ClearSlabDebug(struct page *page)
205#define ARCH_SLAB_MINALIGN __alignof__(unsigned long long) 205#define ARCH_SLAB_MINALIGN __alignof__(unsigned long long)
206#endif 206#endif
207 207
208/*
209 * The page->inuse field is 16 bit thus we have this limitation
210 */
211#define MAX_OBJECTS_PER_SLAB 65535
212
208/* Internal SLUB flags */ 213/* Internal SLUB flags */
209#define __OBJECT_POISON 0x80000000 /* Poison object */ 214#define __OBJECT_POISON 0x80000000 /* Poison object */
210 215
@@ -228,7 +233,7 @@ static enum {
228 233
229/* A list of all slab caches on the system */ 234/* A list of all slab caches on the system */
230static DECLARE_RWSEM(slub_lock); 235static DECLARE_RWSEM(slub_lock);
231LIST_HEAD(slab_caches); 236static LIST_HEAD(slab_caches);
232 237
233/* 238/*
234 * Tracking user of a slab. 239 * Tracking user of a slab.
@@ -247,9 +252,10 @@ static int sysfs_slab_add(struct kmem_cache *);
247static int sysfs_slab_alias(struct kmem_cache *, const char *); 252static int sysfs_slab_alias(struct kmem_cache *, const char *);
248static void sysfs_slab_remove(struct kmem_cache *); 253static void sysfs_slab_remove(struct kmem_cache *);
249#else 254#else
250static int sysfs_slab_add(struct kmem_cache *s) { return 0; } 255static inline int sysfs_slab_add(struct kmem_cache *s) { return 0; }
251static int sysfs_slab_alias(struct kmem_cache *s, const char *p) { return 0; } 256static inline int sysfs_slab_alias(struct kmem_cache *s, const char *p)
252static void sysfs_slab_remove(struct kmem_cache *s) {} 257 { return 0; }
258static inline void sysfs_slab_remove(struct kmem_cache *s) {}
253#endif 259#endif
254 260
255/******************************************************************** 261/********************************************************************
@@ -344,7 +350,7 @@ static void print_section(char *text, u8 *addr, unsigned int length)
344 350
345 for (i = 0; i < length; i++) { 351 for (i = 0; i < length; i++) {
346 if (newline) { 352 if (newline) {
347 printk(KERN_ERR "%10s 0x%p: ", text, addr + i); 353 printk(KERN_ERR "%8s 0x%p: ", text, addr + i);
348 newline = 0; 354 newline = 0;
349 } 355 }
350 printk(" %02x", addr[i]); 356 printk(" %02x", addr[i]);
@@ -401,10 +407,11 @@ static void set_track(struct kmem_cache *s, void *object,
401 407
402static void init_tracking(struct kmem_cache *s, void *object) 408static void init_tracking(struct kmem_cache *s, void *object)
403{ 409{
404 if (s->flags & SLAB_STORE_USER) { 410 if (!(s->flags & SLAB_STORE_USER))
405 set_track(s, object, TRACK_FREE, NULL); 411 return;
406 set_track(s, object, TRACK_ALLOC, NULL); 412
407 } 413 set_track(s, object, TRACK_FREE, NULL);
414 set_track(s, object, TRACK_ALLOC, NULL);
408} 415}
409 416
410static void print_track(const char *s, struct track *t) 417static void print_track(const char *s, struct track *t)
@@ -412,65 +419,106 @@ static void print_track(const char *s, struct track *t)
412 if (!t->addr) 419 if (!t->addr)
413 return; 420 return;
414 421
415 printk(KERN_ERR "%s: ", s); 422 printk(KERN_ERR "INFO: %s in ", s);
416 __print_symbol("%s", (unsigned long)t->addr); 423 __print_symbol("%s", (unsigned long)t->addr);
417 printk(" jiffies_ago=%lu cpu=%u pid=%d\n", jiffies - t->when, t->cpu, t->pid); 424 printk(" age=%lu cpu=%u pid=%d\n", jiffies - t->when, t->cpu, t->pid);
425}
426
427static void print_tracking(struct kmem_cache *s, void *object)
428{
429 if (!(s->flags & SLAB_STORE_USER))
430 return;
431
432 print_track("Allocated", get_track(s, object, TRACK_ALLOC));
433 print_track("Freed", get_track(s, object, TRACK_FREE));
434}
435
436static void print_page_info(struct page *page)
437{
438 printk(KERN_ERR "INFO: Slab 0x%p used=%u fp=0x%p flags=0x%04lx\n",
439 page, page->inuse, page->freelist, page->flags);
440
441}
442
443static void slab_bug(struct kmem_cache *s, char *fmt, ...)
444{
445 va_list args;
446 char buf[100];
447
448 va_start(args, fmt);
449 vsnprintf(buf, sizeof(buf), fmt, args);
450 va_end(args);
451 printk(KERN_ERR "========================================"
452 "=====================================\n");
453 printk(KERN_ERR "BUG %s: %s\n", s->name, buf);
454 printk(KERN_ERR "----------------------------------------"
455 "-------------------------------------\n\n");
418} 456}
419 457
420static void print_trailer(struct kmem_cache *s, u8 *p) 458static void slab_fix(struct kmem_cache *s, char *fmt, ...)
459{
460 va_list args;
461 char buf[100];
462
463 va_start(args, fmt);
464 vsnprintf(buf, sizeof(buf), fmt, args);
465 va_end(args);
466 printk(KERN_ERR "FIX %s: %s\n", s->name, buf);
467}
468
469static void print_trailer(struct kmem_cache *s, struct page *page, u8 *p)
421{ 470{
422 unsigned int off; /* Offset of last byte */ 471 unsigned int off; /* Offset of last byte */
472 u8 *addr = page_address(page);
473
474 print_tracking(s, p);
475
476 print_page_info(page);
477
478 printk(KERN_ERR "INFO: Object 0x%p @offset=%tu fp=0x%p\n\n",
479 p, p - addr, get_freepointer(s, p));
480
481 if (p > addr + 16)
482 print_section("Bytes b4", p - 16, 16);
483
484 print_section("Object", p, min(s->objsize, 128));
423 485
424 if (s->flags & SLAB_RED_ZONE) 486 if (s->flags & SLAB_RED_ZONE)
425 print_section("Redzone", p + s->objsize, 487 print_section("Redzone", p + s->objsize,
426 s->inuse - s->objsize); 488 s->inuse - s->objsize);
427 489
428 printk(KERN_ERR "FreePointer 0x%p -> 0x%p\n",
429 p + s->offset,
430 get_freepointer(s, p));
431
432 if (s->offset) 490 if (s->offset)
433 off = s->offset + sizeof(void *); 491 off = s->offset + sizeof(void *);
434 else 492 else
435 off = s->inuse; 493 off = s->inuse;
436 494
437 if (s->flags & SLAB_STORE_USER) { 495 if (s->flags & SLAB_STORE_USER)
438 print_track("Last alloc", get_track(s, p, TRACK_ALLOC));
439 print_track("Last free ", get_track(s, p, TRACK_FREE));
440 off += 2 * sizeof(struct track); 496 off += 2 * sizeof(struct track);
441 }
442 497
443 if (off != s->size) 498 if (off != s->size)
444 /* Beginning of the filler is the free pointer */ 499 /* Beginning of the filler is the free pointer */
445 print_section("Filler", p + off, s->size - off); 500 print_section("Padding", p + off, s->size - off);
501
502 dump_stack();
446} 503}
447 504
448static void object_err(struct kmem_cache *s, struct page *page, 505static void object_err(struct kmem_cache *s, struct page *page,
449 u8 *object, char *reason) 506 u8 *object, char *reason)
450{ 507{
451 u8 *addr = page_address(page); 508 slab_bug(s, reason);
452 509 print_trailer(s, page, object);
453 printk(KERN_ERR "*** SLUB %s: %s@0x%p slab 0x%p\n",
454 s->name, reason, object, page);
455 printk(KERN_ERR " offset=%tu flags=0x%04lx inuse=%u freelist=0x%p\n",
456 object - addr, page->flags, page->inuse, page->freelist);
457 if (object > addr + 16)
458 print_section("Bytes b4", object - 16, 16);
459 print_section("Object", object, min(s->objsize, 128));
460 print_trailer(s, object);
461 dump_stack();
462} 510}
463 511
464static void slab_err(struct kmem_cache *s, struct page *page, char *reason, ...) 512static void slab_err(struct kmem_cache *s, struct page *page, char *fmt, ...)
465{ 513{
466 va_list args; 514 va_list args;
467 char buf[100]; 515 char buf[100];
468 516
469 va_start(args, reason); 517 va_start(args, fmt);
470 vsnprintf(buf, sizeof(buf), reason, args); 518 vsnprintf(buf, sizeof(buf), fmt, args);
471 va_end(args); 519 va_end(args);
472 printk(KERN_ERR "*** SLUB %s: %s in slab @0x%p\n", s->name, buf, 520 slab_bug(s, fmt);
473 page); 521 print_page_info(page);
474 dump_stack(); 522 dump_stack();
475} 523}
476 524
@@ -489,15 +537,46 @@ static void init_object(struct kmem_cache *s, void *object, int active)
489 s->inuse - s->objsize); 537 s->inuse - s->objsize);
490} 538}
491 539
492static int check_bytes(u8 *start, unsigned int value, unsigned int bytes) 540static u8 *check_bytes(u8 *start, unsigned int value, unsigned int bytes)
493{ 541{
494 while (bytes) { 542 while (bytes) {
495 if (*start != (u8)value) 543 if (*start != (u8)value)
496 return 0; 544 return start;
497 start++; 545 start++;
498 bytes--; 546 bytes--;
499 } 547 }
500 return 1; 548 return NULL;
549}
550
551static void restore_bytes(struct kmem_cache *s, char *message, u8 data,
552 void *from, void *to)
553{
554 slab_fix(s, "Restoring 0x%p-0x%p=0x%x\n", from, to - 1, data);
555 memset(from, data, to - from);
556}
557
558static int check_bytes_and_report(struct kmem_cache *s, struct page *page,
559 u8 *object, char *what,
560 u8* start, unsigned int value, unsigned int bytes)
561{
562 u8 *fault;
563 u8 *end;
564
565 fault = check_bytes(start, value, bytes);
566 if (!fault)
567 return 1;
568
569 end = start + bytes;
570 while (end > fault && end[-1] == value)
571 end--;
572
573 slab_bug(s, "%s overwritten", what);
574 printk(KERN_ERR "INFO: 0x%p-0x%p. First byte 0x%x instead of 0x%x\n",
575 fault, end - 1, fault[0], value);
576 print_trailer(s, page, object);
577
578 restore_bytes(s, what, value, fault, end);
579 return 0;
501} 580}
502 581
503/* 582/*
@@ -538,14 +617,6 @@ static int check_bytes(u8 *start, unsigned int value, unsigned int bytes)
538 * may be used with merged slabcaches. 617 * may be used with merged slabcaches.
539 */ 618 */
540 619
541static void restore_bytes(struct kmem_cache *s, char *message, u8 data,
542 void *from, void *to)
543{
544 printk(KERN_ERR "@@@ SLUB %s: Restoring %s (0x%x) from 0x%p-0x%p\n",
545 s->name, message, data, from, to - 1);
546 memset(from, data, to - from);
547}
548
549static int check_pad_bytes(struct kmem_cache *s, struct page *page, u8 *p) 620static int check_pad_bytes(struct kmem_cache *s, struct page *page, u8 *p)
550{ 621{
551 unsigned long off = s->inuse; /* The end of info */ 622 unsigned long off = s->inuse; /* The end of info */
@@ -561,39 +632,39 @@ static int check_pad_bytes(struct kmem_cache *s, struct page *page, u8 *p)
561 if (s->size == off) 632 if (s->size == off)
562 return 1; 633 return 1;
563 634
564 if (check_bytes(p + off, POISON_INUSE, s->size - off)) 635 return check_bytes_and_report(s, page, p, "Object padding",
565 return 1; 636 p + off, POISON_INUSE, s->size - off);
566
567 object_err(s, page, p, "Object padding check fails");
568
569 /*
570 * Restore padding
571 */
572 restore_bytes(s, "object padding", POISON_INUSE, p + off, p + s->size);
573 return 0;
574} 637}
575 638
576static int slab_pad_check(struct kmem_cache *s, struct page *page) 639static int slab_pad_check(struct kmem_cache *s, struct page *page)
577{ 640{
578 u8 *p; 641 u8 *start;
579 int length, remainder; 642 u8 *fault;
643 u8 *end;
644 int length;
645 int remainder;
580 646
581 if (!(s->flags & SLAB_POISON)) 647 if (!(s->flags & SLAB_POISON))
582 return 1; 648 return 1;
583 649
584 p = page_address(page); 650 start = page_address(page);
651 end = start + (PAGE_SIZE << s->order);
585 length = s->objects * s->size; 652 length = s->objects * s->size;
586 remainder = (PAGE_SIZE << s->order) - length; 653 remainder = end - (start + length);
587 if (!remainder) 654 if (!remainder)
588 return 1; 655 return 1;
589 656
590 if (!check_bytes(p + length, POISON_INUSE, remainder)) { 657 fault = check_bytes(start + length, POISON_INUSE, remainder);
591 slab_err(s, page, "Padding check failed"); 658 if (!fault)
592 restore_bytes(s, "slab padding", POISON_INUSE, p + length, 659 return 1;
593 p + length + remainder); 660 while (end > fault && end[-1] == POISON_INUSE)
594 return 0; 661 end--;
595 } 662
596 return 1; 663 slab_err(s, page, "Padding overwritten. 0x%p-0x%p", fault, end - 1);
664 print_section("Padding", start, length);
665
666 restore_bytes(s, "slab padding", POISON_INUSE, start, end);
667 return 0;
597} 668}
598 669
599static int check_object(struct kmem_cache *s, struct page *page, 670static int check_object(struct kmem_cache *s, struct page *page,
@@ -606,41 +677,22 @@ static int check_object(struct kmem_cache *s, struct page *page,
606 unsigned int red = 677 unsigned int red =
607 active ? SLUB_RED_ACTIVE : SLUB_RED_INACTIVE; 678 active ? SLUB_RED_ACTIVE : SLUB_RED_INACTIVE;
608 679
609 if (!check_bytes(endobject, red, s->inuse - s->objsize)) { 680 if (!check_bytes_and_report(s, page, object, "Redzone",
610 object_err(s, page, object, 681 endobject, red, s->inuse - s->objsize))
611 active ? "Redzone Active" : "Redzone Inactive");
612 restore_bytes(s, "redzone", red,
613 endobject, object + s->inuse);
614 return 0; 682 return 0;
615 }
616 } else { 683 } else {
617 if ((s->flags & SLAB_POISON) && s->objsize < s->inuse && 684 if ((s->flags & SLAB_POISON) && s->objsize < s->inuse)
618 !check_bytes(endobject, POISON_INUSE, 685 check_bytes_and_report(s, page, p, "Alignment padding", endobject,
619 s->inuse - s->objsize)) { 686 POISON_INUSE, s->inuse - s->objsize);
620 object_err(s, page, p, "Alignment padding check fails");
621 /*
622 * Fix it so that there will not be another report.
623 *
624 * Hmmm... We may be corrupting an object that now expects
625 * to be longer than allowed.
626 */
627 restore_bytes(s, "alignment padding", POISON_INUSE,
628 endobject, object + s->inuse);
629 }
630 } 687 }
631 688
632 if (s->flags & SLAB_POISON) { 689 if (s->flags & SLAB_POISON) {
633 if (!active && (s->flags & __OBJECT_POISON) && 690 if (!active && (s->flags & __OBJECT_POISON) &&
634 (!check_bytes(p, POISON_FREE, s->objsize - 1) || 691 (!check_bytes_and_report(s, page, p, "Poison", p,
635 p[s->objsize - 1] != POISON_END)) { 692 POISON_FREE, s->objsize - 1) ||
636 693 !check_bytes_and_report(s, page, p, "Poison",
637 object_err(s, page, p, "Poison check failed"); 694 p + s->objsize -1, POISON_END, 1)))
638 restore_bytes(s, "Poison", POISON_FREE,
639 p, p + s->objsize -1);
640 restore_bytes(s, "Poison", POISON_END,
641 p + s->objsize - 1, p + s->objsize);
642 return 0; 695 return 0;
643 }
644 /* 696 /*
645 * check_pad_bytes cleans up on its own. 697 * check_pad_bytes cleans up on its own.
646 */ 698 */
@@ -673,25 +725,17 @@ static int check_slab(struct kmem_cache *s, struct page *page)
673 VM_BUG_ON(!irqs_disabled()); 725 VM_BUG_ON(!irqs_disabled());
674 726
675 if (!PageSlab(page)) { 727 if (!PageSlab(page)) {
676 slab_err(s, page, "Not a valid slab page flags=%lx " 728 slab_err(s, page, "Not a valid slab page");
677 "mapping=0x%p count=%d", page->flags, page->mapping,
678 page_count(page));
679 return 0; 729 return 0;
680 } 730 }
681 if (page->offset * sizeof(void *) != s->offset) { 731 if (page->offset * sizeof(void *) != s->offset) {
682 slab_err(s, page, "Corrupted offset %lu flags=0x%lx " 732 slab_err(s, page, "Corrupted offset %lu",
683 "mapping=0x%p count=%d", 733 (unsigned long)(page->offset * sizeof(void *)));
684 (unsigned long)(page->offset * sizeof(void *)),
685 page->flags,
686 page->mapping,
687 page_count(page));
688 return 0; 734 return 0;
689 } 735 }
690 if (page->inuse > s->objects) { 736 if (page->inuse > s->objects) {
691 slab_err(s, page, "inuse %u > max %u @0x%p flags=%lx " 737 slab_err(s, page, "inuse %u > max %u",
692 "mapping=0x%p count=%d", 738 s->name, page->inuse, s->objects);
693 s->name, page->inuse, s->objects, page->flags,
694 page->mapping, page_count(page));
695 return 0; 739 return 0;
696 } 740 }
697 /* Slab_pad_check fixes things up after itself */ 741 /* Slab_pad_check fixes things up after itself */
@@ -719,13 +763,10 @@ static int on_freelist(struct kmem_cache *s, struct page *page, void *search)
719 set_freepointer(s, object, NULL); 763 set_freepointer(s, object, NULL);
720 break; 764 break;
721 } else { 765 } else {
722 slab_err(s, page, "Freepointer 0x%p corrupt", 766 slab_err(s, page, "Freepointer corrupt");
723 fp);
724 page->freelist = NULL; 767 page->freelist = NULL;
725 page->inuse = s->objects; 768 page->inuse = s->objects;
726 printk(KERN_ERR "@@@ SLUB %s: Freelist " 769 slab_fix(s, "Freelist cleared");
727 "cleared. Slab 0x%p\n",
728 s->name, page);
729 return 0; 770 return 0;
730 } 771 }
731 break; 772 break;
@@ -737,11 +778,9 @@ static int on_freelist(struct kmem_cache *s, struct page *page, void *search)
737 778
738 if (page->inuse != s->objects - nr) { 779 if (page->inuse != s->objects - nr) {
739 slab_err(s, page, "Wrong object count. Counter is %d but " 780 slab_err(s, page, "Wrong object count. Counter is %d but "
740 "counted were %d", s, page, page->inuse, 781 "counted were %d", page->inuse, s->objects - nr);
741 s->objects - nr);
742 page->inuse = s->objects - nr; 782 page->inuse = s->objects - nr;
743 printk(KERN_ERR "@@@ SLUB %s: Object count adjusted. " 783 slab_fix(s, "Object count adjusted.");
744 "Slab @0x%p\n", s->name, page);
745 } 784 }
746 return search == NULL; 785 return search == NULL;
747} 786}
@@ -803,7 +842,7 @@ static int alloc_debug_processing(struct kmem_cache *s, struct page *page,
803 goto bad; 842 goto bad;
804 843
805 if (object && !on_freelist(s, page, object)) { 844 if (object && !on_freelist(s, page, object)) {
806 slab_err(s, page, "Object 0x%p already allocated", object); 845 object_err(s, page, object, "Object already allocated");
807 goto bad; 846 goto bad;
808 } 847 }
809 848
@@ -829,8 +868,7 @@ bad:
829 * to avoid issues in the future. Marking all objects 868 * to avoid issues in the future. Marking all objects
830 * as used avoids touching the remaining objects. 869 * as used avoids touching the remaining objects.
831 */ 870 */
832 printk(KERN_ERR "@@@ SLUB: %s slab 0x%p. Marking all objects used.\n", 871 slab_fix(s, "Marking all objects used");
833 s->name, page);
834 page->inuse = s->objects; 872 page->inuse = s->objects;
835 page->freelist = NULL; 873 page->freelist = NULL;
836 /* Fix up fields that may be corrupted */ 874 /* Fix up fields that may be corrupted */
@@ -851,7 +889,7 @@ static int free_debug_processing(struct kmem_cache *s, struct page *page,
851 } 889 }
852 890
853 if (on_freelist(s, page, object)) { 891 if (on_freelist(s, page, object)) {
854 slab_err(s, page, "Object 0x%p already free", object); 892 object_err(s, page, object, "Object already free");
855 goto fail; 893 goto fail;
856 } 894 }
857 895
@@ -870,8 +908,8 @@ static int free_debug_processing(struct kmem_cache *s, struct page *page,
870 dump_stack(); 908 dump_stack();
871 } 909 }
872 else 910 else
873 slab_err(s, page, "object at 0x%p belongs " 911 object_err(s, page, object,
874 "to slab %s", object, page->slab->name); 912 "page slab pointer corrupt.");
875 goto fail; 913 goto fail;
876 } 914 }
877 915
@@ -885,8 +923,7 @@ static int free_debug_processing(struct kmem_cache *s, struct page *page,
885 return 1; 923 return 1;
886 924
887fail: 925fail:
888 printk(KERN_ERR "@@@ SLUB: %s slab 0x%p object at 0x%p not freed.\n", 926 slab_fix(s, "Object at 0x%p not freed", object);
889 s->name, page, object);
890 return 0; 927 return 0;
891} 928}
892 929
@@ -1041,7 +1078,7 @@ static struct page *new_slab(struct kmem_cache *s, gfp_t flags, int node)
1041 void *last; 1078 void *last;
1042 void *p; 1079 void *p;
1043 1080
1044 BUG_ON(flags & ~(GFP_DMA | GFP_LEVEL_MASK)); 1081 BUG_ON(flags & ~(GFP_DMA | __GFP_ZERO | GFP_LEVEL_MASK));
1045 1082
1046 if (flags & __GFP_WAIT) 1083 if (flags & __GFP_WAIT)
1047 local_irq_enable(); 1084 local_irq_enable();
@@ -1359,7 +1396,7 @@ static void deactivate_slab(struct kmem_cache *s, struct page *page, int cpu)
1359 unfreeze_slab(s, page); 1396 unfreeze_slab(s, page);
1360} 1397}
1361 1398
1362static void flush_slab(struct kmem_cache *s, struct page *page, int cpu) 1399static inline void flush_slab(struct kmem_cache *s, struct page *page, int cpu)
1363{ 1400{
1364 slab_lock(page); 1401 slab_lock(page);
1365 deactivate_slab(s, page, cpu); 1402 deactivate_slab(s, page, cpu);
@@ -1369,7 +1406,7 @@ static void flush_slab(struct kmem_cache *s, struct page *page, int cpu)
1369 * Flush cpu slab. 1406 * Flush cpu slab.
1370 * Called from IPI handler with interrupts disabled. 1407 * Called from IPI handler with interrupts disabled.
1371 */ 1408 */
1372static void __flush_cpu_slab(struct kmem_cache *s, int cpu) 1409static inline void __flush_cpu_slab(struct kmem_cache *s, int cpu)
1373{ 1410{
1374 struct page *page = s->cpu_slab[cpu]; 1411 struct page *page = s->cpu_slab[cpu];
1375 1412
@@ -1504,7 +1541,7 @@ debug:
1504 * Otherwise we can simply pick the next object from the lockless free list. 1541 * Otherwise we can simply pick the next object from the lockless free list.
1505 */ 1542 */
1506static void __always_inline *slab_alloc(struct kmem_cache *s, 1543static void __always_inline *slab_alloc(struct kmem_cache *s,
1507 gfp_t gfpflags, int node, void *addr) 1544 gfp_t gfpflags, int node, void *addr)
1508{ 1545{
1509 struct page *page; 1546 struct page *page;
1510 void **object; 1547 void **object;
@@ -1522,6 +1559,10 @@ static void __always_inline *slab_alloc(struct kmem_cache *s,
1522 page->lockless_freelist = object[page->offset]; 1559 page->lockless_freelist = object[page->offset];
1523 } 1560 }
1524 local_irq_restore(flags); 1561 local_irq_restore(flags);
1562
1563 if (unlikely((gfpflags & __GFP_ZERO) && object))
1564 memset(object, 0, s->objsize);
1565
1525 return object; 1566 return object;
1526} 1567}
1527 1568
@@ -1705,8 +1746,17 @@ static inline int slab_order(int size, int min_objects,
1705{ 1746{
1706 int order; 1747 int order;
1707 int rem; 1748 int rem;
1749 int min_order = slub_min_order;
1708 1750
1709 for (order = max(slub_min_order, 1751 /*
1752 * If we would create too many object per slab then reduce
1753 * the slab order even if it goes below slub_min_order.
1754 */
1755 while (min_order > 0 &&
1756 (PAGE_SIZE << min_order) >= MAX_OBJECTS_PER_SLAB * size)
1757 min_order--;
1758
1759 for (order = max(min_order,
1710 fls(min_objects * size - 1) - PAGE_SHIFT); 1760 fls(min_objects * size - 1) - PAGE_SHIFT);
1711 order <= max_order; order++) { 1761 order <= max_order; order++) {
1712 1762
@@ -1720,6 +1770,9 @@ static inline int slab_order(int size, int min_objects,
1720 if (rem <= slab_size / fract_leftover) 1770 if (rem <= slab_size / fract_leftover)
1721 break; 1771 break;
1722 1772
1773 /* If the next size is too high then exit now */
1774 if (slab_size * 2 >= MAX_OBJECTS_PER_SLAB * size)
1775 break;
1723 } 1776 }
1724 1777
1725 return order; 1778 return order;
@@ -1800,7 +1853,9 @@ static void init_kmem_cache_node(struct kmem_cache_node *n)
1800 atomic_long_set(&n->nr_slabs, 0); 1853 atomic_long_set(&n->nr_slabs, 0);
1801 spin_lock_init(&n->list_lock); 1854 spin_lock_init(&n->list_lock);
1802 INIT_LIST_HEAD(&n->partial); 1855 INIT_LIST_HEAD(&n->partial);
1856#ifdef CONFIG_SLUB_DEBUG
1803 INIT_LIST_HEAD(&n->full); 1857 INIT_LIST_HEAD(&n->full);
1858#endif
1804} 1859}
1805 1860
1806#ifdef CONFIG_NUMA 1861#ifdef CONFIG_NUMA
@@ -1828,7 +1883,10 @@ static struct kmem_cache_node * __init early_kmem_cache_node_alloc(gfp_t gfpflag
1828 page->freelist = get_freepointer(kmalloc_caches, n); 1883 page->freelist = get_freepointer(kmalloc_caches, n);
1829 page->inuse++; 1884 page->inuse++;
1830 kmalloc_caches->node[node] = n; 1885 kmalloc_caches->node[node] = n;
1831 setup_object_debug(kmalloc_caches, page, n); 1886#ifdef CONFIG_SLUB_DEBUG
1887 init_object(kmalloc_caches, n, 1);
1888 init_tracking(kmalloc_caches, n);
1889#endif
1832 init_kmem_cache_node(n); 1890 init_kmem_cache_node(n);
1833 atomic_long_inc(&n->nr_slabs); 1891 atomic_long_inc(&n->nr_slabs);
1834 add_partial(n, page); 1892 add_partial(n, page);
@@ -2006,7 +2064,7 @@ static int calculate_sizes(struct kmem_cache *s)
2006 * The page->inuse field is only 16 bit wide! So we cannot have 2064 * The page->inuse field is only 16 bit wide! So we cannot have
2007 * more than 64k objects per slab. 2065 * more than 64k objects per slab.
2008 */ 2066 */
2009 if (!s->objects || s->objects > 65535) 2067 if (!s->objects || s->objects > MAX_OBJECTS_PER_SLAB)
2010 return 0; 2068 return 0;
2011 return 1; 2069 return 1;
2012 2070
@@ -2110,7 +2168,7 @@ static int free_list(struct kmem_cache *s, struct kmem_cache_node *n,
2110/* 2168/*
2111 * Release all resources used by a slab cache. 2169 * Release all resources used by a slab cache.
2112 */ 2170 */
2113static int kmem_cache_close(struct kmem_cache *s) 2171static inline int kmem_cache_close(struct kmem_cache *s)
2114{ 2172{
2115 int node; 2173 int node;
2116 2174
@@ -2138,12 +2196,13 @@ void kmem_cache_destroy(struct kmem_cache *s)
2138 s->refcount--; 2196 s->refcount--;
2139 if (!s->refcount) { 2197 if (!s->refcount) {
2140 list_del(&s->list); 2198 list_del(&s->list);
2199 up_write(&slub_lock);
2141 if (kmem_cache_close(s)) 2200 if (kmem_cache_close(s))
2142 WARN_ON(1); 2201 WARN_ON(1);
2143 sysfs_slab_remove(s); 2202 sysfs_slab_remove(s);
2144 kfree(s); 2203 kfree(s);
2145 } 2204 } else
2146 up_write(&slub_lock); 2205 up_write(&slub_lock);
2147} 2206}
2148EXPORT_SYMBOL(kmem_cache_destroy); 2207EXPORT_SYMBOL(kmem_cache_destroy);
2149 2208
@@ -2216,47 +2275,92 @@ panic:
2216 panic("Creation of kmalloc slab %s size=%d failed.\n", name, size); 2275 panic("Creation of kmalloc slab %s size=%d failed.\n", name, size);
2217} 2276}
2218 2277
2219static struct kmem_cache *get_slab(size_t size, gfp_t flags) 2278#ifdef CONFIG_ZONE_DMA
2279static noinline struct kmem_cache *dma_kmalloc_cache(int index, gfp_t flags)
2220{ 2280{
2221 int index = kmalloc_index(size); 2281 struct kmem_cache *s;
2282 struct kmem_cache *x;
2283 char *text;
2284 size_t realsize;
2222 2285
2223 if (!index) 2286 s = kmalloc_caches_dma[index];
2224 return NULL; 2287 if (s)
2288 return s;
2225 2289
2226 /* Allocation too large? */ 2290 /* Dynamically create dma cache */
2227 BUG_ON(index < 0); 2291 x = kmalloc(kmem_size, flags & ~SLUB_DMA);
2292 if (!x)
2293 panic("Unable to allocate memory for dma cache\n");
2228 2294
2229#ifdef CONFIG_ZONE_DMA 2295 realsize = kmalloc_caches[index].objsize;
2230 if ((flags & SLUB_DMA)) { 2296 text = kasprintf(flags & ~SLUB_DMA, "kmalloc_dma-%d",
2231 struct kmem_cache *s; 2297 (unsigned int)realsize);
2232 struct kmem_cache *x; 2298 s = create_kmalloc_cache(x, text, realsize, flags);
2233 char *text; 2299 down_write(&slub_lock);
2234 size_t realsize; 2300 if (!kmalloc_caches_dma[index]) {
2235 2301 kmalloc_caches_dma[index] = s;
2236 s = kmalloc_caches_dma[index]; 2302 up_write(&slub_lock);
2237 if (s) 2303 return s;
2238 return s; 2304 }
2305 up_write(&slub_lock);
2306 kmem_cache_destroy(s);
2307 return kmalloc_caches_dma[index];
2308}
2309#endif
2310
2311/*
2312 * Conversion table for small slabs sizes / 8 to the index in the
2313 * kmalloc array. This is necessary for slabs < 192 since we have non power
2314 * of two cache sizes there. The size of larger slabs can be determined using
2315 * fls.
2316 */
2317static s8 size_index[24] = {
2318 3, /* 8 */
2319 4, /* 16 */
2320 5, /* 24 */
2321 5, /* 32 */
2322 6, /* 40 */
2323 6, /* 48 */
2324 6, /* 56 */
2325 6, /* 64 */
2326 1, /* 72 */
2327 1, /* 80 */
2328 1, /* 88 */
2329 1, /* 96 */
2330 7, /* 104 */
2331 7, /* 112 */
2332 7, /* 120 */
2333 7, /* 128 */
2334 2, /* 136 */
2335 2, /* 144 */
2336 2, /* 152 */
2337 2, /* 160 */
2338 2, /* 168 */
2339 2, /* 176 */
2340 2, /* 184 */
2341 2 /* 192 */
2342};
2239 2343
2240 /* Dynamically create dma cache */ 2344static struct kmem_cache *get_slab(size_t size, gfp_t flags)
2241 x = kmalloc(kmem_size, flags & ~SLUB_DMA); 2345{
2242 if (!x) 2346 int index;
2243 panic("Unable to allocate memory for dma cache\n");
2244 2347
2245 if (index <= KMALLOC_SHIFT_HIGH) 2348 if (size <= 192) {
2246 realsize = 1 << index; 2349 if (!size)
2247 else { 2350 return ZERO_SIZE_PTR;
2248 if (index == 1)
2249 realsize = 96;
2250 else
2251 realsize = 192;
2252 }
2253 2351
2254 text = kasprintf(flags & ~SLUB_DMA, "kmalloc_dma-%d", 2352 index = size_index[(size - 1) / 8];
2255 (unsigned int)realsize); 2353 } else {
2256 s = create_kmalloc_cache(x, text, realsize, flags); 2354 if (size > KMALLOC_MAX_SIZE)
2257 kmalloc_caches_dma[index] = s; 2355 return NULL;
2258 return s; 2356
2357 index = fls(size - 1);
2259 } 2358 }
2359
2360#ifdef CONFIG_ZONE_DMA
2361 if (unlikely((flags & SLUB_DMA)))
2362 return dma_kmalloc_cache(index, flags);
2363
2260#endif 2364#endif
2261 return &kmalloc_caches[index]; 2365 return &kmalloc_caches[index];
2262} 2366}
@@ -2265,9 +2369,10 @@ void *__kmalloc(size_t size, gfp_t flags)
2265{ 2369{
2266 struct kmem_cache *s = get_slab(size, flags); 2370 struct kmem_cache *s = get_slab(size, flags);
2267 2371
2268 if (s) 2372 if (ZERO_OR_NULL_PTR(s))
2269 return slab_alloc(s, flags, -1, __builtin_return_address(0)); 2373 return s;
2270 return ZERO_SIZE_PTR; 2374
2375 return slab_alloc(s, flags, -1, __builtin_return_address(0));
2271} 2376}
2272EXPORT_SYMBOL(__kmalloc); 2377EXPORT_SYMBOL(__kmalloc);
2273 2378
@@ -2276,9 +2381,10 @@ void *__kmalloc_node(size_t size, gfp_t flags, int node)
2276{ 2381{
2277 struct kmem_cache *s = get_slab(size, flags); 2382 struct kmem_cache *s = get_slab(size, flags);
2278 2383
2279 if (s) 2384 if (ZERO_OR_NULL_PTR(s))
2280 return slab_alloc(s, flags, node, __builtin_return_address(0)); 2385 return s;
2281 return ZERO_SIZE_PTR; 2386
2387 return slab_alloc(s, flags, node, __builtin_return_address(0));
2282} 2388}
2283EXPORT_SYMBOL(__kmalloc_node); 2389EXPORT_SYMBOL(__kmalloc_node);
2284#endif 2390#endif
@@ -2329,7 +2435,7 @@ void kfree(const void *x)
2329 * this comparison would be true for all "negative" pointers 2435 * this comparison would be true for all "negative" pointers
2330 * (which would cover the whole upper half of the address space). 2436 * (which would cover the whole upper half of the address space).
2331 */ 2437 */
2332 if ((unsigned long)x <= (unsigned long)ZERO_SIZE_PTR) 2438 if (ZERO_OR_NULL_PTR(x))
2333 return; 2439 return;
2334 2440
2335 page = virt_to_head_page(x); 2441 page = virt_to_head_page(x);
@@ -2418,43 +2524,6 @@ int kmem_cache_shrink(struct kmem_cache *s)
2418} 2524}
2419EXPORT_SYMBOL(kmem_cache_shrink); 2525EXPORT_SYMBOL(kmem_cache_shrink);
2420 2526
2421/**
2422 * krealloc - reallocate memory. The contents will remain unchanged.
2423 * @p: object to reallocate memory for.
2424 * @new_size: how many bytes of memory are required.
2425 * @flags: the type of memory to allocate.
2426 *
2427 * The contents of the object pointed to are preserved up to the
2428 * lesser of the new and old sizes. If @p is %NULL, krealloc()
2429 * behaves exactly like kmalloc(). If @size is 0 and @p is not a
2430 * %NULL pointer, the object pointed to is freed.
2431 */
2432void *krealloc(const void *p, size_t new_size, gfp_t flags)
2433{
2434 void *ret;
2435 size_t ks;
2436
2437 if (unlikely(!p || p == ZERO_SIZE_PTR))
2438 return kmalloc(new_size, flags);
2439
2440 if (unlikely(!new_size)) {
2441 kfree(p);
2442 return ZERO_SIZE_PTR;
2443 }
2444
2445 ks = ksize(p);
2446 if (ks >= new_size)
2447 return (void *)p;
2448
2449 ret = kmalloc(new_size, flags);
2450 if (ret) {
2451 memcpy(ret, p, min(new_size, ks));
2452 kfree(p);
2453 }
2454 return ret;
2455}
2456EXPORT_SYMBOL(krealloc);
2457
2458/******************************************************************** 2527/********************************************************************
2459 * Basic setup of slabs 2528 * Basic setup of slabs
2460 *******************************************************************/ 2529 *******************************************************************/
@@ -2497,6 +2566,24 @@ void __init kmem_cache_init(void)
2497 caches++; 2566 caches++;
2498 } 2567 }
2499 2568
2569
2570 /*
2571 * Patch up the size_index table if we have strange large alignment
2572 * requirements for the kmalloc array. This is only the case for
2573 * mips it seems. The standard arches will not generate any code here.
2574 *
2575 * Largest permitted alignment is 256 bytes due to the way we
2576 * handle the index determination for the smaller caches.
2577 *
2578 * Make sure that nothing crazy happens if someone starts tinkering
2579 * around with ARCH_KMALLOC_MINALIGN
2580 */
2581 BUILD_BUG_ON(KMALLOC_MIN_SIZE > 256 ||
2582 (KMALLOC_MIN_SIZE & (KMALLOC_MIN_SIZE - 1)));
2583
2584 for (i = 8; i < KMALLOC_MIN_SIZE; i += 8)
2585 size_index[(i - 1) / 8] = KMALLOC_SHIFT_LOW;
2586
2500 slab_state = UP; 2587 slab_state = UP;
2501 2588
2502 /* Provide the correct kmalloc names now that the caches are up */ 2589 /* Provide the correct kmalloc names now that the caches are up */
@@ -2542,7 +2629,7 @@ static struct kmem_cache *find_mergeable(size_t size,
2542 size_t align, unsigned long flags, 2629 size_t align, unsigned long flags,
2543 void (*ctor)(void *, struct kmem_cache *, unsigned long)) 2630 void (*ctor)(void *, struct kmem_cache *, unsigned long))
2544{ 2631{
2545 struct list_head *h; 2632 struct kmem_cache *s;
2546 2633
2547 if (slub_nomerge || (flags & SLUB_NEVER_MERGE)) 2634 if (slub_nomerge || (flags & SLUB_NEVER_MERGE))
2548 return NULL; 2635 return NULL;
@@ -2554,10 +2641,7 @@ static struct kmem_cache *find_mergeable(size_t size,
2554 align = calculate_alignment(flags, align, size); 2641 align = calculate_alignment(flags, align, size);
2555 size = ALIGN(size, align); 2642 size = ALIGN(size, align);
2556 2643
2557 list_for_each(h, &slab_caches) { 2644 list_for_each_entry(s, &slab_caches, list) {
2558 struct kmem_cache *s =
2559 container_of(h, struct kmem_cache, list);
2560
2561 if (slab_unmergeable(s)) 2645 if (slab_unmergeable(s))
2562 continue; 2646 continue;
2563 2647
@@ -2600,25 +2684,26 @@ struct kmem_cache *kmem_cache_create(const char *name, size_t size,
2600 */ 2684 */
2601 s->objsize = max(s->objsize, (int)size); 2685 s->objsize = max(s->objsize, (int)size);
2602 s->inuse = max_t(int, s->inuse, ALIGN(size, sizeof(void *))); 2686 s->inuse = max_t(int, s->inuse, ALIGN(size, sizeof(void *)));
2687 up_write(&slub_lock);
2603 if (sysfs_slab_alias(s, name)) 2688 if (sysfs_slab_alias(s, name))
2604 goto err; 2689 goto err;
2605 } else { 2690 return s;
2606 s = kmalloc(kmem_size, GFP_KERNEL); 2691 }
2607 if (s && kmem_cache_open(s, GFP_KERNEL, name, 2692 s = kmalloc(kmem_size, GFP_KERNEL);
2693 if (s) {
2694 if (kmem_cache_open(s, GFP_KERNEL, name,
2608 size, align, flags, ctor)) { 2695 size, align, flags, ctor)) {
2609 if (sysfs_slab_add(s)) {
2610 kfree(s);
2611 goto err;
2612 }
2613 list_add(&s->list, &slab_caches); 2696 list_add(&s->list, &slab_caches);
2614 } else 2697 up_write(&slub_lock);
2615 kfree(s); 2698 if (sysfs_slab_add(s))
2699 goto err;
2700 return s;
2701 }
2702 kfree(s);
2616 } 2703 }
2617 up_write(&slub_lock); 2704 up_write(&slub_lock);
2618 return s;
2619 2705
2620err: 2706err:
2621 up_write(&slub_lock);
2622 if (flags & SLAB_PANIC) 2707 if (flags & SLAB_PANIC)
2623 panic("Cannot create slabcache %s\n", name); 2708 panic("Cannot create slabcache %s\n", name);
2624 else 2709 else
@@ -2627,45 +2712,7 @@ err:
2627} 2712}
2628EXPORT_SYMBOL(kmem_cache_create); 2713EXPORT_SYMBOL(kmem_cache_create);
2629 2714
2630void *kmem_cache_zalloc(struct kmem_cache *s, gfp_t flags)
2631{
2632 void *x;
2633
2634 x = slab_alloc(s, flags, -1, __builtin_return_address(0));
2635 if (x)
2636 memset(x, 0, s->objsize);
2637 return x;
2638}
2639EXPORT_SYMBOL(kmem_cache_zalloc);
2640
2641#ifdef CONFIG_SMP 2715#ifdef CONFIG_SMP
2642static void for_all_slabs(void (*func)(struct kmem_cache *, int), int cpu)
2643{
2644 struct list_head *h;
2645
2646 down_read(&slub_lock);
2647 list_for_each(h, &slab_caches) {
2648 struct kmem_cache *s =
2649 container_of(h, struct kmem_cache, list);
2650
2651 func(s, cpu);
2652 }
2653 up_read(&slub_lock);
2654}
2655
2656/*
2657 * Version of __flush_cpu_slab for the case that interrupts
2658 * are enabled.
2659 */
2660static void cpu_slab_flush(struct kmem_cache *s, int cpu)
2661{
2662 unsigned long flags;
2663
2664 local_irq_save(flags);
2665 __flush_cpu_slab(s, cpu);
2666 local_irq_restore(flags);
2667}
2668
2669/* 2716/*
2670 * Use the cpu notifier to insure that the cpu slabs are flushed when 2717 * Use the cpu notifier to insure that the cpu slabs are flushed when
2671 * necessary. 2718 * necessary.
@@ -2674,13 +2721,21 @@ static int __cpuinit slab_cpuup_callback(struct notifier_block *nfb,
2674 unsigned long action, void *hcpu) 2721 unsigned long action, void *hcpu)
2675{ 2722{
2676 long cpu = (long)hcpu; 2723 long cpu = (long)hcpu;
2724 struct kmem_cache *s;
2725 unsigned long flags;
2677 2726
2678 switch (action) { 2727 switch (action) {
2679 case CPU_UP_CANCELED: 2728 case CPU_UP_CANCELED:
2680 case CPU_UP_CANCELED_FROZEN: 2729 case CPU_UP_CANCELED_FROZEN:
2681 case CPU_DEAD: 2730 case CPU_DEAD:
2682 case CPU_DEAD_FROZEN: 2731 case CPU_DEAD_FROZEN:
2683 for_all_slabs(cpu_slab_flush, cpu); 2732 down_read(&slub_lock);
2733 list_for_each_entry(s, &slab_caches, list) {
2734 local_irq_save(flags);
2735 __flush_cpu_slab(s, cpu);
2736 local_irq_restore(flags);
2737 }
2738 up_read(&slub_lock);
2684 break; 2739 break;
2685 default: 2740 default:
2686 break; 2741 break;
@@ -2697,8 +2752,8 @@ void *__kmalloc_track_caller(size_t size, gfp_t gfpflags, void *caller)
2697{ 2752{
2698 struct kmem_cache *s = get_slab(size, gfpflags); 2753 struct kmem_cache *s = get_slab(size, gfpflags);
2699 2754
2700 if (!s) 2755 if (ZERO_OR_NULL_PTR(s))
2701 return ZERO_SIZE_PTR; 2756 return s;
2702 2757
2703 return slab_alloc(s, gfpflags, -1, caller); 2758 return slab_alloc(s, gfpflags, -1, caller);
2704} 2759}
@@ -2708,18 +2763,18 @@ void *__kmalloc_node_track_caller(size_t size, gfp_t gfpflags,
2708{ 2763{
2709 struct kmem_cache *s = get_slab(size, gfpflags); 2764 struct kmem_cache *s = get_slab(size, gfpflags);
2710 2765
2711 if (!s) 2766 if (ZERO_OR_NULL_PTR(s))
2712 return ZERO_SIZE_PTR; 2767 return s;
2713 2768
2714 return slab_alloc(s, gfpflags, node, caller); 2769 return slab_alloc(s, gfpflags, node, caller);
2715} 2770}
2716 2771
2717#if defined(CONFIG_SYSFS) && defined(CONFIG_SLUB_DEBUG) 2772#if defined(CONFIG_SYSFS) && defined(CONFIG_SLUB_DEBUG)
2718static int validate_slab(struct kmem_cache *s, struct page *page) 2773static int validate_slab(struct kmem_cache *s, struct page *page,
2774 unsigned long *map)
2719{ 2775{
2720 void *p; 2776 void *p;
2721 void *addr = page_address(page); 2777 void *addr = page_address(page);
2722 DECLARE_BITMAP(map, s->objects);
2723 2778
2724 if (!check_slab(s, page) || 2779 if (!check_slab(s, page) ||
2725 !on_freelist(s, page, NULL)) 2780 !on_freelist(s, page, NULL))
@@ -2741,10 +2796,11 @@ static int validate_slab(struct kmem_cache *s, struct page *page)
2741 return 1; 2796 return 1;
2742} 2797}
2743 2798
2744static void validate_slab_slab(struct kmem_cache *s, struct page *page) 2799static void validate_slab_slab(struct kmem_cache *s, struct page *page,
2800 unsigned long *map)
2745{ 2801{
2746 if (slab_trylock(page)) { 2802 if (slab_trylock(page)) {
2747 validate_slab(s, page); 2803 validate_slab(s, page, map);
2748 slab_unlock(page); 2804 slab_unlock(page);
2749 } else 2805 } else
2750 printk(KERN_INFO "SLUB %s: Skipped busy slab 0x%p\n", 2806 printk(KERN_INFO "SLUB %s: Skipped busy slab 0x%p\n",
@@ -2761,7 +2817,8 @@ static void validate_slab_slab(struct kmem_cache *s, struct page *page)
2761 } 2817 }
2762} 2818}
2763 2819
2764static int validate_slab_node(struct kmem_cache *s, struct kmem_cache_node *n) 2820static int validate_slab_node(struct kmem_cache *s,
2821 struct kmem_cache_node *n, unsigned long *map)
2765{ 2822{
2766 unsigned long count = 0; 2823 unsigned long count = 0;
2767 struct page *page; 2824 struct page *page;
@@ -2770,7 +2827,7 @@ static int validate_slab_node(struct kmem_cache *s, struct kmem_cache_node *n)
2770 spin_lock_irqsave(&n->list_lock, flags); 2827 spin_lock_irqsave(&n->list_lock, flags);
2771 2828
2772 list_for_each_entry(page, &n->partial, lru) { 2829 list_for_each_entry(page, &n->partial, lru) {
2773 validate_slab_slab(s, page); 2830 validate_slab_slab(s, page, map);
2774 count++; 2831 count++;
2775 } 2832 }
2776 if (count != n->nr_partial) 2833 if (count != n->nr_partial)
@@ -2781,7 +2838,7 @@ static int validate_slab_node(struct kmem_cache *s, struct kmem_cache_node *n)
2781 goto out; 2838 goto out;
2782 2839
2783 list_for_each_entry(page, &n->full, lru) { 2840 list_for_each_entry(page, &n->full, lru) {
2784 validate_slab_slab(s, page); 2841 validate_slab_slab(s, page, map);
2785 count++; 2842 count++;
2786 } 2843 }
2787 if (count != atomic_long_read(&n->nr_slabs)) 2844 if (count != atomic_long_read(&n->nr_slabs))
@@ -2794,17 +2851,23 @@ out:
2794 return count; 2851 return count;
2795} 2852}
2796 2853
2797static unsigned long validate_slab_cache(struct kmem_cache *s) 2854static long validate_slab_cache(struct kmem_cache *s)
2798{ 2855{
2799 int node; 2856 int node;
2800 unsigned long count = 0; 2857 unsigned long count = 0;
2858 unsigned long *map = kmalloc(BITS_TO_LONGS(s->objects) *
2859 sizeof(unsigned long), GFP_KERNEL);
2860
2861 if (!map)
2862 return -ENOMEM;
2801 2863
2802 flush_all(s); 2864 flush_all(s);
2803 for_each_online_node(node) { 2865 for_each_online_node(node) {
2804 struct kmem_cache_node *n = get_node(s, node); 2866 struct kmem_cache_node *n = get_node(s, node);
2805 2867
2806 count += validate_slab_node(s, n); 2868 count += validate_slab_node(s, n, map);
2807 } 2869 }
2870 kfree(map);
2808 return count; 2871 return count;
2809} 2872}
2810 2873
@@ -2893,18 +2956,14 @@ static void free_loc_track(struct loc_track *t)
2893 get_order(sizeof(struct location) * t->max)); 2956 get_order(sizeof(struct location) * t->max));
2894} 2957}
2895 2958
2896static int alloc_loc_track(struct loc_track *t, unsigned long max) 2959static int alloc_loc_track(struct loc_track *t, unsigned long max, gfp_t flags)
2897{ 2960{
2898 struct location *l; 2961 struct location *l;
2899 int order; 2962 int order;
2900 2963
2901 if (!max)
2902 max = PAGE_SIZE / sizeof(struct location);
2903
2904 order = get_order(sizeof(struct location) * max); 2964 order = get_order(sizeof(struct location) * max);
2905 2965
2906 l = (void *)__get_free_pages(GFP_ATOMIC, order); 2966 l = (void *)__get_free_pages(flags, order);
2907
2908 if (!l) 2967 if (!l)
2909 return 0; 2968 return 0;
2910 2969
@@ -2970,7 +3029,7 @@ static int add_location(struct loc_track *t, struct kmem_cache *s,
2970 /* 3029 /*
2971 * Not found. Insert new tracking element. 3030 * Not found. Insert new tracking element.
2972 */ 3031 */
2973 if (t->count >= t->max && !alloc_loc_track(t, 2 * t->max)) 3032 if (t->count >= t->max && !alloc_loc_track(t, 2 * t->max, GFP_ATOMIC))
2974 return 0; 3033 return 0;
2975 3034
2976 l = t->loc + pos; 3035 l = t->loc + pos;
@@ -3013,11 +3072,12 @@ static int list_locations(struct kmem_cache *s, char *buf,
3013{ 3072{
3014 int n = 0; 3073 int n = 0;
3015 unsigned long i; 3074 unsigned long i;
3016 struct loc_track t; 3075 struct loc_track t = { 0, 0, NULL };
3017 int node; 3076 int node;
3018 3077
3019 t.count = 0; 3078 if (!alloc_loc_track(&t, PAGE_SIZE / sizeof(struct location),
3020 t.max = 0; 3079 GFP_KERNEL))
3080 return sprintf(buf, "Out of memory\n");
3021 3081
3022 /* Push back cpu slabs */ 3082 /* Push back cpu slabs */
3023 flush_all(s); 3083 flush_all(s);
@@ -3421,11 +3481,14 @@ static ssize_t validate_show(struct kmem_cache *s, char *buf)
3421static ssize_t validate_store(struct kmem_cache *s, 3481static ssize_t validate_store(struct kmem_cache *s,
3422 const char *buf, size_t length) 3482 const char *buf, size_t length)
3423{ 3483{
3424 if (buf[0] == '1') 3484 int ret = -EINVAL;
3425 validate_slab_cache(s); 3485
3426 else 3486 if (buf[0] == '1') {
3427 return -EINVAL; 3487 ret = validate_slab_cache(s);
3428 return length; 3488 if (ret >= 0)
3489 ret = length;
3490 }
3491 return ret;
3429} 3492}
3430SLAB_ATTR(validate); 3493SLAB_ATTR(validate);
3431 3494
@@ -3579,7 +3642,7 @@ static struct kset_uevent_ops slab_uevent_ops = {
3579 .filter = uevent_filter, 3642 .filter = uevent_filter,
3580}; 3643};
3581 3644
3582decl_subsys(slab, &slab_ktype, &slab_uevent_ops); 3645static decl_subsys(slab, &slab_ktype, &slab_uevent_ops);
3583 3646
3584#define ID_STR_LENGTH 64 3647#define ID_STR_LENGTH 64
3585 3648
@@ -3677,7 +3740,7 @@ struct saved_alias {
3677 struct saved_alias *next; 3740 struct saved_alias *next;
3678}; 3741};
3679 3742
3680struct saved_alias *alias_list; 3743static struct saved_alias *alias_list;
3681 3744
3682static int sysfs_slab_alias(struct kmem_cache *s, const char *name) 3745static int sysfs_slab_alias(struct kmem_cache *s, const char *name)
3683{ 3746{
@@ -3705,7 +3768,7 @@ static int sysfs_slab_alias(struct kmem_cache *s, const char *name)
3705 3768
3706static int __init slab_sysfs_init(void) 3769static int __init slab_sysfs_init(void)
3707{ 3770{
3708 struct list_head *h; 3771 struct kmem_cache *s;
3709 int err; 3772 int err;
3710 3773
3711 err = subsystem_register(&slab_subsys); 3774 err = subsystem_register(&slab_subsys);
@@ -3716,10 +3779,7 @@ static int __init slab_sysfs_init(void)
3716 3779
3717 slab_state = SYSFS; 3780 slab_state = SYSFS;
3718 3781
3719 list_for_each(h, &slab_caches) { 3782 list_for_each_entry(s, &slab_caches, list) {
3720 struct kmem_cache *s =
3721 container_of(h, struct kmem_cache, list);
3722
3723 err = sysfs_slab_add(s); 3783 err = sysfs_slab_add(s);
3724 BUG_ON(err); 3784 BUG_ON(err);
3725 } 3785 }
diff --git a/mm/swap_state.c b/mm/swap_state.c
index 925d5c50f18d..67daecb6031a 100644
--- a/mm/swap_state.c
+++ b/mm/swap_state.c
@@ -334,7 +334,8 @@ struct page *read_swap_cache_async(swp_entry_t entry,
334 * Get a new page to read into from swap. 334 * Get a new page to read into from swap.
335 */ 335 */
336 if (!new_page) { 336 if (!new_page) {
337 new_page = alloc_page_vma(GFP_HIGHUSER, vma, addr); 337 new_page = alloc_page_vma(GFP_HIGHUSER_MOVABLE,
338 vma, addr);
338 if (!new_page) 339 if (!new_page)
339 break; /* Out of memory */ 340 break; /* Out of memory */
340 } 341 }
diff --git a/mm/truncate.c b/mm/truncate.c
index 7c994f2d6145..f47e46d1be3b 100644
--- a/mm/truncate.c
+++ b/mm/truncate.c
@@ -100,9 +100,9 @@ truncate_complete_page(struct address_space *mapping, struct page *page)
100 if (PagePrivate(page)) 100 if (PagePrivate(page))
101 do_invalidatepage(page, 0); 101 do_invalidatepage(page, 0);
102 102
103 remove_from_page_cache(page);
103 ClearPageUptodate(page); 104 ClearPageUptodate(page);
104 ClearPageMappedToDisk(page); 105 ClearPageMappedToDisk(page);
105 remove_from_page_cache(page);
106 page_cache_release(page); /* pagecache ref */ 106 page_cache_release(page); /* pagecache ref */
107} 107}
108 108
diff --git a/mm/util.c b/mm/util.c
index ace2aea69f1a..78f3783bdcc8 100644
--- a/mm/util.c
+++ b/mm/util.c
@@ -5,20 +5,6 @@
5#include <asm/uaccess.h> 5#include <asm/uaccess.h>
6 6
7/** 7/**
8 * __kzalloc - allocate memory. The memory is set to zero.
9 * @size: how many bytes of memory are required.
10 * @flags: the type of memory to allocate.
11 */
12void *__kzalloc(size_t size, gfp_t flags)
13{
14 void *ret = kmalloc_track_caller(size, flags);
15 if (ret)
16 memset(ret, 0, size);
17 return ret;
18}
19EXPORT_SYMBOL(__kzalloc);
20
21/*
22 * kstrdup - allocate space for and copy an existing string 8 * kstrdup - allocate space for and copy an existing string
23 * 9 *
24 * @s: the string to duplicate 10 * @s: the string to duplicate
@@ -58,6 +44,40 @@ void *kmemdup(const void *src, size_t len, gfp_t gfp)
58} 44}
59EXPORT_SYMBOL(kmemdup); 45EXPORT_SYMBOL(kmemdup);
60 46
47/**
48 * krealloc - reallocate memory. The contents will remain unchanged.
49 * @p: object to reallocate memory for.
50 * @new_size: how many bytes of memory are required.
51 * @flags: the type of memory to allocate.
52 *
53 * The contents of the object pointed to are preserved up to the
54 * lesser of the new and old sizes. If @p is %NULL, krealloc()
55 * behaves exactly like kmalloc(). If @size is 0 and @p is not a
56 * %NULL pointer, the object pointed to is freed.
57 */
58void *krealloc(const void *p, size_t new_size, gfp_t flags)
59{
60 void *ret;
61 size_t ks;
62
63 if (unlikely(!new_size)) {
64 kfree(p);
65 return ZERO_SIZE_PTR;
66 }
67
68 ks = ksize(p);
69 if (ks >= new_size)
70 return (void *)p;
71
72 ret = kmalloc_track_caller(new_size, flags);
73 if (ret) {
74 memcpy(ret, p, min(new_size, ks));
75 kfree(p);
76 }
77 return ret;
78}
79EXPORT_SYMBOL(krealloc);
80
61/* 81/*
62 * strndup_user - duplicate an existing string from user space 82 * strndup_user - duplicate an existing string from user space
63 * 83 *
diff --git a/mm/vmalloc.c b/mm/vmalloc.c
index ddf87145cc49..8e05a11155c9 100644
--- a/mm/vmalloc.c
+++ b/mm/vmalloc.c
@@ -432,11 +432,12 @@ void *__vmalloc_area_node(struct vm_struct *area, gfp_t gfp_mask,
432 area->nr_pages = nr_pages; 432 area->nr_pages = nr_pages;
433 /* Please note that the recursion is strictly bounded. */ 433 /* Please note that the recursion is strictly bounded. */
434 if (array_size > PAGE_SIZE) { 434 if (array_size > PAGE_SIZE) {
435 pages = __vmalloc_node(array_size, gfp_mask, PAGE_KERNEL, node); 435 pages = __vmalloc_node(array_size, gfp_mask | __GFP_ZERO,
436 PAGE_KERNEL, node);
436 area->flags |= VM_VPAGES; 437 area->flags |= VM_VPAGES;
437 } else { 438 } else {
438 pages = kmalloc_node(array_size, 439 pages = kmalloc_node(array_size,
439 (gfp_mask & GFP_LEVEL_MASK), 440 (gfp_mask & GFP_LEVEL_MASK) | __GFP_ZERO,
440 node); 441 node);
441 } 442 }
442 area->pages = pages; 443 area->pages = pages;
@@ -445,7 +446,6 @@ void *__vmalloc_area_node(struct vm_struct *area, gfp_t gfp_mask,
445 kfree(area); 446 kfree(area);
446 return NULL; 447 return NULL;
447 } 448 }
448 memset(area->pages, 0, array_size);
449 449
450 for (i = 0; i < area->nr_pages; i++) { 450 for (i = 0; i < area->nr_pages; i++) {
451 if (node < 0) 451 if (node < 0)
diff --git a/mm/vmscan.c b/mm/vmscan.c
index 1be5a6376ef0..d419e10e3daa 100644
--- a/mm/vmscan.c
+++ b/mm/vmscan.c
@@ -66,17 +66,8 @@ struct scan_control {
66 int swappiness; 66 int swappiness;
67 67
68 int all_unreclaimable; 68 int all_unreclaimable;
69};
70 69
71/* 70 int order;
72 * The list of shrinker callbacks used by to apply pressure to
73 * ageable caches.
74 */
75struct shrinker {
76 shrinker_t shrinker;
77 struct list_head list;
78 int seeks; /* seeks to recreate an obj */
79 long nr; /* objs pending delete */
80}; 71};
81 72
82#define lru_to_page(_head) (list_entry((_head)->prev, struct page, lru)) 73#define lru_to_page(_head) (list_entry((_head)->prev, struct page, lru))
@@ -121,34 +112,25 @@ static DECLARE_RWSEM(shrinker_rwsem);
121/* 112/*
122 * Add a shrinker callback to be called from the vm 113 * Add a shrinker callback to be called from the vm
123 */ 114 */
124struct shrinker *set_shrinker(int seeks, shrinker_t theshrinker) 115void register_shrinker(struct shrinker *shrinker)
125{ 116{
126 struct shrinker *shrinker; 117 shrinker->nr = 0;
127 118 down_write(&shrinker_rwsem);
128 shrinker = kmalloc(sizeof(*shrinker), GFP_KERNEL); 119 list_add_tail(&shrinker->list, &shrinker_list);
129 if (shrinker) { 120 up_write(&shrinker_rwsem);
130 shrinker->shrinker = theshrinker;
131 shrinker->seeks = seeks;
132 shrinker->nr = 0;
133 down_write(&shrinker_rwsem);
134 list_add_tail(&shrinker->list, &shrinker_list);
135 up_write(&shrinker_rwsem);
136 }
137 return shrinker;
138} 121}
139EXPORT_SYMBOL(set_shrinker); 122EXPORT_SYMBOL(register_shrinker);
140 123
141/* 124/*
142 * Remove one 125 * Remove one
143 */ 126 */
144void remove_shrinker(struct shrinker *shrinker) 127void unregister_shrinker(struct shrinker *shrinker)
145{ 128{
146 down_write(&shrinker_rwsem); 129 down_write(&shrinker_rwsem);
147 list_del(&shrinker->list); 130 list_del(&shrinker->list);
148 up_write(&shrinker_rwsem); 131 up_write(&shrinker_rwsem);
149 kfree(shrinker);
150} 132}
151EXPORT_SYMBOL(remove_shrinker); 133EXPORT_SYMBOL(unregister_shrinker);
152 134
153#define SHRINK_BATCH 128 135#define SHRINK_BATCH 128
154/* 136/*
@@ -185,7 +167,7 @@ unsigned long shrink_slab(unsigned long scanned, gfp_t gfp_mask,
185 list_for_each_entry(shrinker, &shrinker_list, list) { 167 list_for_each_entry(shrinker, &shrinker_list, list) {
186 unsigned long long delta; 168 unsigned long long delta;
187 unsigned long total_scan; 169 unsigned long total_scan;
188 unsigned long max_pass = (*shrinker->shrinker)(0, gfp_mask); 170 unsigned long max_pass = (*shrinker->shrink)(0, gfp_mask);
189 171
190 delta = (4 * scanned) / shrinker->seeks; 172 delta = (4 * scanned) / shrinker->seeks;
191 delta *= max_pass; 173 delta *= max_pass;
@@ -213,8 +195,8 @@ unsigned long shrink_slab(unsigned long scanned, gfp_t gfp_mask,
213 int shrink_ret; 195 int shrink_ret;
214 int nr_before; 196 int nr_before;
215 197
216 nr_before = (*shrinker->shrinker)(0, gfp_mask); 198 nr_before = (*shrinker->shrink)(0, gfp_mask);
217 shrink_ret = (*shrinker->shrinker)(this_scan, gfp_mask); 199 shrink_ret = (*shrinker->shrink)(this_scan, gfp_mask);
218 if (shrink_ret == -1) 200 if (shrink_ret == -1)
219 break; 201 break;
220 if (shrink_ret < nr_before) 202 if (shrink_ret < nr_before)
@@ -481,7 +463,8 @@ static unsigned long shrink_page_list(struct list_head *page_list,
481 463
482 referenced = page_referenced(page, 1); 464 referenced = page_referenced(page, 1);
483 /* In active use or really unfreeable? Activate it. */ 465 /* In active use or really unfreeable? Activate it. */
484 if (referenced && page_mapping_inuse(page)) 466 if (sc->order <= PAGE_ALLOC_COSTLY_ORDER &&
467 referenced && page_mapping_inuse(page))
485 goto activate_locked; 468 goto activate_locked;
486 469
487#ifdef CONFIG_SWAP 470#ifdef CONFIG_SWAP
@@ -514,7 +497,7 @@ static unsigned long shrink_page_list(struct list_head *page_list,
514 } 497 }
515 498
516 if (PageDirty(page)) { 499 if (PageDirty(page)) {
517 if (referenced) 500 if (sc->order <= PAGE_ALLOC_COSTLY_ORDER && referenced)
518 goto keep_locked; 501 goto keep_locked;
519 if (!may_enter_fs) 502 if (!may_enter_fs)
520 goto keep_locked; 503 goto keep_locked;
@@ -598,6 +581,51 @@ keep:
598 return nr_reclaimed; 581 return nr_reclaimed;
599} 582}
600 583
584/* LRU Isolation modes. */
585#define ISOLATE_INACTIVE 0 /* Isolate inactive pages. */
586#define ISOLATE_ACTIVE 1 /* Isolate active pages. */
587#define ISOLATE_BOTH 2 /* Isolate both active and inactive pages. */
588
589/*
590 * Attempt to remove the specified page from its LRU. Only take this page
591 * if it is of the appropriate PageActive status. Pages which are being
592 * freed elsewhere are also ignored.
593 *
594 * page: page to consider
595 * mode: one of the LRU isolation modes defined above
596 *
597 * returns 0 on success, -ve errno on failure.
598 */
599static int __isolate_lru_page(struct page *page, int mode)
600{
601 int ret = -EINVAL;
602
603 /* Only take pages on the LRU. */
604 if (!PageLRU(page))
605 return ret;
606
607 /*
608 * When checking the active state, we need to be sure we are
609 * dealing with comparible boolean values. Take the logical not
610 * of each.
611 */
612 if (mode != ISOLATE_BOTH && (!PageActive(page) != !mode))
613 return ret;
614
615 ret = -EBUSY;
616 if (likely(get_page_unless_zero(page))) {
617 /*
618 * Be careful not to clear PageLRU until after we're
619 * sure the page is not being freed elsewhere -- the
620 * page release code relies on it.
621 */
622 ClearPageLRU(page);
623 ret = 0;
624 }
625
626 return ret;
627}
628
601/* 629/*
602 * zone->lru_lock is heavily contended. Some of the functions that 630 * zone->lru_lock is heavily contended. Some of the functions that
603 * shrink the lists perform better by taking out a batch of pages 631 * shrink the lists perform better by taking out a batch of pages
@@ -612,38 +640,90 @@ keep:
612 * @src: The LRU list to pull pages off. 640 * @src: The LRU list to pull pages off.
613 * @dst: The temp list to put pages on to. 641 * @dst: The temp list to put pages on to.
614 * @scanned: The number of pages that were scanned. 642 * @scanned: The number of pages that were scanned.
643 * @order: The caller's attempted allocation order
644 * @mode: One of the LRU isolation modes
615 * 645 *
616 * returns how many pages were moved onto *@dst. 646 * returns how many pages were moved onto *@dst.
617 */ 647 */
618static unsigned long isolate_lru_pages(unsigned long nr_to_scan, 648static unsigned long isolate_lru_pages(unsigned long nr_to_scan,
619 struct list_head *src, struct list_head *dst, 649 struct list_head *src, struct list_head *dst,
620 unsigned long *scanned) 650 unsigned long *scanned, int order, int mode)
621{ 651{
622 unsigned long nr_taken = 0; 652 unsigned long nr_taken = 0;
623 struct page *page;
624 unsigned long scan; 653 unsigned long scan;
625 654
626 for (scan = 0; scan < nr_to_scan && !list_empty(src); scan++) { 655 for (scan = 0; scan < nr_to_scan && !list_empty(src); scan++) {
627 struct list_head *target; 656 struct page *page;
657 unsigned long pfn;
658 unsigned long end_pfn;
659 unsigned long page_pfn;
660 int zone_id;
661
628 page = lru_to_page(src); 662 page = lru_to_page(src);
629 prefetchw_prev_lru_page(page, src, flags); 663 prefetchw_prev_lru_page(page, src, flags);
630 664
631 VM_BUG_ON(!PageLRU(page)); 665 VM_BUG_ON(!PageLRU(page));
632 666
633 list_del(&page->lru); 667 switch (__isolate_lru_page(page, mode)) {
634 target = src; 668 case 0:
635 if (likely(get_page_unless_zero(page))) { 669 list_move(&page->lru, dst);
636 /*
637 * Be careful not to clear PageLRU until after we're
638 * sure the page is not being freed elsewhere -- the
639 * page release code relies on it.
640 */
641 ClearPageLRU(page);
642 target = dst;
643 nr_taken++; 670 nr_taken++;
644 } /* else it is being freed elsewhere */ 671 break;
672
673 case -EBUSY:
674 /* else it is being freed elsewhere */
675 list_move(&page->lru, src);
676 continue;
677
678 default:
679 BUG();
680 }
681
682 if (!order)
683 continue;
645 684
646 list_add(&page->lru, target); 685 /*
686 * Attempt to take all pages in the order aligned region
687 * surrounding the tag page. Only take those pages of
688 * the same active state as that tag page. We may safely
689 * round the target page pfn down to the requested order
690 * as the mem_map is guarenteed valid out to MAX_ORDER,
691 * where that page is in a different zone we will detect
692 * it from its zone id and abort this block scan.
693 */
694 zone_id = page_zone_id(page);
695 page_pfn = page_to_pfn(page);
696 pfn = page_pfn & ~((1 << order) - 1);
697 end_pfn = pfn + (1 << order);
698 for (; pfn < end_pfn; pfn++) {
699 struct page *cursor_page;
700
701 /* The target page is in the block, ignore it. */
702 if (unlikely(pfn == page_pfn))
703 continue;
704
705 /* Avoid holes within the zone. */
706 if (unlikely(!pfn_valid_within(pfn)))
707 break;
708
709 cursor_page = pfn_to_page(pfn);
710 /* Check that we have not crossed a zone boundary. */
711 if (unlikely(page_zone_id(cursor_page) != zone_id))
712 continue;
713 switch (__isolate_lru_page(cursor_page, mode)) {
714 case 0:
715 list_move(&cursor_page->lru, dst);
716 nr_taken++;
717 scan++;
718 break;
719
720 case -EBUSY:
721 /* else it is being freed elsewhere */
722 list_move(&cursor_page->lru, src);
723 default:
724 break;
725 }
726 }
647 } 727 }
648 728
649 *scanned = scan; 729 *scanned = scan;
@@ -651,6 +731,24 @@ static unsigned long isolate_lru_pages(unsigned long nr_to_scan,
651} 731}
652 732
653/* 733/*
734 * clear_active_flags() is a helper for shrink_active_list(), clearing
735 * any active bits from the pages in the list.
736 */
737static unsigned long clear_active_flags(struct list_head *page_list)
738{
739 int nr_active = 0;
740 struct page *page;
741
742 list_for_each_entry(page, page_list, lru)
743 if (PageActive(page)) {
744 ClearPageActive(page);
745 nr_active++;
746 }
747
748 return nr_active;
749}
750
751/*
654 * shrink_inactive_list() is a helper for shrink_zone(). It returns the number 752 * shrink_inactive_list() is a helper for shrink_zone(). It returns the number
655 * of reclaimed pages 753 * of reclaimed pages
656 */ 754 */
@@ -671,11 +769,18 @@ static unsigned long shrink_inactive_list(unsigned long max_scan,
671 unsigned long nr_taken; 769 unsigned long nr_taken;
672 unsigned long nr_scan; 770 unsigned long nr_scan;
673 unsigned long nr_freed; 771 unsigned long nr_freed;
772 unsigned long nr_active;
674 773
675 nr_taken = isolate_lru_pages(sc->swap_cluster_max, 774 nr_taken = isolate_lru_pages(sc->swap_cluster_max,
676 &zone->inactive_list, 775 &zone->inactive_list,
677 &page_list, &nr_scan); 776 &page_list, &nr_scan, sc->order,
678 __mod_zone_page_state(zone, NR_INACTIVE, -nr_taken); 777 (sc->order > PAGE_ALLOC_COSTLY_ORDER)?
778 ISOLATE_BOTH : ISOLATE_INACTIVE);
779 nr_active = clear_active_flags(&page_list);
780
781 __mod_zone_page_state(zone, NR_ACTIVE, -nr_active);
782 __mod_zone_page_state(zone, NR_INACTIVE,
783 -(nr_taken - nr_active));
679 zone->pages_scanned += nr_scan; 784 zone->pages_scanned += nr_scan;
680 spin_unlock_irq(&zone->lru_lock); 785 spin_unlock_irq(&zone->lru_lock);
681 786
@@ -820,7 +925,7 @@ force_reclaim_mapped:
820 lru_add_drain(); 925 lru_add_drain();
821 spin_lock_irq(&zone->lru_lock); 926 spin_lock_irq(&zone->lru_lock);
822 pgmoved = isolate_lru_pages(nr_pages, &zone->active_list, 927 pgmoved = isolate_lru_pages(nr_pages, &zone->active_list,
823 &l_hold, &pgscanned); 928 &l_hold, &pgscanned, sc->order, ISOLATE_ACTIVE);
824 zone->pages_scanned += pgscanned; 929 zone->pages_scanned += pgscanned;
825 __mod_zone_page_state(zone, NR_ACTIVE, -pgmoved); 930 __mod_zone_page_state(zone, NR_ACTIVE, -pgmoved);
826 spin_unlock_irq(&zone->lru_lock); 931 spin_unlock_irq(&zone->lru_lock);
@@ -1011,7 +1116,7 @@ static unsigned long shrink_zones(int priority, struct zone **zones,
1011 * holds filesystem locks which prevent writeout this might not work, and the 1116 * holds filesystem locks which prevent writeout this might not work, and the
1012 * allocation attempt will fail. 1117 * allocation attempt will fail.
1013 */ 1118 */
1014unsigned long try_to_free_pages(struct zone **zones, gfp_t gfp_mask) 1119unsigned long try_to_free_pages(struct zone **zones, int order, gfp_t gfp_mask)
1015{ 1120{
1016 int priority; 1121 int priority;
1017 int ret = 0; 1122 int ret = 0;
@@ -1026,6 +1131,7 @@ unsigned long try_to_free_pages(struct zone **zones, gfp_t gfp_mask)
1026 .swap_cluster_max = SWAP_CLUSTER_MAX, 1131 .swap_cluster_max = SWAP_CLUSTER_MAX,
1027 .may_swap = 1, 1132 .may_swap = 1,
1028 .swappiness = vm_swappiness, 1133 .swappiness = vm_swappiness,
1134 .order = order,
1029 }; 1135 };
1030 1136
1031 count_vm_event(ALLOCSTALL); 1137 count_vm_event(ALLOCSTALL);
@@ -1131,6 +1237,7 @@ static unsigned long balance_pgdat(pg_data_t *pgdat, int order)
1131 .may_swap = 1, 1237 .may_swap = 1,
1132 .swap_cluster_max = SWAP_CLUSTER_MAX, 1238 .swap_cluster_max = SWAP_CLUSTER_MAX,
1133 .swappiness = vm_swappiness, 1239 .swappiness = vm_swappiness,
1240 .order = order,
1134 }; 1241 };
1135 /* 1242 /*
1136 * temp_priority is used to remember the scanning priority at which 1243 * temp_priority is used to remember the scanning priority at which
@@ -1314,6 +1421,7 @@ static int kswapd(void *p)
1314 * trying to free the first piece of memory in the first place). 1421 * trying to free the first piece of memory in the first place).
1315 */ 1422 */
1316 tsk->flags |= PF_MEMALLOC | PF_SWAPWRITE | PF_KSWAPD; 1423 tsk->flags |= PF_MEMALLOC | PF_SWAPWRITE | PF_KSWAPD;
1424 set_freezable();
1317 1425
1318 order = 0; 1426 order = 0;
1319 for ( ; ; ) { 1427 for ( ; ; ) {
diff --git a/mm/vmstat.c b/mm/vmstat.c
index eceaf496210f..fadf791cd7e6 100644
--- a/mm/vmstat.c
+++ b/mm/vmstat.c
@@ -472,7 +472,7 @@ const struct seq_operations fragmentation_op = {
472#endif 472#endif
473 473
474#define TEXTS_FOR_ZONES(xx) TEXT_FOR_DMA(xx) TEXT_FOR_DMA32(xx) xx "_normal", \ 474#define TEXTS_FOR_ZONES(xx) TEXT_FOR_DMA(xx) TEXT_FOR_DMA32(xx) xx "_normal", \
475 TEXT_FOR_HIGHMEM(xx) 475 TEXT_FOR_HIGHMEM(xx) xx "_movable",
476 476
477static const char * const vmstat_text[] = { 477static const char * const vmstat_text[] = {
478 /* Zoned VM counters */ 478 /* Zoned VM counters */
diff --git a/net/bluetooth/bnep/core.c b/net/bluetooth/bnep/core.c
index 1c8f4a0c5f43..1f78c3e336d8 100644
--- a/net/bluetooth/bnep/core.c
+++ b/net/bluetooth/bnep/core.c
@@ -36,6 +36,7 @@
36#include <linux/signal.h> 36#include <linux/signal.h>
37#include <linux/init.h> 37#include <linux/init.h>
38#include <linux/wait.h> 38#include <linux/wait.h>
39#include <linux/freezer.h>
39#include <linux/errno.h> 40#include <linux/errno.h>
40#include <linux/net.h> 41#include <linux/net.h>
41#include <net/sock.h> 42#include <net/sock.h>
@@ -474,7 +475,6 @@ static int bnep_session(void *arg)
474 475
475 daemonize("kbnepd %s", dev->name); 476 daemonize("kbnepd %s", dev->name);
476 set_user_nice(current, -15); 477 set_user_nice(current, -15);
477 current->flags |= PF_NOFREEZE;
478 478
479 init_waitqueue_entry(&wait, current); 479 init_waitqueue_entry(&wait, current);
480 add_wait_queue(sk->sk_sleep, &wait); 480 add_wait_queue(sk->sk_sleep, &wait);
diff --git a/net/bluetooth/cmtp/core.c b/net/bluetooth/cmtp/core.c
index 66bef1ccee2a..ca60a4517fd3 100644
--- a/net/bluetooth/cmtp/core.c
+++ b/net/bluetooth/cmtp/core.c
@@ -29,6 +29,7 @@
29#include <linux/slab.h> 29#include <linux/slab.h>
30#include <linux/poll.h> 30#include <linux/poll.h>
31#include <linux/fcntl.h> 31#include <linux/fcntl.h>
32#include <linux/freezer.h>
32#include <linux/skbuff.h> 33#include <linux/skbuff.h>
33#include <linux/socket.h> 34#include <linux/socket.h>
34#include <linux/ioctl.h> 35#include <linux/ioctl.h>
@@ -287,7 +288,6 @@ static int cmtp_session(void *arg)
287 288
288 daemonize("kcmtpd_ctr_%d", session->num); 289 daemonize("kcmtpd_ctr_%d", session->num);
289 set_user_nice(current, -15); 290 set_user_nice(current, -15);
290 current->flags |= PF_NOFREEZE;
291 291
292 init_waitqueue_entry(&wait, current); 292 init_waitqueue_entry(&wait, current);
293 add_wait_queue(sk->sk_sleep, &wait); 293 add_wait_queue(sk->sk_sleep, &wait);
diff --git a/net/bluetooth/hidp/core.c b/net/bluetooth/hidp/core.c
index 450eb0244bbf..64d89ca28847 100644
--- a/net/bluetooth/hidp/core.c
+++ b/net/bluetooth/hidp/core.c
@@ -28,6 +28,7 @@
28#include <linux/sched.h> 28#include <linux/sched.h>
29#include <linux/slab.h> 29#include <linux/slab.h>
30#include <linux/poll.h> 30#include <linux/poll.h>
31#include <linux/freezer.h>
31#include <linux/fcntl.h> 32#include <linux/fcntl.h>
32#include <linux/skbuff.h> 33#include <linux/skbuff.h>
33#include <linux/socket.h> 34#include <linux/socket.h>
@@ -547,7 +548,6 @@ static int hidp_session(void *arg)
547 548
548 daemonize("khidpd_%04x%04x", vendor, product); 549 daemonize("khidpd_%04x%04x", vendor, product);
549 set_user_nice(current, -15); 550 set_user_nice(current, -15);
550 current->flags |= PF_NOFREEZE;
551 551
552 init_waitqueue_entry(&ctrl_wait, current); 552 init_waitqueue_entry(&ctrl_wait, current);
553 init_waitqueue_entry(&intr_wait, current); 553 init_waitqueue_entry(&intr_wait, current);
diff --git a/net/bluetooth/rfcomm/core.c b/net/bluetooth/rfcomm/core.c
index 52e04df323ea..bb7220770f2c 100644
--- a/net/bluetooth/rfcomm/core.c
+++ b/net/bluetooth/rfcomm/core.c
@@ -33,6 +33,7 @@
33#include <linux/sched.h> 33#include <linux/sched.h>
34#include <linux/signal.h> 34#include <linux/signal.h>
35#include <linux/init.h> 35#include <linux/init.h>
36#include <linux/freezer.h>
36#include <linux/wait.h> 37#include <linux/wait.h>
37#include <linux/device.h> 38#include <linux/device.h>
38#include <linux/net.h> 39#include <linux/net.h>
@@ -1940,7 +1941,6 @@ static int rfcomm_run(void *unused)
1940 1941
1941 daemonize("krfcommd"); 1942 daemonize("krfcommd");
1942 set_user_nice(current, -10); 1943 set_user_nice(current, -10);
1943 current->flags |= PF_NOFREEZE;
1944 1944
1945 BT_DBG(""); 1945 BT_DBG("");
1946 1946
diff --git a/net/core/pktgen.c b/net/core/pktgen.c
index 75215331b045..bca787fdbc51 100644
--- a/net/core/pktgen.c
+++ b/net/core/pktgen.c
@@ -3465,6 +3465,8 @@ static int pktgen_thread_worker(void *arg)
3465 3465
3466 set_current_state(TASK_INTERRUPTIBLE); 3466 set_current_state(TASK_INTERRUPTIBLE);
3467 3467
3468 set_freezable();
3469
3468 while (!kthread_should_stop()) { 3470 while (!kthread_should_stop()) {
3469 pkt_dev = next_to_run(t); 3471 pkt_dev = next_to_run(t);
3470 3472
diff --git a/net/sunrpc/auth.c b/net/sunrpc/auth.c
index aa55d0a03e6f..29a8ecc60928 100644
--- a/net/sunrpc/auth.c
+++ b/net/sunrpc/auth.c
@@ -543,17 +543,18 @@ rpcauth_uptodatecred(struct rpc_task *task)
543 test_bit(RPCAUTH_CRED_UPTODATE, &cred->cr_flags) != 0; 543 test_bit(RPCAUTH_CRED_UPTODATE, &cred->cr_flags) != 0;
544} 544}
545 545
546 546static struct shrinker rpc_cred_shrinker = {
547static struct shrinker *rpc_cred_shrinker; 547 .shrink = rpcauth_cache_shrinker,
548 .seeks = DEFAULT_SEEKS,
549};
548 550
549void __init rpcauth_init_module(void) 551void __init rpcauth_init_module(void)
550{ 552{
551 rpc_init_authunix(); 553 rpc_init_authunix();
552 rpc_cred_shrinker = set_shrinker(DEFAULT_SEEKS, rpcauth_cache_shrinker); 554 register_shrinker(&rpc_cred_shrinker);
553} 555}
554 556
555void __exit rpcauth_remove_module(void) 557void __exit rpcauth_remove_module(void)
556{ 558{
557 if (rpc_cred_shrinker != NULL) 559 unregister_shrinker(&rpc_cred_shrinker);
558 remove_shrinker(rpc_cred_shrinker);
559} 560}
diff --git a/net/sunrpc/auth_gss/gss_krb5_mech.c b/net/sunrpc/auth_gss/gss_krb5_mech.c
index 71b9daefdff3..9843eacef11d 100644
--- a/net/sunrpc/auth_gss/gss_krb5_mech.c
+++ b/net/sunrpc/auth_gss/gss_krb5_mech.c
@@ -231,6 +231,7 @@ static struct pf_desc gss_kerberos_pfs[] = {
231static struct gss_api_mech gss_kerberos_mech = { 231static struct gss_api_mech gss_kerberos_mech = {
232 .gm_name = "krb5", 232 .gm_name = "krb5",
233 .gm_owner = THIS_MODULE, 233 .gm_owner = THIS_MODULE,
234 .gm_oid = {9, (void *)"\x2a\x86\x48\x86\xf7\x12\x01\x02\x02"},
234 .gm_ops = &gss_kerberos_ops, 235 .gm_ops = &gss_kerberos_ops,
235 .gm_pf_num = ARRAY_SIZE(gss_kerberos_pfs), 236 .gm_pf_num = ARRAY_SIZE(gss_kerberos_pfs),
236 .gm_pfs = gss_kerberos_pfs, 237 .gm_pfs = gss_kerberos_pfs,
diff --git a/net/sunrpc/auth_gss/gss_mech_switch.c b/net/sunrpc/auth_gss/gss_mech_switch.c
index 26872517ccf3..61801a069ff0 100644
--- a/net/sunrpc/auth_gss/gss_mech_switch.c
+++ b/net/sunrpc/auth_gss/gss_mech_switch.c
@@ -194,6 +194,20 @@ gss_mech_get_by_pseudoflavor(u32 pseudoflavor)
194EXPORT_SYMBOL(gss_mech_get_by_pseudoflavor); 194EXPORT_SYMBOL(gss_mech_get_by_pseudoflavor);
195 195
196u32 196u32
197gss_svc_to_pseudoflavor(struct gss_api_mech *gm, u32 service)
198{
199 int i;
200
201 for (i = 0; i < gm->gm_pf_num; i++) {
202 if (gm->gm_pfs[i].service == service) {
203 return gm->gm_pfs[i].pseudoflavor;
204 }
205 }
206 return RPC_AUTH_MAXFLAVOR; /* illegal value */
207}
208EXPORT_SYMBOL(gss_svc_to_pseudoflavor);
209
210u32
197gss_pseudoflavor_to_service(struct gss_api_mech *gm, u32 pseudoflavor) 211gss_pseudoflavor_to_service(struct gss_api_mech *gm, u32 pseudoflavor)
198{ 212{
199 int i; 213 int i;
diff --git a/net/sunrpc/auth_gss/gss_spkm3_mech.c b/net/sunrpc/auth_gss/gss_spkm3_mech.c
index 577d590e755f..5deb4b6e4514 100644
--- a/net/sunrpc/auth_gss/gss_spkm3_mech.c
+++ b/net/sunrpc/auth_gss/gss_spkm3_mech.c
@@ -217,6 +217,7 @@ static struct pf_desc gss_spkm3_pfs[] = {
217static struct gss_api_mech gss_spkm3_mech = { 217static struct gss_api_mech gss_spkm3_mech = {
218 .gm_name = "spkm3", 218 .gm_name = "spkm3",
219 .gm_owner = THIS_MODULE, 219 .gm_owner = THIS_MODULE,
220 .gm_oid = {7, "\053\006\001\005\005\001\003"},
220 .gm_ops = &gss_spkm3_ops, 221 .gm_ops = &gss_spkm3_ops,
221 .gm_pf_num = ARRAY_SIZE(gss_spkm3_pfs), 222 .gm_pf_num = ARRAY_SIZE(gss_spkm3_pfs),
222 .gm_pfs = gss_spkm3_pfs, 223 .gm_pfs = gss_spkm3_pfs,
diff --git a/net/sunrpc/auth_gss/svcauth_gss.c b/net/sunrpc/auth_gss/svcauth_gss.c
index c094583386fd..490697542fc2 100644
--- a/net/sunrpc/auth_gss/svcauth_gss.c
+++ b/net/sunrpc/auth_gss/svcauth_gss.c
@@ -743,6 +743,15 @@ find_gss_auth_domain(struct gss_ctx *ctx, u32 svc)
743 743
744static struct auth_ops svcauthops_gss; 744static struct auth_ops svcauthops_gss;
745 745
746u32 svcauth_gss_flavor(struct auth_domain *dom)
747{
748 struct gss_domain *gd = container_of(dom, struct gss_domain, h);
749
750 return gd->pseudoflavor;
751}
752
753EXPORT_SYMBOL(svcauth_gss_flavor);
754
746int 755int
747svcauth_gss_register_pseudoflavor(u32 pseudoflavor, char * name) 756svcauth_gss_register_pseudoflavor(u32 pseudoflavor, char * name)
748{ 757{
@@ -913,10 +922,23 @@ svcauth_gss_set_client(struct svc_rqst *rqstp)
913 struct gss_svc_data *svcdata = rqstp->rq_auth_data; 922 struct gss_svc_data *svcdata = rqstp->rq_auth_data;
914 struct rsc *rsci = svcdata->rsci; 923 struct rsc *rsci = svcdata->rsci;
915 struct rpc_gss_wire_cred *gc = &svcdata->clcred; 924 struct rpc_gss_wire_cred *gc = &svcdata->clcred;
925 int stat;
916 926
917 rqstp->rq_client = find_gss_auth_domain(rsci->mechctx, gc->gc_svc); 927 /*
918 if (rqstp->rq_client == NULL) 928 * A gss export can be specified either by:
929 * export *(sec=krb5,rw)
930 * or by
931 * export gss/krb5(rw)
932 * The latter is deprecated; but for backwards compatibility reasons
933 * the nfsd code will still fall back on trying it if the former
934 * doesn't work; so we try to make both available to nfsd, below.
935 */
936 rqstp->rq_gssclient = find_gss_auth_domain(rsci->mechctx, gc->gc_svc);
937 if (rqstp->rq_gssclient == NULL)
919 return SVC_DENIED; 938 return SVC_DENIED;
939 stat = svcauth_unix_set_client(rqstp);
940 if (stat == SVC_DROP)
941 return stat;
920 return SVC_OK; 942 return SVC_OK;
921} 943}
922 944
@@ -1088,7 +1110,6 @@ svcauth_gss_accept(struct svc_rqst *rqstp, __be32 *authp)
1088 svc_putnl(resv, GSS_SEQ_WIN); 1110 svc_putnl(resv, GSS_SEQ_WIN);
1089 if (svc_safe_putnetobj(resv, &rsip->out_token)) 1111 if (svc_safe_putnetobj(resv, &rsip->out_token))
1090 goto drop; 1112 goto drop;
1091 rqstp->rq_client = NULL;
1092 } 1113 }
1093 goto complete; 1114 goto complete;
1094 case RPC_GSS_PROC_DESTROY: 1115 case RPC_GSS_PROC_DESTROY:
@@ -1131,6 +1152,8 @@ svcauth_gss_accept(struct svc_rqst *rqstp, __be32 *authp)
1131 } 1152 }
1132 svcdata->rsci = rsci; 1153 svcdata->rsci = rsci;
1133 cache_get(&rsci->h); 1154 cache_get(&rsci->h);
1155 rqstp->rq_flavor = gss_svc_to_pseudoflavor(
1156 rsci->mechctx->mech_type, gc->gc_svc);
1134 ret = SVC_OK; 1157 ret = SVC_OK;
1135 goto out; 1158 goto out;
1136 } 1159 }
@@ -1317,6 +1340,9 @@ out_err:
1317 if (rqstp->rq_client) 1340 if (rqstp->rq_client)
1318 auth_domain_put(rqstp->rq_client); 1341 auth_domain_put(rqstp->rq_client);
1319 rqstp->rq_client = NULL; 1342 rqstp->rq_client = NULL;
1343 if (rqstp->rq_gssclient)
1344 auth_domain_put(rqstp->rq_gssclient);
1345 rqstp->rq_gssclient = NULL;
1320 if (rqstp->rq_cred.cr_group_info) 1346 if (rqstp->rq_cred.cr_group_info)
1321 put_group_info(rqstp->rq_cred.cr_group_info); 1347 put_group_info(rqstp->rq_cred.cr_group_info);
1322 rqstp->rq_cred.cr_group_info = NULL; 1348 rqstp->rq_cred.cr_group_info = NULL;
diff --git a/net/sunrpc/svcauth_unix.c b/net/sunrpc/svcauth_unix.c
index 07dcd20cbee4..411479411b21 100644
--- a/net/sunrpc/svcauth_unix.c
+++ b/net/sunrpc/svcauth_unix.c
@@ -5,6 +5,7 @@
5#include <linux/sunrpc/xdr.h> 5#include <linux/sunrpc/xdr.h>
6#include <linux/sunrpc/svcsock.h> 6#include <linux/sunrpc/svcsock.h>
7#include <linux/sunrpc/svcauth.h> 7#include <linux/sunrpc/svcauth.h>
8#include <linux/sunrpc/gss_api.h>
8#include <linux/err.h> 9#include <linux/err.h>
9#include <linux/seq_file.h> 10#include <linux/seq_file.h>
10#include <linux/hash.h> 11#include <linux/hash.h>
@@ -637,7 +638,7 @@ static int unix_gid_find(uid_t uid, struct group_info **gip,
637 } 638 }
638} 639}
639 640
640static int 641int
641svcauth_unix_set_client(struct svc_rqst *rqstp) 642svcauth_unix_set_client(struct svc_rqst *rqstp)
642{ 643{
643 struct sockaddr_in *sin = svc_addr_in(rqstp); 644 struct sockaddr_in *sin = svc_addr_in(rqstp);
@@ -672,6 +673,8 @@ svcauth_unix_set_client(struct svc_rqst *rqstp)
672 return SVC_OK; 673 return SVC_OK;
673} 674}
674 675
676EXPORT_SYMBOL(svcauth_unix_set_client);
677
675static int 678static int
676svcauth_null_accept(struct svc_rqst *rqstp, __be32 *authp) 679svcauth_null_accept(struct svc_rqst *rqstp, __be32 *authp)
677{ 680{
@@ -707,6 +710,7 @@ svcauth_null_accept(struct svc_rqst *rqstp, __be32 *authp)
707 svc_putnl(resv, RPC_AUTH_NULL); 710 svc_putnl(resv, RPC_AUTH_NULL);
708 svc_putnl(resv, 0); 711 svc_putnl(resv, 0);
709 712
713 rqstp->rq_flavor = RPC_AUTH_NULL;
710 return SVC_OK; 714 return SVC_OK;
711} 715}
712 716
@@ -784,6 +788,7 @@ svcauth_unix_accept(struct svc_rqst *rqstp, __be32 *authp)
784 svc_putnl(resv, RPC_AUTH_NULL); 788 svc_putnl(resv, RPC_AUTH_NULL);
785 svc_putnl(resv, 0); 789 svc_putnl(resv, 0);
786 790
791 rqstp->rq_flavor = RPC_AUTH_UNIX;
787 return SVC_OK; 792 return SVC_OK;
788 793
789badcred: 794badcred:
diff --git a/scripts/kallsyms.c b/scripts/kallsyms.c
index 8b809b264d18..10b006694e5d 100644
--- a/scripts/kallsyms.c
+++ b/scripts/kallsyms.c
@@ -31,7 +31,7 @@
31#include <string.h> 31#include <string.h>
32#include <ctype.h> 32#include <ctype.h>
33 33
34#define KSYM_NAME_LEN 127 34#define KSYM_NAME_LEN 128
35 35
36 36
37struct sym_entry { 37struct sym_entry {
@@ -254,7 +254,7 @@ static void write_src(void)
254 unsigned int i, k, off; 254 unsigned int i, k, off;
255 unsigned int best_idx[256]; 255 unsigned int best_idx[256];
256 unsigned int *markers; 256 unsigned int *markers;
257 char buf[KSYM_NAME_LEN+1]; 257 char buf[KSYM_NAME_LEN];
258 258
259 printf("#include <asm/types.h>\n"); 259 printf("#include <asm/types.h>\n");
260 printf("#if BITS_PER_LONG == 64\n"); 260 printf("#if BITS_PER_LONG == 64\n");
diff --git a/security/selinux/hooks.c b/security/selinux/hooks.c
index 78c3f98fcdcf..520b9998123e 100644
--- a/security/selinux/hooks.c
+++ b/security/selinux/hooks.c
@@ -2318,7 +2318,7 @@ static int selinux_inode_setxattr(struct dentry *dentry, char *name, void *value
2318 if (sbsec->behavior == SECURITY_FS_USE_MNTPOINT) 2318 if (sbsec->behavior == SECURITY_FS_USE_MNTPOINT)
2319 return -EOPNOTSUPP; 2319 return -EOPNOTSUPP;
2320 2320
2321 if ((current->fsuid != inode->i_uid) && !capable(CAP_FOWNER)) 2321 if (!is_owner_or_cap(inode))
2322 return -EPERM; 2322 return -EPERM;
2323 2323
2324 AVC_AUDIT_DATA_INIT(&ad,FS); 2324 AVC_AUDIT_DATA_INIT(&ad,FS);
diff --git a/sound/oss/Kconfig b/sound/oss/Kconfig
index 314477909f82..866d4de8d4ab 100644
--- a/sound/oss/Kconfig
+++ b/sound/oss/Kconfig
@@ -5,35 +5,6 @@
5# 5#
6# Prompt user for primary drivers. 6# Prompt user for primary drivers.
7 7
8config OSS_OBSOLETE
9 bool "Obsolete OSS drivers"
10 depends on SOUND_PRIME
11 help
12 This option enables support for obsolete OSS drivers that
13 are scheduled for removal in the near future.
14
15 Please contact Adrian Bunk <bunk@stusta.de> if you had to
16 say Y here because your hardware is not properly supported
17 by ALSA.
18
19 If unsure, say N.
20
21config SOUND_BT878
22 tristate "BT878 audio dma"
23 depends on SOUND_PRIME && PCI && OSS_OBSOLETE
24 ---help---
25 Audio DMA support for bt878 based grabber boards. As you might have
26 already noticed, bt878 is listed with two functions in /proc/pci.
27 Function 0 does the video stuff (bt848 compatible), function 1 does
28 the same for audio data. This is a driver for the audio part of
29 the chip. If you say 'Y' here you get a oss-compatible dsp device
30 where you can record from. If you want just watch TV you probably
31 don't need this driver as most TV cards handle sound with a short
32 cable from the TV card to your sound card's line-in.
33
34 To compile this driver as a module, choose M here: the module will
35 be called btaudio.
36
37config SOUND_BCM_CS4297A 8config SOUND_BCM_CS4297A
38 tristate "Crystal Sound CS4297a (for Swarm)" 9 tristate "Crystal Sound CS4297a (for Swarm)"
39 depends on SOUND_PRIME && SIBYTE_SWARM 10 depends on SOUND_PRIME && SIBYTE_SWARM
@@ -44,13 +15,6 @@ config SOUND_BCM_CS4297A
44 note that CONFIG_KGDB should not be enabled at the same 15 note that CONFIG_KGDB should not be enabled at the same
45 time, since it also attempts to use this UART port. 16 time, since it also attempts to use this UART port.
46 17
47config SOUND_ICH
48 tristate "Intel ICH (i8xx) audio support"
49 depends on SOUND_PRIME && PCI && OSS_OBSOLETE
50 help
51 Support for integral audio in Intel's I/O Controller Hub (ICH)
52 chipset, as used on the 810/820/840 motherboards.
53
54config SOUND_VWSND 18config SOUND_VWSND
55 tristate "SGI Visual Workstation Sound" 19 tristate "SGI Visual Workstation Sound"
56 depends on SOUND_PRIME && X86_VISWS 20 depends on SOUND_PRIME && X86_VISWS
@@ -346,26 +310,6 @@ config MSND_FIFOSIZE
346 and Pinnacle). Larger values reduce the chance of data overruns at 310 and Pinnacle). Larger values reduce the chance of data overruns at
347 the expense of overall latency. If unsure, use the default. 311 the expense of overall latency. If unsure, use the default.
348 312
349config SOUND_VIA82CXXX
350 tristate "VIA 82C686 Audio Codec"
351 depends on SOUND_PRIME && PCI && OSS_OBSOLETE && VIRT_TO_BUS
352 help
353 Say Y here to include support for the audio codec found on VIA
354 82Cxxx-based chips. Typically these are built into a motherboard.
355
356 DO NOT select Sound Blaster or Adlib with this driver, unless
357 you have a Sound Blaster or Adlib card in addition to your VIA
358 audio chip.
359
360config MIDI_VIA82CXXX
361 bool "VIA 82C686 MIDI"
362 depends on SOUND_VIA82CXXX && ISA_DMA_API
363 help
364 Answer Y to use the MIDI interface of the Via686. You may need to
365 enable this in the BIOS before it will work. This is for connection
366 to external MIDI hardware, and is not required for software playback
367 of MIDI files.
368
369config SOUND_OSS 313config SOUND_OSS
370 tristate "OSS sound modules" 314 tristate "OSS sound modules"
371 depends on SOUND_PRIME && ISA_DMA_API && VIRT_TO_BUS 315 depends on SOUND_PRIME && ISA_DMA_API && VIRT_TO_BUS
@@ -400,20 +344,6 @@ config SOUND_DMAP
400 344
401 Say Y unless you have 16MB or more RAM or a PCI sound card. 345 Say Y unless you have 16MB or more RAM or a PCI sound card.
402 346
403config SOUND_CS4232
404 tristate "Crystal CS4232 based (PnP) cards"
405 depends on SOUND_OSS && OSS_OBSOLETE
406 help
407 Say Y here if you have a card based on the Crystal CS4232 chip set,
408 which uses its own Plug and Play protocol.
409
410 If you compile the driver into the kernel, you have to add
411 "cs4232=<io>,<irq>,<dma>,<dma2>,<mpuio>,<mpuirq>" to the kernel
412 command line.
413
414 See <file:Documentation/sound/oss/CS4232> for more information on
415 configuring this card.
416
417config SOUND_SSCAPE 347config SOUND_SSCAPE
418 tristate "Ensoniq SoundScape support" 348 tristate "Ensoniq SoundScape support"
419 depends on SOUND_OSS 349 depends on SOUND_OSS
@@ -720,13 +650,6 @@ config SOUND_WAVEARTIST
720 Say Y here to include support for the Rockwell WaveArtist sound 650 Say Y here to include support for the Rockwell WaveArtist sound
721 system. This driver is mainly for the NetWinder. 651 system. This driver is mainly for the NetWinder.
722 652
723config SOUND_TVMIXER
724 tristate "TV card (bt848) mixer support"
725 depends on SOUND_PRIME && I2C && VIDEO_V4L1 && OSS_OBSOLETE
726 help
727 Support for audio mixer facilities on the BT848 TV frame-grabber
728 card.
729
730config SOUND_KAHLUA 653config SOUND_KAHLUA
731 tristate "XpressAudio Sound Blaster emulation" 654 tristate "XpressAudio Sound Blaster emulation"
732 depends on SOUND_SB 655 depends on SOUND_SB
diff --git a/sound/oss/trident.c b/sound/oss/trident.c
index 3bc1f6e9e4a3..96adc47917aa 100644
--- a/sound/oss/trident.c
+++ b/sound/oss/trident.c
@@ -11,12 +11,12 @@
11 * Built from: 11 * Built from:
12 * Low level code: <audio@tridentmicro.com> from ALSA 12 * Low level code: <audio@tridentmicro.com> from ALSA
13 * Framework: Thomas Sailer <sailer@ife.ee.ethz.ch> 13 * Framework: Thomas Sailer <sailer@ife.ee.ethz.ch>
14 * Extended by: Zach Brown <zab@redhat.com> 14 * Extended by: Zach Brown <zab@redhat.com>
15 * 15 *
16 * Hacked up by: 16 * Hacked up by:
17 * Aaron Holtzman <aholtzma@ess.engr.uvic.ca> 17 * Aaron Holtzman <aholtzma@ess.engr.uvic.ca>
18 * Ollie Lho <ollie@sis.com.tw> SiS 7018 Audio Core Support 18 * Ollie Lho <ollie@sis.com.tw> SiS 7018 Audio Core Support
19 * Ching-Ling Lee <cling-li@ali.com.tw> ALi 5451 Audio Core Support 19 * Ching-Ling Lee <cling-li@ali.com.tw> ALi 5451 Audio Core Support
20 * Matt Wu <mattwu@acersoftech.com.cn> ALi 5451 Audio Core Support 20 * Matt Wu <mattwu@acersoftech.com.cn> ALi 5451 Audio Core Support
21 * Peter Wächtler <pwaechtler@loewe-komp.de> CyberPro5050 support 21 * Peter Wächtler <pwaechtler@loewe-komp.de> CyberPro5050 support
22 * Muli Ben-Yehuda <mulix@mulix.org> 22 * Muli Ben-Yehuda <mulix@mulix.org>
@@ -54,33 +54,33 @@
54 * adapt to new pci joystick attachment interface 54 * adapt to new pci joystick attachment interface
55 * v0.14.10f 55 * v0.14.10f
56 * July 24 2002 Muli Ben-Yehuda <mulix@actcom.co.il> 56 * July 24 2002 Muli Ben-Yehuda <mulix@actcom.co.il>
57 * patch from Eric Lemar (via Ian Soboroff): in suspend and resume, 57 * patch from Eric Lemar (via Ian Soboroff): in suspend and resume,
58 * fix wrong cast from pci_dev* to struct trident_card*. 58 * fix wrong cast from pci_dev* to struct trident_card*.
59 * v0.14.10e 59 * v0.14.10e
60 * July 19 2002 Muli Ben-Yehuda <mulix@actcom.co.il> 60 * July 19 2002 Muli Ben-Yehuda <mulix@actcom.co.il>
61 * rewrite the DMA buffer allocation/deallcoation functions, to make it 61 * rewrite the DMA buffer allocation/deallcoation functions, to make it
62 * modular and fix a bug where we would call free_pages on memory 62 * modular and fix a bug where we would call free_pages on memory
63 * obtained with pci_alloc_consistent. Also remove unnecessary #ifdef 63 * obtained with pci_alloc_consistent. Also remove unnecessary #ifdef
64 * CONFIG_PROC_FS and various other cleanups. 64 * CONFIG_PROC_FS and various other cleanups.
65 * v0.14.10d 65 * v0.14.10d
66 * July 19 2002 Muli Ben-Yehuda <mulix@actcom.co.il> 66 * July 19 2002 Muli Ben-Yehuda <mulix@actcom.co.il>
67 * made several printk(KERN_NOTICE...) into TRDBG(...), to avoid spamming 67 * made several printk(KERN_NOTICE...) into TRDBG(...), to avoid spamming
68 * my syslog with hundreds of messages. 68 * my syslog with hundreds of messages.
69 * v0.14.10c 69 * v0.14.10c
70 * July 16 2002 Muli Ben-Yehuda <mulix@actcom.co.il> 70 * July 16 2002 Muli Ben-Yehuda <mulix@actcom.co.il>
71 * Cleaned up Lei Hu's 0.4.10 driver to conform to Documentation/CodingStyle 71 * Cleaned up Lei Hu's 0.4.10 driver to conform to Documentation/CodingStyle
72 * and the coding style used in the rest of the file. 72 * and the coding style used in the rest of the file.
73 * v0.14.10b 73 * v0.14.10b
74 * June 23 2002 Muli Ben-Yehuda <mulix@actcom.co.il> 74 * June 23 2002 Muli Ben-Yehuda <mulix@actcom.co.il>
75 * add a missing unlock_set_fmt, remove a superflous lock/unlock pair 75 * add a missing unlock_set_fmt, remove a superflous lock/unlock pair
76 * with nothing in between. 76 * with nothing in between.
77 * v0.14.10a 77 * v0.14.10a
78 * June 21 2002 Muli Ben-Yehuda <mulix@actcom.co.il> 78 * June 21 2002 Muli Ben-Yehuda <mulix@actcom.co.il>
79 * use a debug macro instead of #ifdef CONFIG_DEBUG, trim to 80 columns 79 * use a debug macro instead of #ifdef CONFIG_DEBUG, trim to 80 columns
80 * per line, use 'do {} while (0)' in statement macros. 80 * per line, use 'do {} while (0)' in statement macros.
81 * v0.14.10 81 * v0.14.10
82 * June 6 2002 Lei Hu <Lei_hu@ali.com.tw> 82 * June 6 2002 Lei Hu <Lei_hu@ali.com.tw>
83 * rewrite the part to read/write registers of audio codec for Ali5451 83 * rewrite the part to read/write registers of audio codec for Ali5451
84 * v0.14.9e 84 * v0.14.9e
85 * January 2 2002 Vojtech Pavlik <vojtech@ucw.cz> added gameport 85 * January 2 2002 Vojtech Pavlik <vojtech@ucw.cz> added gameport
86 * support to avoid resource conflict with pcigame.c 86 * support to avoid resource conflict with pcigame.c
@@ -111,7 +111,7 @@
111 * Set EBUF1 and EBUF2 to still mode 111 * Set EBUF1 and EBUF2 to still mode
112 * Add dc97/ac97 reset function 112 * Add dc97/ac97 reset function
113 * Fix power management: ali_restore_regs 113 * Fix power management: ali_restore_regs
114 * unreleased 114 * unreleased
115 * Mar 09 2001 Matt Wu 115 * Mar 09 2001 Matt Wu
116 * Add cache for ac97 access 116 * Add cache for ac97 access
117 * v0.14.7 117 * v0.14.7
@@ -120,7 +120,7 @@
120 * Fix bug: an extra tail will be played when playing 120 * Fix bug: an extra tail will be played when playing
121 * Jan 05 2001 Matt Wu 121 * Jan 05 2001 Matt Wu
122 * Implement multi-channels and S/PDIF in support for ALi 1535+ 122 * Implement multi-channels and S/PDIF in support for ALi 1535+
123 * v0.14.6 123 * v0.14.6
124 * Nov 1 2000 Ching-Ling Lee 124 * Nov 1 2000 Ching-Ling Lee
125 * Fix the bug of memory leak when switching 5.1-channels to 2 channels. 125 * Fix the bug of memory leak when switching 5.1-channels to 2 channels.
126 * Add lock protection into dynamic changing format of data. 126 * Add lock protection into dynamic changing format of data.
@@ -138,7 +138,7 @@
138 * v0.14.3 May 10 2000 Ollie Lho 138 * v0.14.3 May 10 2000 Ollie Lho
139 * fixed a small bug in trident_update_ptr, xmms 1.0.1 no longer uses 100% CPU 139 * fixed a small bug in trident_update_ptr, xmms 1.0.1 no longer uses 100% CPU
140 * v0.14.2 Mar 29 2000 Ching-Ling Lee 140 * v0.14.2 Mar 29 2000 Ching-Ling Lee
141 * Add clear to silence advance in trident_update_ptr 141 * Add clear to silence advance in trident_update_ptr
142 * fix invalid data of the end of the sound 142 * fix invalid data of the end of the sound
143 * v0.14.1 Mar 24 2000 Ching-Ling Lee 143 * v0.14.1 Mar 24 2000 Ching-Ling Lee
144 * ALi 5451 support added, playback and recording O.K. 144 * ALi 5451 support added, playback and recording O.K.
@@ -178,7 +178,7 @@
178 * SiS 7018 support added, playback O.K. 178 * SiS 7018 support added, playback O.K.
179 * v0.01 Alan Cox et. al. 179 * v0.01 Alan Cox et. al.
180 * Initial Release in kernel 2.3.30, does not work 180 * Initial Release in kernel 2.3.30, does not work
181 * 181 *
182 * ToDo 182 * ToDo
183 * Clean up of low level channel register access code. (done) 183 * Clean up of low level channel register access code. (done)
184 * Fix the bug on dma buffer management in update_ptr, read/write, drain_dac (done) 184 * Fix the bug on dma buffer management in update_ptr, read/write, drain_dac (done)
@@ -326,7 +326,7 @@ struct trident_state {
326 326
327 unsigned error; /* number of over/underruns */ 327 unsigned error; /* number of over/underruns */
328 /* put process on wait queue when no more space in buffer */ 328 /* put process on wait queue when no more space in buffer */
329 wait_queue_head_t wait; 329 wait_queue_head_t wait;
330 330
331 /* redundant, but makes calculations easier */ 331 /* redundant, but makes calculations easier */
332 unsigned fragsize; 332 unsigned fragsize;
@@ -358,7 +358,7 @@ struct trident_state {
358struct trident_channel { 358struct trident_channel {
359 int num; /* channel number */ 359 int num; /* channel number */
360 u32 lba; /* Loop Begine Address, where dma buffer starts */ 360 u32 lba; /* Loop Begine Address, where dma buffer starts */
361 u32 eso; /* End Sample Offset, wehre dma buffer ends */ 361 u32 eso; /* End Sample Offset, wehre dma buffer ends */
362 /* (in the unit of samples) */ 362 /* (in the unit of samples) */
363 u32 delta; /* delta value, sample rate / 48k for playback, */ 363 u32 delta; /* delta value, sample rate / 48k for playback, */
364 /* 48k/sample rate for recording */ 364 /* 48k/sample rate for recording */
@@ -417,7 +417,7 @@ struct trident_card {
417 /* soundcore stuff */ 417 /* soundcore stuff */
418 int dev_audio; 418 int dev_audio;
419 419
420 /* structures for abstraction of hardware facilities, codecs, */ 420 /* structures for abstraction of hardware facilities, codecs, */
421 /* banks and channels */ 421 /* banks and channels */
422 struct ac97_codec *ac97_codec[NR_AC97]; 422 struct ac97_codec *ac97_codec[NR_AC97];
423 struct trident_pcm_bank banks[NR_BANKS]; 423 struct trident_pcm_bank banks[NR_BANKS];
@@ -479,7 +479,7 @@ static void trident_ac97_set(struct ac97_codec *codec, u8 reg, u16 val);
479static u16 trident_ac97_get(struct ac97_codec *codec, u8 reg); 479static u16 trident_ac97_get(struct ac97_codec *codec, u8 reg);
480 480
481static int trident_open_mixdev(struct inode *inode, struct file *file); 481static int trident_open_mixdev(struct inode *inode, struct file *file);
482static int trident_ioctl_mixdev(struct inode *inode, struct file *file, 482static int trident_ioctl_mixdev(struct inode *inode, struct file *file,
483 unsigned int cmd, unsigned long arg); 483 unsigned int cmd, unsigned long arg);
484 484
485static void ali_ac97_set(struct trident_card *card, int secondary, u8 reg, u16 val); 485static void ali_ac97_set(struct trident_card *card, int secondary, u8 reg, u16 val);
@@ -496,10 +496,10 @@ static void ali_disable_spdif_in(struct trident_card *card);
496static void ali_disable_special_channel(struct trident_card *card, int ch); 496static void ali_disable_special_channel(struct trident_card *card, int ch);
497static void ali_setup_spdif_out(struct trident_card *card, int flag); 497static void ali_setup_spdif_out(struct trident_card *card, int flag);
498static int ali_write_5_1(struct trident_state *state, 498static int ali_write_5_1(struct trident_state *state,
499 const char __user *buffer, 499 const char __user *buffer,
500 int cnt_for_multi_channel, unsigned int *copy_count, 500 int cnt_for_multi_channel, unsigned int *copy_count,
501 unsigned int *state_cnt); 501 unsigned int *state_cnt);
502static int ali_allocate_other_states_resources(struct trident_state *state, 502static int ali_allocate_other_states_resources(struct trident_state *state,
503 int chan_nums); 503 int chan_nums);
504static void ali_free_other_states_resources(struct trident_state *state); 504static void ali_free_other_states_resources(struct trident_state *state);
505 505
@@ -722,7 +722,7 @@ trident_free_pcm_channel(struct trident_card *card, unsigned int channel)
722 if (channel < 31 || channel > 63) 722 if (channel < 31 || channel > 63)
723 return; 723 return;
724 724
725 if (card->pci_id == PCI_DEVICE_ID_TRIDENT_4DWAVE_DX || 725 if (card->pci_id == PCI_DEVICE_ID_TRIDENT_4DWAVE_DX ||
726 card->pci_id == PCI_DEVICE_ID_TRIDENT_4DWAVE_NX) { 726 card->pci_id == PCI_DEVICE_ID_TRIDENT_4DWAVE_NX) {
727 b = inb(TRID_REG(card, T4D_REC_CH)); 727 b = inb(TRID_REG(card, T4D_REC_CH));
728 if ((b & ~0x80) == channel) 728 if ((b & ~0x80) == channel)
@@ -742,7 +742,7 @@ cyber_alloc_pcm_channel(struct trident_card *card)
742 int idx; 742 int idx;
743 743
744 /* The cyberpro 5050 has only 32 voices and one bank */ 744 /* The cyberpro 5050 has only 32 voices and one bank */
745 /* .. at least they are not documented (if you want to call that 745 /* .. at least they are not documented (if you want to call that
746 * crap documentation), perhaps broken ? */ 746 * crap documentation), perhaps broken ? */
747 747
748 bank = &card->banks[BANK_A]; 748 bank = &card->banks[BANK_A];
@@ -802,7 +802,7 @@ cyber_init_ritual(struct trident_card *card)
802 /* enable, if it was disabled */ 802 /* enable, if it was disabled */
803 if ((portDat & CYBER_BMSK_AUENZ) != CYBER_BMSK_AUENZ_ENABLE) { 803 if ((portDat & CYBER_BMSK_AUENZ) != CYBER_BMSK_AUENZ_ENABLE) {
804 printk(KERN_INFO "cyberpro5050: enabling audio controller\n"); 804 printk(KERN_INFO "cyberpro5050: enabling audio controller\n");
805 cyber_outidx(CYBER_PORT_AUDIO, CYBER_IDX_AUDIO_ENABLE, 805 cyber_outidx(CYBER_PORT_AUDIO, CYBER_IDX_AUDIO_ENABLE,
806 portDat | CYBER_BMSK_AUENZ_ENABLE); 806 portDat | CYBER_BMSK_AUENZ_ENABLE);
807 /* check again if hardware is enabled now */ 807 /* check again if hardware is enabled now */
808 portDat = cyber_inidx(CYBER_PORT_AUDIO, CYBER_IDX_AUDIO_ENABLE); 808 portDat = cyber_inidx(CYBER_PORT_AUDIO, CYBER_IDX_AUDIO_ENABLE);
@@ -811,7 +811,7 @@ cyber_init_ritual(struct trident_card *card)
811 printk(KERN_ERR "cyberpro5050: initAudioAccess: no success\n"); 811 printk(KERN_ERR "cyberpro5050: initAudioAccess: no success\n");
812 ret = -1; 812 ret = -1;
813 } else { 813 } else {
814 cyber_outidx(CYBER_PORT_AUDIO, CYBER_IDX_IRQ_ENABLE, 814 cyber_outidx(CYBER_PORT_AUDIO, CYBER_IDX_IRQ_ENABLE,
815 CYBER_BMSK_AUDIO_INT_ENABLE); 815 CYBER_BMSK_AUDIO_INT_ENABLE);
816 cyber_outidx(CYBER_PORT_AUDIO, 0xbf, 0x01); 816 cyber_outidx(CYBER_PORT_AUDIO, 0xbf, 0x01);
817 cyber_outidx(CYBER_PORT_AUDIO, 0xba, 0x20); 817 cyber_outidx(CYBER_PORT_AUDIO, 0xba, 0x20);
@@ -827,7 +827,7 @@ cyber_init_ritual(struct trident_card *card)
827/* called with spin lock held */ 827/* called with spin lock held */
828 828
829static int 829static int
830trident_load_channel_registers(struct trident_card *card, u32 * data, 830trident_load_channel_registers(struct trident_card *card, u32 * data,
831 unsigned int channel) 831 unsigned int channel)
832{ 832{
833 int i; 833 int i;
@@ -845,7 +845,7 @@ trident_load_channel_registers(struct trident_card *card, u32 * data,
845 continue; 845 continue;
846 outl(data[i], TRID_REG(card, CHANNEL_START + 4 * i)); 846 outl(data[i], TRID_REG(card, CHANNEL_START + 4 * i));
847 } 847 }
848 if (card->pci_id == PCI_DEVICE_ID_ALI_5451 || 848 if (card->pci_id == PCI_DEVICE_ID_ALI_5451 ||
849 card->pci_id == PCI_DEVICE_ID_INTERG_5050) { 849 card->pci_id == PCI_DEVICE_ID_INTERG_5050) {
850 outl(ALI_EMOD_Still, TRID_REG(card, ALI_EBUF1)); 850 outl(ALI_EMOD_Still, TRID_REG(card, ALI_EBUF1));
851 outl(ALI_EMOD_Still, TRID_REG(card, ALI_EBUF2)); 851 outl(ALI_EMOD_Still, TRID_REG(card, ALI_EBUF2));
@@ -884,7 +884,7 @@ trident_write_voice_regs(struct trident_state *state)
884 break; 884 break;
885 case PCI_DEVICE_ID_TRIDENT_4DWAVE_NX: 885 case PCI_DEVICE_ID_TRIDENT_4DWAVE_NX:
886 data[0] = (channel->delta << 24); 886 data[0] = (channel->delta << 24);
887 data[2] = ((channel->delta << 16) & 0xff000000) | 887 data[2] = ((channel->delta << 16) & 0xff000000) |
888 (channel->eso & 0x00ffffff); 888 (channel->eso & 0x00ffffff);
889 data[3] = channel->fm_vol & 0xffff; 889 data[3] = channel->fm_vol & 0xffff;
890 break; 890 break;
@@ -989,13 +989,13 @@ trident_play_setup(struct trident_state *state)
989 if (state->card->pci_id != PCI_DEVICE_ID_SI_7018) { 989 if (state->card->pci_id != PCI_DEVICE_ID_SI_7018) {
990 channel->attribute = 0; 990 channel->attribute = 0;
991 if (state->card->pci_id == PCI_DEVICE_ID_ALI_5451) { 991 if (state->card->pci_id == PCI_DEVICE_ID_ALI_5451) {
992 if ((channel->num == ALI_SPDIF_IN_CHANNEL) || 992 if ((channel->num == ALI_SPDIF_IN_CHANNEL) ||
993 (channel->num == ALI_PCM_IN_CHANNEL)) 993 (channel->num == ALI_PCM_IN_CHANNEL))
994 ali_disable_special_channel(state->card, channel->num); 994 ali_disable_special_channel(state->card, channel->num);
995 else if ((inl(TRID_REG(state->card, ALI_GLOBAL_CONTROL)) 995 else if ((inl(TRID_REG(state->card, ALI_GLOBAL_CONTROL))
996 & ALI_SPDIF_OUT_CH_ENABLE) 996 & ALI_SPDIF_OUT_CH_ENABLE)
997 && (channel->num == ALI_SPDIF_OUT_CHANNEL)) { 997 && (channel->num == ALI_SPDIF_OUT_CHANNEL)) {
998 ali_set_spdif_out_rate(state->card, 998 ali_set_spdif_out_rate(state->card,
999 state->dmabuf.rate); 999 state->dmabuf.rate);
1000 state->dmabuf.channel->delta = 0x1000; 1000 state->dmabuf.channel->delta = 0x1000;
1001 } 1001 }
@@ -1063,7 +1063,7 @@ trident_rec_setup(struct trident_state *state)
1063 1063
1064 channel->lba = dmabuf->dma_handle; 1064 channel->lba = dmabuf->dma_handle;
1065 channel->delta = compute_rate_rec(dmabuf->rate); 1065 channel->delta = compute_rate_rec(dmabuf->rate);
1066 if ((card->pci_id == PCI_DEVICE_ID_ALI_5451) && 1066 if ((card->pci_id == PCI_DEVICE_ID_ALI_5451) &&
1067 (channel->num == ALI_SPDIF_IN_CHANNEL)) { 1067 (channel->num == ALI_SPDIF_IN_CHANNEL)) {
1068 rate = ali_get_spdif_in_rate(card); 1068 rate = ali_get_spdif_in_rate(card);
1069 if (rate == 0) { 1069 if (rate == 0) {
@@ -1180,8 +1180,8 @@ start_adc(struct trident_state *state)
1180 unsigned long flags; 1180 unsigned long flags;
1181 1181
1182 spin_lock_irqsave(&card->lock, flags); 1182 spin_lock_irqsave(&card->lock, flags);
1183 if ((dmabuf->mapped || 1183 if ((dmabuf->mapped ||
1184 dmabuf->count < (signed) dmabuf->dmasize) && 1184 dmabuf->count < (signed) dmabuf->dmasize) &&
1185 dmabuf->ready) { 1185 dmabuf->ready) {
1186 dmabuf->enable |= ADC_RUNNING; 1186 dmabuf->enable |= ADC_RUNNING;
1187 trident_enable_voice_irq(card, chan_num); 1187 trident_enable_voice_irq(card, chan_num);
@@ -1261,7 +1261,7 @@ alloc_dmabuf(struct dmabuf *dmabuf, struct pci_dev *pci_dev, int order)
1261 void *rawbuf = NULL; 1261 void *rawbuf = NULL;
1262 struct page *page, *pend; 1262 struct page *page, *pend;
1263 1263
1264 if (!(rawbuf = pci_alloc_consistent(pci_dev, PAGE_SIZE << order, 1264 if (!(rawbuf = pci_alloc_consistent(pci_dev, PAGE_SIZE << order,
1265 &dmabuf->dma_handle))) 1265 &dmabuf->dma_handle)))
1266 return -ENOMEM; 1266 return -ENOMEM;
1267 1267
@@ -1272,7 +1272,7 @@ alloc_dmabuf(struct dmabuf *dmabuf, struct pci_dev *pci_dev, int order)
1272 dmabuf->rawbuf = rawbuf; 1272 dmabuf->rawbuf = rawbuf;
1273 dmabuf->buforder = order; 1273 dmabuf->buforder = order;
1274 1274
1275 /* now mark the pages as reserved; otherwise */ 1275 /* now mark the pages as reserved; otherwise */
1276 /* remap_pfn_range doesn't do what we want */ 1276 /* remap_pfn_range doesn't do what we want */
1277 pend = virt_to_page(rawbuf + (PAGE_SIZE << order) - 1); 1277 pend = virt_to_page(rawbuf + (PAGE_SIZE << order) - 1);
1278 for (page = virt_to_page(rawbuf); page <= pend; page++) 1278 for (page = virt_to_page(rawbuf); page <= pend; page++)
@@ -1310,7 +1310,7 @@ dealloc_dmabuf(struct dmabuf *dmabuf, struct pci_dev *pci_dev)
1310 pend = virt_to_page(dmabuf->rawbuf + (PAGE_SIZE << dmabuf->buforder) - 1); 1310 pend = virt_to_page(dmabuf->rawbuf + (PAGE_SIZE << dmabuf->buforder) - 1);
1311 for (page = virt_to_page(dmabuf->rawbuf); page <= pend; page++) 1311 for (page = virt_to_page(dmabuf->rawbuf); page <= pend; page++)
1312 ClearPageReserved(page); 1312 ClearPageReserved(page);
1313 pci_free_consistent(pci_dev, PAGE_SIZE << dmabuf->buforder, 1313 pci_free_consistent(pci_dev, PAGE_SIZE << dmabuf->buforder,
1314 dmabuf->rawbuf, dmabuf->dma_handle); 1314 dmabuf->rawbuf, dmabuf->dma_handle);
1315 dmabuf->rawbuf = NULL; 1315 dmabuf->rawbuf = NULL;
1316 } 1316 }
@@ -1368,7 +1368,7 @@ prog_dmabuf(struct trident_state *state, enum dmabuf_mode rec)
1368 dealloc_dmabuf(&state->dmabuf, state->card->pci_dev); 1368 dealloc_dmabuf(&state->dmabuf, state->card->pci_dev);
1369 /* release the auxiliary DMA buffers */ 1369 /* release the auxiliary DMA buffers */
1370 for (i -= 2; i >= 0; i--) 1370 for (i -= 2; i >= 0; i--)
1371 dealloc_dmabuf(&state->other_states[i]->dmabuf, 1371 dealloc_dmabuf(&state->other_states[i]->dmabuf,
1372 state->card->pci_dev); 1372 state->card->pci_dev);
1373 unlock_set_fmt(state); 1373 unlock_set_fmt(state);
1374 return ret; 1374 return ret;
@@ -1398,7 +1398,7 @@ prog_dmabuf(struct trident_state *state, enum dmabuf_mode rec)
1398 dmabuf->fragsamples = dmabuf->fragsize >> sample_shift[dmabuf->fmt]; 1398 dmabuf->fragsamples = dmabuf->fragsize >> sample_shift[dmabuf->fmt];
1399 dmabuf->dmasize = dmabuf->numfrag << dmabuf->fragshift; 1399 dmabuf->dmasize = dmabuf->numfrag << dmabuf->fragshift;
1400 1400
1401 memset(dmabuf->rawbuf, (dmabuf->fmt & TRIDENT_FMT_16BIT) ? 0 : 0x80, 1401 memset(dmabuf->rawbuf, (dmabuf->fmt & TRIDENT_FMT_16BIT) ? 0 : 0x80,
1402 dmabuf->dmasize); 1402 dmabuf->dmasize);
1403 1403
1404 spin_lock_irqsave(&s->card->lock, flags); 1404 spin_lock_irqsave(&s->card->lock, flags);
@@ -1453,7 +1453,7 @@ trident_clear_tail(struct trident_state *state)
1453 swptr = dmabuf->swptr; 1453 swptr = dmabuf->swptr;
1454 spin_unlock_irqrestore(&state->card->lock, flags); 1454 spin_unlock_irqrestore(&state->card->lock, flags);
1455 1455
1456 if (swptr == 0 || swptr == dmabuf->dmasize / 2 || 1456 if (swptr == 0 || swptr == dmabuf->dmasize / 2 ||
1457 swptr == dmabuf->dmasize) 1457 swptr == dmabuf->dmasize)
1458 return; 1458 return;
1459 1459
@@ -1511,7 +1511,7 @@ drain_dac(struct trident_state *state, int nonblock)
1511 1511
1512 /* No matter how much data is left in the buffer, we have to wait until 1512 /* No matter how much data is left in the buffer, we have to wait until
1513 CSO == ESO/2 or CSO == ESO when address engine interrupts */ 1513 CSO == ESO/2 or CSO == ESO when address engine interrupts */
1514 if (state->card->pci_id == PCI_DEVICE_ID_ALI_5451 || 1514 if (state->card->pci_id == PCI_DEVICE_ID_ALI_5451 ||
1515 state->card->pci_id == PCI_DEVICE_ID_INTERG_5050) { 1515 state->card->pci_id == PCI_DEVICE_ID_INTERG_5050) {
1516 diff = dmabuf->swptr - trident_get_dma_addr(state) + dmabuf->dmasize; 1516 diff = dmabuf->swptr - trident_get_dma_addr(state) + dmabuf->dmasize;
1517 diff = diff % (dmabuf->dmasize); 1517 diff = diff % (dmabuf->dmasize);
@@ -1532,7 +1532,7 @@ drain_dac(struct trident_state *state, int nonblock)
1532 return 0; 1532 return 0;
1533} 1533}
1534 1534
1535/* update buffer manangement pointers, especially, */ 1535/* update buffer manangement pointers, especially, */
1536/* dmabuf->count and dmabuf->hwptr */ 1536/* dmabuf->count and dmabuf->hwptr */
1537static void 1537static void
1538trident_update_ptr(struct trident_state *state) 1538trident_update_ptr(struct trident_state *state)
@@ -1559,11 +1559,11 @@ trident_update_ptr(struct trident_state *state)
1559 } else { 1559 } else {
1560 dmabuf->count += diff; 1560 dmabuf->count += diff;
1561 1561
1562 if (dmabuf->count < 0 || 1562 if (dmabuf->count < 0 ||
1563 dmabuf->count > dmabuf->dmasize) { 1563 dmabuf->count > dmabuf->dmasize) {
1564 /* buffer underrun or buffer overrun, */ 1564 /* buffer underrun or buffer overrun, */
1565 /* we have no way to recover it here, just */ 1565 /* we have no way to recover it here, just */
1566 /* stop the machine and let the process */ 1566 /* stop the machine and let the process */
1567 /* force hwptr and swptr to sync */ 1567 /* force hwptr and swptr to sync */
1568 __stop_adc(state); 1568 __stop_adc(state);
1569 dmabuf->error++; 1569 dmabuf->error++;
@@ -1582,7 +1582,7 @@ trident_update_ptr(struct trident_state *state)
1582 } else { 1582 } else {
1583 dmabuf->count -= diff; 1583 dmabuf->count -= diff;
1584 1584
1585 if (dmabuf->count < 0 || 1585 if (dmabuf->count < 0 ||
1586 dmabuf->count > dmabuf->dmasize) { 1586 dmabuf->count > dmabuf->dmasize) {
1587 /* buffer underrun or buffer overrun, we have no way to recover 1587 /* buffer underrun or buffer overrun, we have no way to recover
1588 it here, just stop the machine and let the process force hwptr 1588 it here, just stop the machine and let the process force hwptr
@@ -1608,13 +1608,13 @@ trident_update_ptr(struct trident_state *state)
1608 if (state->chans_num == 6) { 1608 if (state->chans_num == 6) {
1609 clear_cnt = clear_cnt / 2; 1609 clear_cnt = clear_cnt / 2;
1610 swptr = swptr / 2; 1610 swptr = swptr / 2;
1611 memset(state->other_states[0]->dmabuf.rawbuf + swptr, 1611 memset(state->other_states[0]->dmabuf.rawbuf + swptr,
1612 silence, clear_cnt); 1612 silence, clear_cnt);
1613 memset(state->other_states[1]->dmabuf.rawbuf + swptr, 1613 memset(state->other_states[1]->dmabuf.rawbuf + swptr,
1614 silence, clear_cnt); 1614 silence, clear_cnt);
1615 memset(state->other_states[2]->dmabuf.rawbuf + swptr, 1615 memset(state->other_states[2]->dmabuf.rawbuf + swptr,
1616 silence, clear_cnt); 1616 silence, clear_cnt);
1617 memset(state->other_states[3]->dmabuf.rawbuf + swptr, 1617 memset(state->other_states[3]->dmabuf.rawbuf + swptr,
1618 silence, clear_cnt); 1618 silence, clear_cnt);
1619 } 1619 }
1620 dmabuf->endcleared = 1; 1620 dmabuf->endcleared = 1;
@@ -1627,13 +1627,13 @@ trident_update_ptr(struct trident_state *state)
1627 if (state->chans_num == 6) { 1627 if (state->chans_num == 6) {
1628 clear_cnt = clear_cnt / 2; 1628 clear_cnt = clear_cnt / 2;
1629 swptr = swptr / 2; 1629 swptr = swptr / 2;
1630 memset(state->other_states[0]->dmabuf.rawbuf + swptr, 1630 memset(state->other_states[0]->dmabuf.rawbuf + swptr,
1631 silence, clear_cnt); 1631 silence, clear_cnt);
1632 memset(state->other_states[1]->dmabuf.rawbuf + swptr, 1632 memset(state->other_states[1]->dmabuf.rawbuf + swptr,
1633 silence, clear_cnt); 1633 silence, clear_cnt);
1634 memset(state->other_states[2]->dmabuf.rawbuf + swptr, 1634 memset(state->other_states[2]->dmabuf.rawbuf + swptr,
1635 silence, clear_cnt); 1635 silence, clear_cnt);
1636 memset(state->other_states[3]->dmabuf.rawbuf + swptr, 1636 memset(state->other_states[3]->dmabuf.rawbuf + swptr,
1637 silence, clear_cnt); 1637 silence, clear_cnt);
1638 } 1638 }
1639 dmabuf->endcleared = 1; 1639 dmabuf->endcleared = 1;
@@ -1665,7 +1665,7 @@ trident_address_interrupt(struct trident_card *card)
1665 if ((state = card->states[i]) != NULL) { 1665 if ((state = card->states[i]) != NULL) {
1666 trident_update_ptr(state); 1666 trident_update_ptr(state);
1667 } else { 1667 } else {
1668 printk(KERN_WARNING "trident: spurious channel " 1668 printk(KERN_WARNING "trident: spurious channel "
1669 "irq %d.\n", channel); 1669 "irq %d.\n", channel);
1670 trident_stop_voice(card, channel); 1670 trident_stop_voice(card, channel);
1671 trident_disable_voice_irq(card, channel); 1671 trident_disable_voice_irq(card, channel);
@@ -1694,7 +1694,7 @@ ali_hwvol_control(struct trident_card *card, int opt)
1694 1694
1695 if (opt == 1) { // MUTE 1695 if (opt == 1) { // MUTE
1696 dwTemp ^= 0x8000; 1696 dwTemp ^= 0x8000;
1697 ali_ac97_write(card->ac97_codec[0], 1697 ali_ac97_write(card->ac97_codec[0],
1698 0x02, dwTemp); 1698 0x02, dwTemp);
1699 } else if (opt == 2) { // Down 1699 } else if (opt == 2) { // Down
1700 if (mute) 1700 if (mute)
@@ -1706,7 +1706,7 @@ ali_hwvol_control(struct trident_card *card, int opt)
1706 dwTemp &= 0xe0e0; 1706 dwTemp &= 0xe0e0;
1707 dwTemp |= (volume[0]) | (volume[1] << 8); 1707 dwTemp |= (volume[0]) | (volume[1] << 8);
1708 ali_ac97_write(card->ac97_codec[0], 0x02, dwTemp); 1708 ali_ac97_write(card->ac97_codec[0], 0x02, dwTemp);
1709 card->ac97_codec[0]->mixer_state[0] = ((32 - volume[0]) * 25 / 8) | 1709 card->ac97_codec[0]->mixer_state[0] = ((32 - volume[0]) * 25 / 8) |
1710 (((32 - volume[1]) * 25 / 8) << 8); 1710 (((32 - volume[1]) * 25 / 8) << 8);
1711 } else if (opt == 4) { // Up 1711 } else if (opt == 4) { // Up
1712 if (mute) 1712 if (mute)
@@ -1718,7 +1718,7 @@ ali_hwvol_control(struct trident_card *card, int opt)
1718 dwTemp &= 0xe0e0; 1718 dwTemp &= 0xe0e0;
1719 dwTemp |= (volume[0]) | (volume[1] << 8); 1719 dwTemp |= (volume[0]) | (volume[1] << 8);
1720 ali_ac97_write(card->ac97_codec[0], 0x02, dwTemp); 1720 ali_ac97_write(card->ac97_codec[0], 0x02, dwTemp);
1721 card->ac97_codec[0]->mixer_state[0] = ((32 - volume[0]) * 25 / 8) | 1721 card->ac97_codec[0]->mixer_state[0] = ((32 - volume[0]) * 25 / 8) |
1722 (((32 - volume[1]) * 25 / 8) << 8); 1722 (((32 - volume[1]) * 25 / 8) << 8);
1723 } else { 1723 } else {
1724 /* Nothing needs doing */ 1724 /* Nothing needs doing */
@@ -1801,7 +1801,7 @@ cyber_address_interrupt(struct trident_card *card)
1801 if ((state = card->states[i]) != NULL) { 1801 if ((state = card->states[i]) != NULL) {
1802 trident_update_ptr(state); 1802 trident_update_ptr(state);
1803 } else { 1803 } else {
1804 printk(KERN_WARNING "cyber5050: spurious " 1804 printk(KERN_WARNING "cyber5050: spurious "
1805 "channel irq %d.\n", channel); 1805 "channel irq %d.\n", channel);
1806 trident_stop_voice(card, channel); 1806 trident_stop_voice(card, channel);
1807 trident_disable_voice_irq(card, channel); 1807 trident_disable_voice_irq(card, channel);
@@ -1836,21 +1836,21 @@ trident_interrupt(int irq, void *dev_id)
1836 ali_queue_task(card, gpio & 0x07); 1836 ali_queue_task(card, gpio & 0x07);
1837 } 1837 }
1838 event = inl(TRID_REG(card, T4D_MISCINT)); 1838 event = inl(TRID_REG(card, T4D_MISCINT));
1839 outl(event | (ST_TARGET_REACHED | MIXER_OVERFLOW | MIXER_UNDERFLOW), 1839 outl(event | (ST_TARGET_REACHED | MIXER_OVERFLOW | MIXER_UNDERFLOW),
1840 TRID_REG(card, T4D_MISCINT)); 1840 TRID_REG(card, T4D_MISCINT));
1841 spin_unlock(&card->lock); 1841 spin_unlock(&card->lock);
1842 return IRQ_HANDLED; 1842 return IRQ_HANDLED;
1843 } 1843 }
1844 1844
1845 /* manually clear interrupt status, bad hardware design, blame T^2 */ 1845 /* manually clear interrupt status, bad hardware design, blame T^2 */
1846 outl((ST_TARGET_REACHED | MIXER_OVERFLOW | MIXER_UNDERFLOW), 1846 outl((ST_TARGET_REACHED | MIXER_OVERFLOW | MIXER_UNDERFLOW),
1847 TRID_REG(card, T4D_MISCINT)); 1847 TRID_REG(card, T4D_MISCINT));
1848 spin_unlock(&card->lock); 1848 spin_unlock(&card->lock);
1849 return IRQ_HANDLED; 1849 return IRQ_HANDLED;
1850} 1850}
1851 1851
1852/* in this loop, dmabuf.count signifies the amount of data that is waiting */ 1852/* in this loop, dmabuf.count signifies the amount of data that is waiting */
1853/* to be copied to the user's buffer. it is filled by the dma machine and */ 1853/* to be copied to the user's buffer. it is filled by the dma machine and */
1854/* drained by this loop. */ 1854/* drained by this loop. */
1855static ssize_t 1855static ssize_t
1856trident_read(struct file *file, char __user *buffer, size_t count, loff_t * ppos) 1856trident_read(struct file *file, char __user *buffer, size_t count, loff_t * ppos)
@@ -1878,8 +1878,8 @@ trident_read(struct file *file, char __user *buffer, size_t count, loff_t * ppos
1878 while (count > 0) { 1878 while (count > 0) {
1879 spin_lock_irqsave(&state->card->lock, flags); 1879 spin_lock_irqsave(&state->card->lock, flags);
1880 if (dmabuf->count > (signed) dmabuf->dmasize) { 1880 if (dmabuf->count > (signed) dmabuf->dmasize) {
1881 /* buffer overrun, we are recovering from */ 1881 /* buffer overrun, we are recovering from */
1882 /* sleep_on_timeout, resync hwptr and swptr, */ 1882 /* sleep_on_timeout, resync hwptr and swptr, */
1883 /* make process flush the buffer */ 1883 /* make process flush the buffer */
1884 dmabuf->count = dmabuf->dmasize; 1884 dmabuf->count = dmabuf->dmasize;
1885 dmabuf->swptr = dmabuf->hwptr; 1885 dmabuf->swptr = dmabuf->hwptr;
@@ -1894,7 +1894,7 @@ trident_read(struct file *file, char __user *buffer, size_t count, loff_t * ppos
1894 cnt = count; 1894 cnt = count;
1895 if (cnt <= 0) { 1895 if (cnt <= 0) {
1896 unsigned long tmo; 1896 unsigned long tmo;
1897 /* buffer is empty, start the dma machine and */ 1897 /* buffer is empty, start the dma machine and */
1898 /* wait for data to be recorded */ 1898 /* wait for data to be recorded */
1899 start_adc(state); 1899 start_adc(state);
1900 if (file->f_flags & O_NONBLOCK) { 1900 if (file->f_flags & O_NONBLOCK) {
@@ -1904,8 +1904,8 @@ trident_read(struct file *file, char __user *buffer, size_t count, loff_t * ppos
1904 } 1904 }
1905 1905
1906 mutex_unlock(&state->sem); 1906 mutex_unlock(&state->sem);
1907 /* No matter how much space left in the buffer, */ 1907 /* No matter how much space left in the buffer, */
1908 /* we have to wait until CSO == ESO/2 or CSO == ESO */ 1908 /* we have to wait until CSO == ESO/2 or CSO == ESO */
1909 /* when address engine interrupts */ 1909 /* when address engine interrupts */
1910 tmo = (dmabuf->dmasize * HZ) / (dmabuf->rate * 2); 1910 tmo = (dmabuf->dmasize * HZ) / (dmabuf->rate * 2);
1911 tmo >>= sample_shift[dmabuf->fmt]; 1911 tmo >>= sample_shift[dmabuf->fmt];
@@ -2005,7 +2005,7 @@ trident_write(struct file *file, const char __user *buffer, size_t count, loff_t
2005 while (count > 0) { 2005 while (count > 0) {
2006 spin_lock_irqsave(&state->card->lock, flags); 2006 spin_lock_irqsave(&state->card->lock, flags);
2007 if (dmabuf->count < 0) { 2007 if (dmabuf->count < 0) {
2008 /* buffer underrun, we are recovering from */ 2008 /* buffer underrun, we are recovering from */
2009 /* sleep_on_timeout, resync hwptr and swptr */ 2009 /* sleep_on_timeout, resync hwptr and swptr */
2010 dmabuf->count = 0; 2010 dmabuf->count = 0;
2011 dmabuf->swptr = dmabuf->hwptr; 2011 dmabuf->swptr = dmabuf->hwptr;
@@ -2020,7 +2020,7 @@ trident_write(struct file *file, const char __user *buffer, size_t count, loff_t
2020 cnt = count; 2020 cnt = count;
2021 if (cnt <= 0) { 2021 if (cnt <= 0) {
2022 unsigned long tmo; 2022 unsigned long tmo;
2023 /* buffer is full, start the dma machine and */ 2023 /* buffer is full, start the dma machine and */
2024 /* wait for data to be played */ 2024 /* wait for data to be played */
2025 start_dac(state); 2025 start_dac(state);
2026 if (file->f_flags & O_NONBLOCK) { 2026 if (file->f_flags & O_NONBLOCK) {
@@ -2028,8 +2028,8 @@ trident_write(struct file *file, const char __user *buffer, size_t count, loff_t
2028 ret = -EAGAIN; 2028 ret = -EAGAIN;
2029 goto out; 2029 goto out;
2030 } 2030 }
2031 /* No matter how much data left in the buffer, */ 2031 /* No matter how much data left in the buffer, */
2032 /* we have to wait until CSO == ESO/2 or CSO == ESO */ 2032 /* we have to wait until CSO == ESO/2 or CSO == ESO */
2033 /* when address engine interrupts */ 2033 /* when address engine interrupts */
2034 lock_set_fmt(state); 2034 lock_set_fmt(state);
2035 tmo = (dmabuf->dmasize * HZ) / (dmabuf->rate * 2); 2035 tmo = (dmabuf->dmasize * HZ) / (dmabuf->rate * 2);
@@ -2037,15 +2037,15 @@ trident_write(struct file *file, const char __user *buffer, size_t count, loff_t
2037 unlock_set_fmt(state); 2037 unlock_set_fmt(state);
2038 mutex_unlock(&state->sem); 2038 mutex_unlock(&state->sem);
2039 2039
2040 /* There are two situations when sleep_on_timeout */ 2040 /* There are two situations when sleep_on_timeout */
2041 /* returns, one is when the interrupt is serviced */ 2041 /* returns, one is when the interrupt is serviced */
2042 /* correctly and the process is waked up by ISR */ 2042 /* correctly and the process is waked up by ISR */
2043 /* ON TIME. Another is when timeout is expired, which */ 2043 /* ON TIME. Another is when timeout is expired, which */
2044 /* means that either interrupt is NOT serviced */ 2044 /* means that either interrupt is NOT serviced */
2045 /* correctly (pending interrupt) or it is TOO LATE */ 2045 /* correctly (pending interrupt) or it is TOO LATE */
2046 /* for the process to be scheduled to run */ 2046 /* for the process to be scheduled to run */
2047 /* (scheduler latency) which results in a (potential) */ 2047 /* (scheduler latency) which results in a (potential) */
2048 /* buffer underrun. And worse, there is NOTHING we */ 2048 /* buffer underrun. And worse, there is NOTHING we */
2049 /* can do to prevent it. */ 2049 /* can do to prevent it. */
2050 if (!interruptible_sleep_on_timeout(&dmabuf->wait, tmo)) { 2050 if (!interruptible_sleep_on_timeout(&dmabuf->wait, tmo)) {
2051 pr_debug(KERN_ERR "trident: playback schedule " 2051 pr_debug(KERN_ERR "trident: playback schedule "
@@ -2054,8 +2054,8 @@ trident_write(struct file *file, const char __user *buffer, size_t count, loff_t
2054 dmabuf->fragsize, dmabuf->count, 2054 dmabuf->fragsize, dmabuf->count,
2055 dmabuf->hwptr, dmabuf->swptr); 2055 dmabuf->hwptr, dmabuf->swptr);
2056 2056
2057 /* a buffer underrun, we delay the recovery */ 2057 /* a buffer underrun, we delay the recovery */
2058 /* until next time the while loop begin and */ 2058 /* until next time the while loop begin and */
2059 /* we REALLY have data to play */ 2059 /* we REALLY have data to play */
2060 } 2060 }
2061 if (signal_pending(current)) { 2061 if (signal_pending(current)) {
@@ -2079,7 +2079,7 @@ trident_write(struct file *file, const char __user *buffer, size_t count, loff_t
2079 if (state->chans_num == 6) { 2079 if (state->chans_num == 6) {
2080 copy_count = 0; 2080 copy_count = 0;
2081 state_cnt = 0; 2081 state_cnt = 0;
2082 if (ali_write_5_1(state, buffer, cnt, &copy_count, 2082 if (ali_write_5_1(state, buffer, cnt, &copy_count,
2083 &state_cnt) == -EFAULT) { 2083 &state_cnt) == -EFAULT) {
2084 if (state_cnt) { 2084 if (state_cnt) {
2085 swptr = (swptr + state_cnt) % dmabuf->dmasize; 2085 swptr = (swptr + state_cnt) % dmabuf->dmasize;
@@ -2096,7 +2096,7 @@ trident_write(struct file *file, const char __user *buffer, size_t count, loff_t
2096 goto out; 2096 goto out;
2097 } 2097 }
2098 } else { 2098 } else {
2099 if (copy_from_user(dmabuf->rawbuf + swptr, 2099 if (copy_from_user(dmabuf->rawbuf + swptr,
2100 buffer, cnt)) { 2100 buffer, cnt)) {
2101 if (!ret) 2101 if (!ret)
2102 ret = -EFAULT; 2102 ret = -EFAULT;
@@ -2172,7 +2172,7 @@ trident_poll(struct file *file, struct poll_table_struct *wait)
2172 if (dmabuf->count >= (signed) dmabuf->fragsize) 2172 if (dmabuf->count >= (signed) dmabuf->fragsize)
2173 mask |= POLLOUT | POLLWRNORM; 2173 mask |= POLLOUT | POLLWRNORM;
2174 } else { 2174 } else {
2175 if ((signed) dmabuf->dmasize >= dmabuf->count + 2175 if ((signed) dmabuf->dmasize >= dmabuf->count +
2176 (signed) dmabuf->fragsize) 2176 (signed) dmabuf->fragsize)
2177 mask |= POLLOUT | POLLWRNORM; 2177 mask |= POLLOUT | POLLWRNORM;
2178 } 2178 }
@@ -2227,7 +2227,7 @@ out:
2227} 2227}
2228 2228
2229static int 2229static int
2230trident_ioctl(struct inode *inode, struct file *file, 2230trident_ioctl(struct inode *inode, struct file *file,
2231 unsigned int cmd, unsigned long arg) 2231 unsigned int cmd, unsigned long arg)
2232{ 2232{
2233 struct trident_state *state = (struct trident_state *)file->private_data; 2233 struct trident_state *state = (struct trident_state *)file->private_data;
@@ -2348,7 +2348,7 @@ trident_ioctl(struct inode *inode, struct file *file,
2348 2348
2349 2349
2350 case SNDCTL_DSP_GETFMTS: /* Returns a mask of supported sample format */ 2350 case SNDCTL_DSP_GETFMTS: /* Returns a mask of supported sample format */
2351 ret = put_user(AFMT_S16_LE | AFMT_U16_LE | AFMT_S8 | 2351 ret = put_user(AFMT_S16_LE | AFMT_U16_LE | AFMT_S8 |
2352 AFMT_U8, p); 2352 AFMT_U8, p);
2353 break; 2353 break;
2354 2354
@@ -2379,7 +2379,7 @@ trident_ioctl(struct inode *inode, struct file *file,
2379 } 2379 }
2380 } 2380 }
2381 unlock_set_fmt(state); 2381 unlock_set_fmt(state);
2382 ret = put_user((dmabuf->fmt & TRIDENT_FMT_16BIT) ? AFMT_S16_LE : 2382 ret = put_user((dmabuf->fmt & TRIDENT_FMT_16BIT) ? AFMT_S16_LE :
2383 AFMT_U8, p); 2383 AFMT_U8, p);
2384 break; 2384 break;
2385 2385
@@ -2438,7 +2438,7 @@ trident_ioctl(struct inode *inode, struct file *file,
2438 stop_adc(state); 2438 stop_adc(state);
2439 dmabuf->ready = 0; 2439 dmabuf->ready = 0;
2440 if (val >= 2) { 2440 if (val >= 2) {
2441 if (!((file->f_mode & FMODE_WRITE) && 2441 if (!((file->f_mode & FMODE_WRITE) &&
2442 (val == 6))) 2442 (val == 6)))
2443 val = 2; 2443 val = 2;
2444 dmabuf->fmt |= TRIDENT_FMT_STEREO; 2444 dmabuf->fmt |= TRIDENT_FMT_STEREO;
@@ -2504,7 +2504,7 @@ trident_ioctl(struct inode *inode, struct file *file,
2504 abinfo.fragstotal = dmabuf->numfrag; 2504 abinfo.fragstotal = dmabuf->numfrag;
2505 abinfo.fragments = abinfo.bytes >> dmabuf->fragshift; 2505 abinfo.fragments = abinfo.bytes >> dmabuf->fragshift;
2506 spin_unlock_irqrestore(&state->card->lock, flags); 2506 spin_unlock_irqrestore(&state->card->lock, flags);
2507 ret = copy_to_user(argp, &abinfo, sizeof (abinfo)) ? 2507 ret = copy_to_user(argp, &abinfo, sizeof (abinfo)) ?
2508 -EFAULT : 0; 2508 -EFAULT : 0;
2509 break; 2509 break;
2510 2510
@@ -2524,7 +2524,7 @@ trident_ioctl(struct inode *inode, struct file *file,
2524 abinfo.fragstotal = dmabuf->numfrag; 2524 abinfo.fragstotal = dmabuf->numfrag;
2525 abinfo.fragments = abinfo.bytes >> dmabuf->fragshift; 2525 abinfo.fragments = abinfo.bytes >> dmabuf->fragshift;
2526 spin_unlock_irqrestore(&state->card->lock, flags); 2526 spin_unlock_irqrestore(&state->card->lock, flags);
2527 ret = copy_to_user(argp, &abinfo, sizeof (abinfo)) ? 2527 ret = copy_to_user(argp, &abinfo, sizeof (abinfo)) ?
2528 -EFAULT : 0; 2528 -EFAULT : 0;
2529 break; 2529 break;
2530 2530
@@ -2533,7 +2533,7 @@ trident_ioctl(struct inode *inode, struct file *file,
2533 break; 2533 break;
2534 2534
2535 case SNDCTL_DSP_GETCAPS: 2535 case SNDCTL_DSP_GETCAPS:
2536 ret = put_user(DSP_CAP_REALTIME | DSP_CAP_TRIGGER | 2536 ret = put_user(DSP_CAP_REALTIME | DSP_CAP_TRIGGER |
2537 DSP_CAP_MMAP | DSP_CAP_BIND, p); 2537 DSP_CAP_MMAP | DSP_CAP_BIND, p);
2538 break; 2538 break;
2539 2539
@@ -2553,7 +2553,7 @@ trident_ioctl(struct inode *inode, struct file *file,
2553 } 2553 }
2554 if (file->f_mode & FMODE_READ) { 2554 if (file->f_mode & FMODE_READ) {
2555 if (val & PCM_ENABLE_INPUT) { 2555 if (val & PCM_ENABLE_INPUT) {
2556 if (!dmabuf->ready && 2556 if (!dmabuf->ready &&
2557 (ret = prog_dmabuf_record(state))) 2557 (ret = prog_dmabuf_record(state)))
2558 break; 2558 break;
2559 start_adc(state); 2559 start_adc(state);
@@ -2562,7 +2562,7 @@ trident_ioctl(struct inode *inode, struct file *file,
2562 } 2562 }
2563 if (file->f_mode & FMODE_WRITE) { 2563 if (file->f_mode & FMODE_WRITE) {
2564 if (val & PCM_ENABLE_OUTPUT) { 2564 if (val & PCM_ENABLE_OUTPUT) {
2565 if (!dmabuf->ready && 2565 if (!dmabuf->ready &&
2566 (ret = prog_dmabuf_playback(state))) 2566 (ret = prog_dmabuf_playback(state)))
2567 break; 2567 break;
2568 start_dac(state); 2568 start_dac(state);
@@ -2589,7 +2589,7 @@ trident_ioctl(struct inode *inode, struct file *file,
2589 if (dmabuf->mapped) 2589 if (dmabuf->mapped)
2590 dmabuf->count &= dmabuf->fragsize - 1; 2590 dmabuf->count &= dmabuf->fragsize - 1;
2591 spin_unlock_irqrestore(&state->card->lock, flags); 2591 spin_unlock_irqrestore(&state->card->lock, flags);
2592 ret = copy_to_user(argp, &cinfo, sizeof (cinfo)) ? 2592 ret = copy_to_user(argp, &cinfo, sizeof (cinfo)) ?
2593 -EFAULT : 0; 2593 -EFAULT : 0;
2594 break; 2594 break;
2595 2595
@@ -2612,7 +2612,7 @@ trident_ioctl(struct inode *inode, struct file *file,
2612 if (dmabuf->mapped) 2612 if (dmabuf->mapped)
2613 dmabuf->count &= dmabuf->fragsize - 1; 2613 dmabuf->count &= dmabuf->fragsize - 1;
2614 spin_unlock_irqrestore(&state->card->lock, flags); 2614 spin_unlock_irqrestore(&state->card->lock, flags);
2615 ret = copy_to_user(argp, &cinfo, sizeof (cinfo)) ? 2615 ret = copy_to_user(argp, &cinfo, sizeof (cinfo)) ?
2616 -EFAULT : 0; 2616 -EFAULT : 0;
2617 break; 2617 break;
2618 2618
@@ -2641,17 +2641,17 @@ trident_ioctl(struct inode *inode, struct file *file,
2641 break; 2641 break;
2642 2642
2643 case SOUND_PCM_READ_CHANNELS: 2643 case SOUND_PCM_READ_CHANNELS:
2644 ret = put_user((dmabuf->fmt & TRIDENT_FMT_STEREO) ? 2 : 1, 2644 ret = put_user((dmabuf->fmt & TRIDENT_FMT_STEREO) ? 2 : 1,
2645 p); 2645 p);
2646 break; 2646 break;
2647 2647
2648 case SOUND_PCM_READ_BITS: 2648 case SOUND_PCM_READ_BITS:
2649 ret = put_user((dmabuf->fmt & TRIDENT_FMT_16BIT) ? AFMT_S16_LE : 2649 ret = put_user((dmabuf->fmt & TRIDENT_FMT_16BIT) ? AFMT_S16_LE :
2650 AFMT_U8, p); 2650 AFMT_U8, p);
2651 break; 2651 break;
2652 2652
2653 case SNDCTL_DSP_GETCHANNELMASK: 2653 case SNDCTL_DSP_GETCHANNELMASK:
2654 ret = put_user(DSP_BIND_FRONT | DSP_BIND_SURR | 2654 ret = put_user(DSP_BIND_FRONT | DSP_BIND_SURR |
2655 DSP_BIND_CENTER_LFE, p); 2655 DSP_BIND_CENTER_LFE, p);
2656 break; 2656 break;
2657 2657
@@ -2671,10 +2671,10 @@ trident_ioctl(struct inode *inode, struct file *file,
2671 } else { 2671 } else {
2672 dmabuf->ready = 0; 2672 dmabuf->ready = 0;
2673 if (file->f_mode & FMODE_READ) 2673 if (file->f_mode & FMODE_READ)
2674 dmabuf->channel->attribute = (CHANNEL_REC | 2674 dmabuf->channel->attribute = (CHANNEL_REC |
2675 SRC_ENABLE); 2675 SRC_ENABLE);
2676 if (file->f_mode & FMODE_WRITE) 2676 if (file->f_mode & FMODE_WRITE)
2677 dmabuf->channel->attribute = (CHANNEL_SPC_PB | 2677 dmabuf->channel->attribute = (CHANNEL_SPC_PB |
2678 SRC_ENABLE); 2678 SRC_ENABLE);
2679 dmabuf->channel->attribute |= mask2attr[ffs(val)]; 2679 dmabuf->channel->attribute |= mask2attr[ffs(val)];
2680 } 2680 }
@@ -2702,6 +2702,7 @@ trident_open(struct inode *inode, struct file *file)
2702 struct trident_card *card = devs; 2702 struct trident_card *card = devs;
2703 struct trident_state *state = NULL; 2703 struct trident_state *state = NULL;
2704 struct dmabuf *dmabuf = NULL; 2704 struct dmabuf *dmabuf = NULL;
2705 unsigned long flags;
2705 2706
2706 /* Added by Matt Wu 01-05-2001 */ 2707 /* Added by Matt Wu 01-05-2001 */
2707 /* TODO: there's some redundacy here wrt the check below */ 2708 /* TODO: there's some redundacy here wrt the check below */
@@ -2765,8 +2766,8 @@ trident_open(struct inode *inode, struct file *file)
2765 init_waitqueue_head(&dmabuf->wait); 2766 init_waitqueue_head(&dmabuf->wait);
2766 file->private_data = state; 2767 file->private_data = state;
2767 2768
2768 /* set default sample format. According to OSS Programmer's */ 2769 /* set default sample format. According to OSS Programmer's */
2769 /* Guide /dev/dsp should be default to unsigned 8-bits, mono, */ 2770 /* Guide /dev/dsp should be default to unsigned 8-bits, mono, */
2770 /* with sample rate 8kHz and /dev/dspW will accept 16-bits sample */ 2771 /* with sample rate 8kHz and /dev/dspW will accept 16-bits sample */
2771 if (file->f_mode & FMODE_WRITE) { 2772 if (file->f_mode & FMODE_WRITE) {
2772 dmabuf->fmt &= ~TRIDENT_FMT_MASK; 2773 dmabuf->fmt &= ~TRIDENT_FMT_MASK;
@@ -2779,11 +2780,13 @@ trident_open(struct inode *inode, struct file *file)
2779 /* set default channel attribute to normal playback */ 2780 /* set default channel attribute to normal playback */
2780 dmabuf->channel->attribute = CHANNEL_PB; 2781 dmabuf->channel->attribute = CHANNEL_PB;
2781 } 2782 }
2783 spin_lock_irqsave(&card->lock, flags);
2782 trident_set_dac_rate(state, 8000); 2784 trident_set_dac_rate(state, 8000);
2785 spin_unlock_irqrestore(&card->lock, flags);
2783 } 2786 }
2784 2787
2785 if (file->f_mode & FMODE_READ) { 2788 if (file->f_mode & FMODE_READ) {
2786 /* FIXME: Trident 4d can only record in signed 16-bits stereo, */ 2789 /* FIXME: Trident 4d can only record in signed 16-bits stereo, */
2787 /* 48kHz sample, to be dealed with in trident_set_adc_rate() ?? */ 2790 /* 48kHz sample, to be dealed with in trident_set_adc_rate() ?? */
2788 dmabuf->fmt &= ~TRIDENT_FMT_MASK; 2791 dmabuf->fmt &= ~TRIDENT_FMT_MASK;
2789 if ((minor & 0x0f) == SND_DEV_DSP16) 2792 if ((minor & 0x0f) == SND_DEV_DSP16)
@@ -2794,10 +2797,12 @@ trident_open(struct inode *inode, struct file *file)
2794 if (card->pci_id == PCI_DEVICE_ID_SI_7018) { 2797 if (card->pci_id == PCI_DEVICE_ID_SI_7018) {
2795 /* set default channel attribute to 0x8a80, record from 2798 /* set default channel attribute to 0x8a80, record from
2796 PCM L/R FIFO and mono = (left + right + 1)/2 */ 2799 PCM L/R FIFO and mono = (left + right + 1)/2 */
2797 dmabuf->channel->attribute = (CHANNEL_REC | PCM_LR | 2800 dmabuf->channel->attribute = (CHANNEL_REC | PCM_LR |
2798 MONO_MIX); 2801 MONO_MIX);
2799 } 2802 }
2803 spin_lock_irqsave(&card->lock, flags);
2800 trident_set_adc_rate(state, 8000); 2804 trident_set_adc_rate(state, 8000);
2805 spin_unlock_irqrestore(&card->lock, flags);
2801 2806
2802 /* Added by Matt Wu 01-05-2001 */ 2807 /* Added by Matt Wu 01-05-2001 */
2803 if (card->pci_id == PCI_DEVICE_ID_ALI_5451) 2808 if (card->pci_id == PCI_DEVICE_ID_ALI_5451)
@@ -3020,7 +3025,7 @@ acquirecodecaccess(struct trident_card *card)
3020 break; 3025 break;
3021 if (wsemabits == 0) { 3026 if (wsemabits == 0) {
3022 unlock: 3027 unlock:
3023 outl(((u32) (wcontrol & 0x1eff) | 0x00004000), 3028 outl(((u32) (wcontrol & 0x1eff) | 0x00004000),
3024 TRID_REG(card, ALI_AC97_WRITE)); 3029 TRID_REG(card, ALI_AC97_WRITE));
3025 continue; 3030 continue;
3026 } 3031 }
@@ -3104,7 +3109,7 @@ ali_ac97_get(struct trident_card *card, int secondary, u8 reg)
3104 ncount = 10; 3109 ncount = 10;
3105 3110
3106 while (1) { 3111 while (1) {
3107 if ((inw(TRID_REG(card, ALI_AC97_WRITE)) & ALI_AC97_BUSY_READ) 3112 if ((inw(TRID_REG(card, ALI_AC97_WRITE)) & ALI_AC97_BUSY_READ)
3108 != 0) 3113 != 0)
3109 break; 3114 break;
3110 if (ncount <= 0) 3115 if (ncount <= 0)
@@ -3112,7 +3117,7 @@ ali_ac97_get(struct trident_card *card, int secondary, u8 reg)
3112 if (ncount-- == 1) { 3117 if (ncount-- == 1) {
3113 pr_debug("ali_ac97_read :try clear busy flag\n"); 3118 pr_debug("ali_ac97_read :try clear busy flag\n");
3114 aud_reg = inl(TRID_REG(card, ALI_AC97_WRITE)); 3119 aud_reg = inl(TRID_REG(card, ALI_AC97_WRITE));
3115 outl((aud_reg & 0xffff7fff), 3120 outl((aud_reg & 0xffff7fff),
3116 TRID_REG(card, ALI_AC97_WRITE)); 3121 TRID_REG(card, ALI_AC97_WRITE));
3117 } 3122 }
3118 udelay(10); 3123 udelay(10);
@@ -3159,7 +3164,7 @@ ali_ac97_set(struct trident_card *card, int secondary, u8 reg, u16 val)
3159 3164
3160 wcontrol = inw(TRID_REG(card, ALI_AC97_WRITE)); 3165 wcontrol = inw(TRID_REG(card, ALI_AC97_WRITE));
3161 wcontrol &= 0xff00; 3166 wcontrol &= 0xff00;
3162 wcontrol |= (0x8100 | reg); /* bit 8=1: (ali1535 )reserved/ */ 3167 wcontrol |= (0x8100 | reg); /* bit 8=1: (ali1535 )reserved/ */
3163 /* ali1535+ write */ 3168 /* ali1535+ write */
3164 outl((data | wcontrol), TRID_REG(card, ALI_AC97_WRITE)); 3169 outl((data | wcontrol), TRID_REG(card, ALI_AC97_WRITE));
3165 3170
@@ -3177,7 +3182,7 @@ ali_ac97_set(struct trident_card *card, int secondary, u8 reg, u16 val)
3177 break; 3182 break;
3178 if (ncount-- == 1) { 3183 if (ncount-- == 1) {
3179 pr_debug("ali_ac97_set :try clear busy flag!!\n"); 3184 pr_debug("ali_ac97_set :try clear busy flag!!\n");
3180 outw(wcontrol & 0x7fff, 3185 outw(wcontrol & 0x7fff,
3181 TRID_REG(card, ALI_AC97_WRITE)); 3186 TRID_REG(card, ALI_AC97_WRITE));
3182 } 3187 }
3183 udelay(10); 3188 udelay(10);
@@ -3382,7 +3387,7 @@ ali_detect_spdif_rate(struct trident_card *card)
3382 bval |= 0x1F; 3387 bval |= 0x1F;
3383 outb(bval, TRID_REG(card, ALI_SPDIF_CTRL + 1)); 3388 outb(bval, TRID_REG(card, ALI_SPDIF_CTRL + 1));
3384 3389
3385 while (((R1 < 0x0B) || (R1 > 0x0E)) && (R1 != 0x12) && 3390 while (((R1 < 0x0B) || (R1 > 0x0E)) && (R1 != 0x12) &&
3386 count <= 50000) { 3391 count <= 50000) {
3387 count++; 3392 count++;
3388 3393
@@ -3669,14 +3674,14 @@ ali_save_regs(struct trident_card *card)
3669 spin_lock_irqsave(&card->lock, flags); 3674 spin_lock_irqsave(&card->lock, flags);
3670 3675
3671 ali_registers.global_regs[0x2c] = inl(TRID_REG(card, T4D_MISCINT)); 3676 ali_registers.global_regs[0x2c] = inl(TRID_REG(card, T4D_MISCINT));
3672 //ali_registers.global_regs[0x20] = inl(TRID_REG(card,T4D_START_A)); 3677 //ali_registers.global_regs[0x20] = inl(TRID_REG(card,T4D_START_A));
3673 ali_registers.global_regs[0x21] = inl(TRID_REG(card, T4D_STOP_A)); 3678 ali_registers.global_regs[0x21] = inl(TRID_REG(card, T4D_STOP_A));
3674 3679
3675 //disable all IRQ bits 3680 //disable all IRQ bits
3676 outl(ALI_DISABLE_ALL_IRQ, TRID_REG(card, T4D_MISCINT)); 3681 outl(ALI_DISABLE_ALL_IRQ, TRID_REG(card, T4D_MISCINT));
3677 3682
3678 for (i = 1; i < ALI_MIXER_REGS; i++) 3683 for (i = 1; i < ALI_MIXER_REGS; i++)
3679 ali_registers.mixer_regs[i] = ali_ac97_read(card->ac97_codec[0], 3684 ali_registers.mixer_regs[i] = ali_ac97_read(card->ac97_codec[0],
3680 i * 2); 3685 i * 2);
3681 3686
3682 for (i = 0; i < ALI_GLOBAL_REGS; i++) { 3687 for (i = 0; i < ALI_GLOBAL_REGS; i++) {
@@ -3688,7 +3693,7 @@ ali_save_regs(struct trident_card *card)
3688 for (i = 0; i < ALI_CHANNELS; i++) { 3693 for (i = 0; i < ALI_CHANNELS; i++) {
3689 outb(i, TRID_REG(card, T4D_LFO_GC_CIR)); 3694 outb(i, TRID_REG(card, T4D_LFO_GC_CIR));
3690 for (j = 0; j < ALI_CHANNEL_REGS; j++) 3695 for (j = 0; j < ALI_CHANNEL_REGS; j++)
3691 ali_registers.channel_regs[i][j] = inl(TRID_REG(card, 3696 ali_registers.channel_regs[i][j] = inl(TRID_REG(card,
3692 j * 4 + 0xe0)); 3697 j * 4 + 0xe0));
3693 } 3698 }
3694 3699
@@ -3707,18 +3712,18 @@ ali_restore_regs(struct trident_card *card)
3707 spin_lock_irqsave(&card->lock, flags); 3712 spin_lock_irqsave(&card->lock, flags);
3708 3713
3709 for (i = 1; i < ALI_MIXER_REGS; i++) 3714 for (i = 1; i < ALI_MIXER_REGS; i++)
3710 ali_ac97_write(card->ac97_codec[0], i * 2, 3715 ali_ac97_write(card->ac97_codec[0], i * 2,
3711 ali_registers.mixer_regs[i]); 3716 ali_registers.mixer_regs[i]);
3712 3717
3713 for (i = 0; i < ALI_CHANNELS; i++) { 3718 for (i = 0; i < ALI_CHANNELS; i++) {
3714 outb(i, TRID_REG(card, T4D_LFO_GC_CIR)); 3719 outb(i, TRID_REG(card, T4D_LFO_GC_CIR));
3715 for (j = 0; j < ALI_CHANNEL_REGS; j++) 3720 for (j = 0; j < ALI_CHANNEL_REGS; j++)
3716 outl(ali_registers.channel_regs[i][j], 3721 outl(ali_registers.channel_regs[i][j],
3717 TRID_REG(card, j * 4 + 0xe0)); 3722 TRID_REG(card, j * 4 + 0xe0));
3718 } 3723 }
3719 3724
3720 for (i = 0; i < ALI_GLOBAL_REGS; i++) { 3725 for (i = 0; i < ALI_GLOBAL_REGS; i++) {
3721 if ((i * 4 == T4D_MISCINT) || (i * 4 == T4D_STOP_A) || 3726 if ((i * 4 == T4D_MISCINT) || (i * 4 == T4D_STOP_A) ||
3722 (i * 4 == T4D_START_A)) 3727 (i * 4 == T4D_START_A))
3723 continue; 3728 continue;
3724 outl(ali_registers.global_regs[i], TRID_REG(card, i * 4)); 3729 outl(ali_registers.global_regs[i], TRID_REG(card, i * 4));
@@ -3763,7 +3768,7 @@ ali_alloc_pcm_channel(struct trident_card *card)
3763 3768
3764 bank = &card->banks[BANK_A]; 3769 bank = &card->banks[BANK_A];
3765 3770
3766 if (inl(TRID_REG(card, ALI_GLOBAL_CONTROL)) & 3771 if (inl(TRID_REG(card, ALI_GLOBAL_CONTROL)) &
3767 (ALI_SPDIF_OUT_CH_ENABLE)) { 3772 (ALI_SPDIF_OUT_CH_ENABLE)) {
3768 idx = ALI_SPDIF_OUT_CHANNEL; 3773 idx = ALI_SPDIF_OUT_CHANNEL;
3769 if (!(bank->bitmap & (1 << idx))) { 3774 if (!(bank->bitmap & (1 << idx))) {
@@ -3774,7 +3779,7 @@ ali_alloc_pcm_channel(struct trident_card *card)
3774 } 3779 }
3775 } 3780 }
3776 3781
3777 for (idx = ALI_PCM_OUT_CHANNEL_FIRST; idx <= ALI_PCM_OUT_CHANNEL_LAST; 3782 for (idx = ALI_PCM_OUT_CHANNEL_FIRST; idx <= ALI_PCM_OUT_CHANNEL_LAST;
3778 idx++) { 3783 idx++) {
3779 if (!(bank->bitmap & (1 << idx))) { 3784 if (!(bank->bitmap & (1 << idx))) {
3780 struct trident_channel *channel = &bank->channels[idx]; 3785 struct trident_channel *channel = &bank->channels[idx];
@@ -3785,9 +3790,9 @@ ali_alloc_pcm_channel(struct trident_card *card)
3785 } 3790 }
3786 3791
3787 /* no more free channels avaliable */ 3792 /* no more free channels avaliable */
3788#if 0 3793#if 0
3789 printk(KERN_ERR "ali: no more channels available on Bank A.\n"); 3794 printk(KERN_ERR "ali: no more channels available on Bank A.\n");
3790#endif /* 0 */ 3795#endif /* 0 */
3791 return NULL; 3796 return NULL;
3792} 3797}
3793 3798
@@ -3812,9 +3817,9 @@ ali_alloc_rec_pcm_channel(struct trident_card *card)
3812 } 3817 }
3813 3818
3814 /* no free recordable channels avaliable */ 3819 /* no free recordable channels avaliable */
3815#if 0 3820#if 0
3816 printk(KERN_ERR "ali: no recordable channels available on Bank A.\n"); 3821 printk(KERN_ERR "ali: no recordable channels available on Bank A.\n");
3817#endif /* 0 */ 3822#endif /* 0 */
3818 return NULL; 3823 return NULL;
3819} 3824}
3820 3825
@@ -3837,14 +3842,14 @@ ali_set_spdif_out_rate(struct trident_card *card, unsigned int rate)
3837 break; 3842 break;
3838 } 3843 }
3839 3844
3840 /* select spdif_out */ 3845 /* select spdif_out */
3841 ch_st_sel = inb(TRID_REG(card, ALI_SPDIF_CTRL)) & ALI_SPDIF_OUT_CH_STATUS; 3846 ch_st_sel = inb(TRID_REG(card, ALI_SPDIF_CTRL)) & ALI_SPDIF_OUT_CH_STATUS;
3842 3847
3843 ch_st_sel |= 0x80; /* select right */ 3848 ch_st_sel |= 0x80; /* select right */
3844 outb(ch_st_sel, TRID_REG(card, ALI_SPDIF_CTRL)); 3849 outb(ch_st_sel, TRID_REG(card, ALI_SPDIF_CTRL));
3845 outb(status_rate | 0x20, TRID_REG(card, ALI_SPDIF_CS + 2)); 3850 outb(status_rate | 0x20, TRID_REG(card, ALI_SPDIF_CS + 2));
3846 3851
3847 ch_st_sel &= (~0x80); /* select left */ 3852 ch_st_sel &= (~0x80); /* select left */
3848 outb(ch_st_sel, TRID_REG(card, ALI_SPDIF_CTRL)); 3853 outb(ch_st_sel, TRID_REG(card, ALI_SPDIF_CTRL));
3849 outw(status_rate | 0x10, TRID_REG(card, ALI_SPDIF_CS + 2)); 3854 outw(status_rate | 0x10, TRID_REG(card, ALI_SPDIF_CS + 2));
3850} 3855}
@@ -3881,14 +3886,14 @@ ali_address_interrupt(struct trident_card *card)
3881 } 3886 }
3882} 3887}
3883 3888
3884/* Updating the values of counters of other_states' DMAs without lock 3889/* Updating the values of counters of other_states' DMAs without lock
3885protection is no harm because all DMAs of multi-channels and interrupt 3890protection is no harm because all DMAs of multi-channels and interrupt
3886depend on a master state's DMA, and changing the counters of the master 3891depend on a master state's DMA, and changing the counters of the master
3887state DMA is protected by a spinlock. 3892state DMA is protected by a spinlock.
3888*/ 3893*/
3889static int 3894static int
3890ali_write_5_1(struct trident_state *state, const char __user *buf, 3895ali_write_5_1(struct trident_state *state, const char __user *buf,
3891 int cnt_for_multi_channel, unsigned int *copy_count, 3896 int cnt_for_multi_channel, unsigned int *copy_count,
3892 unsigned int *state_cnt) 3897 unsigned int *state_cnt)
3893{ 3898{
3894 3899
@@ -3904,10 +3909,10 @@ ali_write_5_1(struct trident_state *state, const char __user *buf,
3904 3909
3905 if ((i = state->multi_channels_adjust_count) > 0) { 3910 if ((i = state->multi_channels_adjust_count) > 0) {
3906 if (i == 1) { 3911 if (i == 1) {
3907 if (copy_from_user(dmabuf->rawbuf + swptr, 3912 if (copy_from_user(dmabuf->rawbuf + swptr,
3908 buffer, sample_s)) 3913 buffer, sample_s))
3909 return -EFAULT; 3914 return -EFAULT;
3910 seek_offset(swptr, buffer, cnt_for_multi_channel, 3915 seek_offset(swptr, buffer, cnt_for_multi_channel,
3911 sample_s, *copy_count); 3916 sample_s, *copy_count);
3912 i--; 3917 i--;
3913 (*state_cnt) += sample_s; 3918 (*state_cnt) += sample_s;
@@ -3916,10 +3921,10 @@ ali_write_5_1(struct trident_state *state, const char __user *buf,
3916 i = i - (state->chans_num - other_dma_nums); 3921 i = i - (state->chans_num - other_dma_nums);
3917 for (; (i < other_dma_nums) && (cnt_for_multi_channel > 0); i++) { 3922 for (; (i < other_dma_nums) && (cnt_for_multi_channel > 0); i++) {
3918 dmabuf_temp = &state->other_states[i]->dmabuf; 3923 dmabuf_temp = &state->other_states[i]->dmabuf;
3919 if (copy_from_user(dmabuf_temp->rawbuf + dmabuf_temp->swptr, 3924 if (copy_from_user(dmabuf_temp->rawbuf + dmabuf_temp->swptr,
3920 buffer, sample_s)) 3925 buffer, sample_s))
3921 return -EFAULT; 3926 return -EFAULT;
3922 seek_offset(dmabuf_temp->swptr, buffer, cnt_for_multi_channel, 3927 seek_offset(dmabuf_temp->swptr, buffer, cnt_for_multi_channel,
3923 sample_s, *copy_count); 3928 sample_s, *copy_count);
3924 } 3929 }
3925 if (cnt_for_multi_channel == 0) 3930 if (cnt_for_multi_channel == 0)
@@ -3928,39 +3933,39 @@ ali_write_5_1(struct trident_state *state, const char __user *buf,
3928 if (cnt_for_multi_channel > 0) { 3933 if (cnt_for_multi_channel > 0) {
3929 loop = cnt_for_multi_channel / (state->chans_num * sample_s); 3934 loop = cnt_for_multi_channel / (state->chans_num * sample_s);
3930 for (i = 0; i < loop; i++) { 3935 for (i = 0; i < loop; i++) {
3931 if (copy_from_user(dmabuf->rawbuf + swptr, buffer, 3936 if (copy_from_user(dmabuf->rawbuf + swptr, buffer,
3932 sample_s * 2)) 3937 sample_s * 2))
3933 return -EFAULT; 3938 return -EFAULT;
3934 seek_offset(swptr, buffer, cnt_for_multi_channel, 3939 seek_offset(swptr, buffer, cnt_for_multi_channel,
3935 sample_s * 2, *copy_count); 3940 sample_s * 2, *copy_count);
3936 (*state_cnt) += (sample_s * 2); 3941 (*state_cnt) += (sample_s * 2);
3937 3942
3938 dmabuf_temp = &state->other_states[0]->dmabuf; 3943 dmabuf_temp = &state->other_states[0]->dmabuf;
3939 if (copy_from_user(dmabuf_temp->rawbuf + dmabuf_temp->swptr, 3944 if (copy_from_user(dmabuf_temp->rawbuf + dmabuf_temp->swptr,
3940 buffer, sample_s)) 3945 buffer, sample_s))
3941 return -EFAULT; 3946 return -EFAULT;
3942 seek_offset(dmabuf_temp->swptr, buffer, cnt_for_multi_channel, 3947 seek_offset(dmabuf_temp->swptr, buffer, cnt_for_multi_channel,
3943 sample_s, *copy_count); 3948 sample_s, *copy_count);
3944 3949
3945 dmabuf_temp = &state->other_states[1]->dmabuf; 3950 dmabuf_temp = &state->other_states[1]->dmabuf;
3946 if (copy_from_user(dmabuf_temp->rawbuf + dmabuf_temp->swptr, 3951 if (copy_from_user(dmabuf_temp->rawbuf + dmabuf_temp->swptr,
3947 buffer, sample_s)) 3952 buffer, sample_s))
3948 return -EFAULT; 3953 return -EFAULT;
3949 seek_offset(dmabuf_temp->swptr, buffer, cnt_for_multi_channel, 3954 seek_offset(dmabuf_temp->swptr, buffer, cnt_for_multi_channel,
3950 sample_s, *copy_count); 3955 sample_s, *copy_count);
3951 3956
3952 dmabuf_temp = &state->other_states[2]->dmabuf; 3957 dmabuf_temp = &state->other_states[2]->dmabuf;
3953 if (copy_from_user(dmabuf_temp->rawbuf + dmabuf_temp->swptr, 3958 if (copy_from_user(dmabuf_temp->rawbuf + dmabuf_temp->swptr,
3954 buffer, sample_s)) 3959 buffer, sample_s))
3955 return -EFAULT; 3960 return -EFAULT;
3956 seek_offset(dmabuf_temp->swptr, buffer, cnt_for_multi_channel, 3961 seek_offset(dmabuf_temp->swptr, buffer, cnt_for_multi_channel,
3957 sample_s, *copy_count); 3962 sample_s, *copy_count);
3958 3963
3959 dmabuf_temp = &state->other_states[3]->dmabuf; 3964 dmabuf_temp = &state->other_states[3]->dmabuf;
3960 if (copy_from_user(dmabuf_temp->rawbuf + dmabuf_temp->swptr, 3965 if (copy_from_user(dmabuf_temp->rawbuf + dmabuf_temp->swptr,
3961 buffer, sample_s)) 3966 buffer, sample_s))
3962 return -EFAULT; 3967 return -EFAULT;
3963 seek_offset(dmabuf_temp->swptr, buffer, cnt_for_multi_channel, 3968 seek_offset(dmabuf_temp->swptr, buffer, cnt_for_multi_channel,
3964 sample_s, *copy_count); 3969 sample_s, *copy_count);
3965 } 3970 }
3966 3971
@@ -3969,15 +3974,15 @@ ali_write_5_1(struct trident_state *state, const char __user *buf,
3969 3974
3970 if (copy_from_user(dmabuf->rawbuf + swptr, buffer, sample_s)) 3975 if (copy_from_user(dmabuf->rawbuf + swptr, buffer, sample_s))
3971 return -EFAULT; 3976 return -EFAULT;
3972 seek_offset(swptr, buffer, cnt_for_multi_channel, 3977 seek_offset(swptr, buffer, cnt_for_multi_channel,
3973 sample_s, *copy_count); 3978 sample_s, *copy_count);
3974 (*state_cnt) += sample_s; 3979 (*state_cnt) += sample_s;
3975 3980
3976 if (cnt_for_multi_channel > 0) { 3981 if (cnt_for_multi_channel > 0) {
3977 if (copy_from_user(dmabuf->rawbuf + swptr, 3982 if (copy_from_user(dmabuf->rawbuf + swptr,
3978 buffer, sample_s)) 3983 buffer, sample_s))
3979 return -EFAULT; 3984 return -EFAULT;
3980 seek_offset(swptr, buffer, cnt_for_multi_channel, 3985 seek_offset(swptr, buffer, cnt_for_multi_channel,
3981 sample_s, *copy_count); 3986 sample_s, *copy_count);
3982 (*state_cnt) += sample_s; 3987 (*state_cnt) += sample_s;
3983 3988
@@ -3986,12 +3991,12 @@ ali_write_5_1(struct trident_state *state, const char __user *buf,
3986 loop = state->multi_channels_adjust_count - diff; 3991 loop = state->multi_channels_adjust_count - diff;
3987 for (i = 0; i < loop; i++) { 3992 for (i = 0; i < loop; i++) {
3988 dmabuf_temp = &state->other_states[i]->dmabuf; 3993 dmabuf_temp = &state->other_states[i]->dmabuf;
3989 if (copy_from_user(dmabuf_temp->rawbuf + 3994 if (copy_from_user(dmabuf_temp->rawbuf +
3990 dmabuf_temp->swptr, 3995 dmabuf_temp->swptr,
3991 buffer, sample_s)) 3996 buffer, sample_s))
3992 return -EFAULT; 3997 return -EFAULT;
3993 seek_offset(dmabuf_temp->swptr, buffer, 3998 seek_offset(dmabuf_temp->swptr, buffer,
3994 cnt_for_multi_channel, 3999 cnt_for_multi_channel,
3995 sample_s, *copy_count); 4000 sample_s, *copy_count);
3996 } 4001 }
3997 } 4002 }
@@ -4048,11 +4053,11 @@ ali_write_proc(struct file *file, const char __user *buffer, unsigned long count
4048 ali_disable_special_channel(card, ALI_SPDIF_OUT_CHANNEL); 4053 ali_disable_special_channel(card, ALI_SPDIF_OUT_CHANNEL);
4049 break; 4054 break;
4050 case '1': 4055 case '1':
4051 ali_setup_spdif_out(card, ALI_SPDIF_OUT_TO_SPDIF_OUT | 4056 ali_setup_spdif_out(card, ALI_SPDIF_OUT_TO_SPDIF_OUT |
4052 ALI_SPDIF_OUT_PCM); 4057 ALI_SPDIF_OUT_PCM);
4053 break; 4058 break;
4054 case '2': 4059 case '2':
4055 ali_setup_spdif_out(card, ALI_SPDIF_OUT_TO_SPDIF_OUT | 4060 ali_setup_spdif_out(card, ALI_SPDIF_OUT_TO_SPDIF_OUT |
4056 ALI_SPDIF_OUT_NON_PCM); 4061 ALI_SPDIF_OUT_NON_PCM);
4057 break; 4062 break;
4058 case '3': 4063 case '3':
@@ -4077,7 +4082,7 @@ trident_open_mixdev(struct inode *inode, struct file *file)
4077 4082
4078 for (card = devs; card != NULL; card = card->next) 4083 for (card = devs; card != NULL; card = card->next)
4079 for (i = 0; i < NR_AC97; i++) 4084 for (i = 0; i < NR_AC97; i++)
4080 if (card->ac97_codec[i] != NULL && 4085 if (card->ac97_codec[i] != NULL &&
4081 card->ac97_codec[i]->dev_mixer == minor) 4086 card->ac97_codec[i]->dev_mixer == minor)
4082 goto match; 4087 goto match;
4083 4088
@@ -4091,7 +4096,7 @@ trident_open_mixdev(struct inode *inode, struct file *file)
4091} 4096}
4092 4097
4093static int 4098static int
4094trident_ioctl_mixdev(struct inode *inode, struct file *file, unsigned int cmd, 4099trident_ioctl_mixdev(struct inode *inode, struct file *file, unsigned int cmd,
4095 unsigned long arg) 4100 unsigned long arg)
4096{ 4101{
4097 struct ac97_codec *codec = (struct ac97_codec *) file->private_data; 4102 struct ac97_codec *codec = (struct ac97_codec *) file->private_data;
@@ -4185,9 +4190,9 @@ trident_ac97_init(struct trident_card *card)
4185 /* disable AC97 GPIO interrupt */ 4190 /* disable AC97 GPIO interrupt */
4186 outl(0x00, TRID_REG(card, SI_AC97_GPIO)); 4191 outl(0x00, TRID_REG(card, SI_AC97_GPIO));
4187 /* when power up the AC link is in cold reset mode so stop it */ 4192 /* when power up the AC link is in cold reset mode so stop it */
4188 outl(PCMOUT | SURROUT | CENTEROUT | LFEOUT | SECONDARY_ID, 4193 outl(PCMOUT | SURROUT | CENTEROUT | LFEOUT | SECONDARY_ID,
4189 TRID_REG(card, SI_SERIAL_INTF_CTRL)); 4194 TRID_REG(card, SI_SERIAL_INTF_CTRL));
4190 /* it take a long time to recover from a cold reset */ 4195 /* it take a long time to recover from a cold reset */
4191 /* (especially when you have more than one codec) */ 4196 /* (especially when you have more than one codec) */
4192 udelay(2000); 4197 udelay(2000);
4193 ready_2nd = inl(TRID_REG(card, SI_SERIAL_INTF_CTRL)); 4198 ready_2nd = inl(TRID_REG(card, SI_SERIAL_INTF_CTRL));
@@ -4207,9 +4212,9 @@ trident_ac97_init(struct trident_card *card)
4207 /* disable AC97 GPIO interrupt */ 4212 /* disable AC97 GPIO interrupt */
4208 outl(0x00, TRID_REG(card, SI_AC97_GPIO)); 4213 outl(0x00, TRID_REG(card, SI_AC97_GPIO));
4209 /* when power up, the AC link is in cold reset mode, so stop it */ 4214 /* when power up, the AC link is in cold reset mode, so stop it */
4210 outl(PCMOUT | SURROUT | CENTEROUT | LFEOUT, 4215 outl(PCMOUT | SURROUT | CENTEROUT | LFEOUT,
4211 TRID_REG(card, SI_SERIAL_INTF_CTRL)); 4216 TRID_REG(card, SI_SERIAL_INTF_CTRL));
4212 /* it take a long time to recover from a cold reset (especially */ 4217 /* it take a long time to recover from a cold reset (especially */
4213 /* when you have more than one codec) */ 4218 /* when you have more than one codec) */
4214 udelay(2000); 4219 udelay(2000);
4215 ready_2nd = inl(TRID_REG(card, SI_SERIAL_INTF_CTRL)); 4220 ready_2nd = inl(TRID_REG(card, SI_SERIAL_INTF_CTRL));
@@ -4221,7 +4226,7 @@ trident_ac97_init(struct trident_card *card)
4221 if ((codec = ac97_alloc_codec()) == NULL) 4226 if ((codec = ac97_alloc_codec()) == NULL)
4222 return -ENOMEM; 4227 return -ENOMEM;
4223 4228
4224 /* initialize some basic codec information, other fields */ 4229 /* initialize some basic codec information, other fields */
4225 /* will be filled in ac97_probe_codec */ 4230 /* will be filled in ac97_probe_codec */
4226 codec->private_data = card; 4231 codec->private_data = card;
4227 codec->id = num_ac97; 4232 codec->id = num_ac97;
@@ -4352,8 +4357,8 @@ static inline int trident_register_gameport(struct trident_card *card) { return
4352static inline void trident_unregister_gameport(struct trident_card *card) { } 4357static inline void trident_unregister_gameport(struct trident_card *card) { }
4353#endif /* SUPPORT_JOYSTICK */ 4358#endif /* SUPPORT_JOYSTICK */
4354 4359
4355/* install the driver, we do not allocate hardware channel nor DMA buffer */ 4360/* install the driver, we do not allocate hardware channel nor DMA buffer */
4356/* now, they are defered until "ACCESS" time (in prog_dmabuf called by */ 4361/* now, they are defered until "ACCESS" time (in prog_dmabuf called by */
4357/* open/read/write/ioctl/mmap) */ 4362/* open/read/write/ioctl/mmap) */
4358static int __devinit 4363static int __devinit
4359trident_probe(struct pci_dev *pci_dev, const struct pci_device_id *pci_id) 4364trident_probe(struct pci_dev *pci_dev, const struct pci_device_id *pci_id)
@@ -4376,9 +4381,9 @@ trident_probe(struct pci_dev *pci_dev, const struct pci_device_id *pci_id)
4376 else 4381 else
4377 dma_mask = TRIDENT_DMA_MASK; 4382 dma_mask = TRIDENT_DMA_MASK;
4378 if (pci_set_dma_mask(pci_dev, dma_mask)) { 4383 if (pci_set_dma_mask(pci_dev, dma_mask)) {
4379 printk(KERN_ERR "trident: architecture does not support" 4384 printk(KERN_ERR "trident: architecture does not support"
4380 " %s PCI busmaster DMA\n", 4385 " %s PCI busmaster DMA\n",
4381 pci_dev->device == PCI_DEVICE_ID_ALI_5451 ? 4386 pci_dev->device == PCI_DEVICE_ID_ALI_5451 ?
4382 "32-bit" : "30-bit"); 4387 "32-bit" : "30-bit");
4383 goto out; 4388 goto out;
4384 } 4389 }
@@ -4422,7 +4427,7 @@ trident_probe(struct pci_dev *pci_dev, const struct pci_device_id *pci_id)
4422 4427
4423 pci_set_master(pci_dev); 4428 pci_set_master(pci_dev);
4424 4429
4425 printk(KERN_INFO "trident: %s found at IO 0x%04lx, IRQ %d\n", 4430 printk(KERN_INFO "trident: %s found at IO 0x%04lx, IRQ %d\n",
4426 card_names[pci_id->driver_data], card->iobase, card->irq); 4431 card_names[pci_id->driver_data], card->iobase, card->irq);
4427 4432
4428 if (card->pci_id == PCI_DEVICE_ID_ALI_5451) { 4433 if (card->pci_id == PCI_DEVICE_ID_ALI_5451) {
@@ -4449,9 +4454,9 @@ trident_probe(struct pci_dev *pci_dev, const struct pci_device_id *pci_id)
4449 4454
4450 /* Add H/W Volume Control By Matt Wu Jul. 06, 2001 */ 4455 /* Add H/W Volume Control By Matt Wu Jul. 06, 2001 */
4451 card->hwvolctl = 0; 4456 card->hwvolctl = 0;
4452 pci_dev_m1533 = pci_find_device(PCI_VENDOR_ID_AL, 4457 pci_dev_m1533 = pci_get_device(PCI_VENDOR_ID_AL,
4453 PCI_DEVICE_ID_AL_M1533, 4458 PCI_DEVICE_ID_AL_M1533,
4454 pci_dev_m1533); 4459 pci_dev_m1533);
4455 rc = -ENODEV; 4460 rc = -ENODEV;
4456 if (pci_dev_m1533 == NULL) 4461 if (pci_dev_m1533 == NULL)
4457 goto out_proc_fs; 4462 goto out_proc_fs;
@@ -4465,6 +4470,8 @@ trident_probe(struct pci_dev *pci_dev, const struct pci_device_id *pci_id)
4465 bits &= 0xbf; /*clear bit 6 */ 4470 bits &= 0xbf; /*clear bit 6 */
4466 pci_write_config_byte(pci_dev_m1533, 0x7b, bits); 4471 pci_write_config_byte(pci_dev_m1533, 0x7b, bits);
4467 } 4472 }
4473 pci_dev_put(pci_dev_m1533);
4474
4468 } else if (card->pci_id == PCI_DEVICE_ID_INTERG_5050) { 4475 } else if (card->pci_id == PCI_DEVICE_ID_INTERG_5050) {
4469 card->alloc_pcm_channel = cyber_alloc_pcm_channel; 4476 card->alloc_pcm_channel = cyber_alloc_pcm_channel;
4470 card->alloc_rec_pcm_channel = cyber_alloc_pcm_channel; 4477 card->alloc_rec_pcm_channel = cyber_alloc_pcm_channel;
@@ -4482,7 +4489,7 @@ trident_probe(struct pci_dev *pci_dev, const struct pci_device_id *pci_id)
4482 rc = -ENODEV; 4489 rc = -ENODEV;
4483 if (request_irq(card->irq, &trident_interrupt, IRQF_SHARED, 4490 if (request_irq(card->irq, &trident_interrupt, IRQF_SHARED,
4484 card_names[pci_id->driver_data], card)) { 4491 card_names[pci_id->driver_data], card)) {
4485 printk(KERN_ERR "trident: unable to allocate irq %d\n", 4492 printk(KERN_ERR "trident: unable to allocate irq %d\n",
4486 card->irq); 4493 card->irq);
4487 goto out_proc_fs; 4494 goto out_proc_fs;
4488 } 4495 }
@@ -4533,7 +4540,7 @@ trident_probe(struct pci_dev *pci_dev, const struct pci_device_id *pci_id)
4533 printk(KERN_INFO "trident: Running on Alpha system " 4540 printk(KERN_INFO "trident: Running on Alpha system "
4534 "type Nautilus\n"); 4541 "type Nautilus\n");
4535 ac97_data = ali_ac97_get(card, 0, AC97_POWER_CONTROL); 4542 ac97_data = ali_ac97_get(card, 0, AC97_POWER_CONTROL);
4536 ali_ac97_set(card, 0, AC97_POWER_CONTROL, 4543 ali_ac97_set(card, 0, AC97_POWER_CONTROL,
4537 ac97_data | ALI_EAPD_POWER_DOWN); 4544 ac97_data | ALI_EAPD_POWER_DOWN);
4538 } 4545 }
4539 } 4546 }
@@ -4566,7 +4573,7 @@ out_proc_fs:
4566 devs = NULL; 4573 devs = NULL;
4567out_release_region: 4574out_release_region:
4568 release_region(iobase, 256); 4575 release_region(iobase, 256);
4569 return rc; 4576 return rc;
4570} 4577}
4571 4578
4572static void __devexit 4579static void __devexit
@@ -4634,8 +4641,8 @@ static struct pci_driver trident_pci_driver = {
4634static int __init 4641static int __init
4635trident_init_module(void) 4642trident_init_module(void)
4636{ 4643{
4637 printk(KERN_INFO "Trident 4DWave/SiS 7018/ALi 5451,Tvia CyberPro " 4644 printk(KERN_INFO "Trident 4DWave/SiS 7018/ALi 5451,Tvia CyberPro "
4638 "5050 PCI Audio, version " DRIVER_VERSION ", " __TIME__ " " 4645 "5050 PCI Audio, version " DRIVER_VERSION ", " __TIME__ " "
4639 __DATE__ "\n"); 4646 __DATE__ "\n");
4640 4647
4641 return pci_register_driver(&trident_pci_driver); 4648 return pci_register_driver(&trident_pci_driver);
diff --git a/sound/pci/mixart/mixart_hwdep.c b/sound/pci/mixart/mixart_hwdep.c
index 1d9232d2db34..170781a72292 100644
--- a/sound/pci/mixart/mixart_hwdep.c
+++ b/sound/pci/mixart/mixart_hwdep.c
@@ -24,6 +24,7 @@
24#include <linux/interrupt.h> 24#include <linux/interrupt.h>
25#include <linux/pci.h> 25#include <linux/pci.h>
26#include <linux/firmware.h> 26#include <linux/firmware.h>
27#include <linux/vmalloc.h>
27#include <asm/io.h> 28#include <asm/io.h>
28#include <sound/core.h> 29#include <sound/core.h>
29#include "mixart.h" 30#include "mixart.h"