-rw-r--r--Documentation/IPMI.txt13
-rw-r--r--Documentation/RCU/NMI-RCU.txt112
-rw-r--r--Documentation/cdrom/sonycd5353
-rw-r--r--Documentation/cpusets.txt12
-rw-r--r--Documentation/dcdbas.txt91
-rw-r--r--Documentation/dell_rbu.txt74
-rw-r--r--Documentation/dvb/bt8xx.txt2
-rw-r--r--Documentation/exception.txt2
-rw-r--r--Documentation/feature-removal-schedule.txt16
-rw-r--r--Documentation/filesystems/relayfs.txt362
-rw-r--r--Documentation/i386/boot.txt35
-rw-r--r--Documentation/kernel-parameters.txt5
-rw-r--r--Documentation/power/swsusp.txt101
-rw-r--r--Documentation/power/video.txt1
-rw-r--r--Documentation/sonypi.txt10
-rw-r--r--MAINTAINERS15
-rw-r--r--arch/alpha/Kconfig3
-rw-r--r--arch/alpha/kernel/time.c7
-rw-r--r--arch/arm/Kconfig4
-rw-r--r--arch/arm/boot/compressed/head-sharpsl.S111
-rw-r--r--arch/arm/configs/omap_h2_1610_defconfig290
-rw-r--r--arch/arm/kernel/time.c7
-rw-r--r--arch/arm/mach-footbridge/Kconfig1
-rw-r--r--arch/arm/mach-iop3xx/iop321-time.c2
-rw-r--r--arch/arm/mach-iop3xx/iop331-time.c2
-rw-r--r--arch/arm/mach-ixp2000/core.c2
-rw-r--r--arch/arm/mach-ixp4xx/common.c16
-rw-r--r--arch/arm/mach-omap1/irq.c8
-rw-r--r--arch/arm/mach-pxa/Makefile2
-rw-r--r--arch/arm/mach-pxa/corgi.c55
-rw-r--r--arch/arm/mach-pxa/corgi_lcd.c396
-rw-r--r--arch/arm/mach-s3c2410/Kconfig15
-rw-r--r--arch/arm/mach-s3c2410/Makefile5
-rw-r--r--arch/arm/mach-s3c2410/bast-irq.c77
-rw-r--r--arch/arm/mach-s3c2410/mach-anubis.c270
-rw-r--r--arch/arm/mach-s3c2410/pm-simtec.c2
-rw-r--r--arch/arm/mach-s3c2410/time.c2
-rw-r--r--arch/arm/plat-omap/Kconfig16
-rw-r--r--arch/arm/plat-omap/Makefile4
-rw-r--r--arch/arm/plat-omap/clock.c39
-rw-r--r--arch/arm/plat-omap/common.c7
-rw-r--r--arch/arm/plat-omap/dma.c25
-rw-r--r--arch/arm/plat-omap/dmtimer.c260
-rw-r--r--arch/arm/plat-omap/gpio.c524
-rw-r--r--arch/arm/plat-omap/mcbsp.c9
-rw-r--r--arch/arm/plat-omap/mux.c3
-rw-r--r--arch/arm/plat-omap/ocpi.c1
-rw-r--r--arch/arm/plat-omap/pm.c255
-rw-r--r--arch/arm/plat-omap/sleep.S83
-rw-r--r--arch/arm/plat-omap/sram-fn.S58
-rw-r--r--arch/arm/plat-omap/sram.c116
-rw-r--r--arch/arm/plat-omap/sram.h21
-rw-r--r--arch/arm/plat-omap/usb.c1
-rw-r--r--arch/arm26/Kconfig4
-rw-r--r--arch/arm26/Makefile4
-rw-r--r--arch/arm26/kernel/time.c7
-rw-r--r--arch/cris/arch-v10/kernel/time.c2
-rw-r--r--arch/cris/kernel/time.c5
-rw-r--r--arch/frv/kernel/time.c7
-rw-r--r--arch/h8300/kernel/time.c5
-rw-r--r--arch/i386/Kconfig9
-rw-r--r--arch/i386/boot/setup.S2
-rw-r--r--arch/i386/boot/tools/build.c4
-rw-r--r--arch/i386/kernel/dmi_scan.c231
-rw-r--r--arch/i386/kernel/entry.S13
-rw-r--r--arch/i386/kernel/io_apic.c65
-rw-r--r--arch/i386/kernel/kprobes.c35
-rw-r--r--arch/i386/kernel/nmi.c5
-rw-r--r--arch/i386/kernel/setup.c2
-rw-r--r--arch/i386/kernel/time.c13
-rw-r--r--arch/i386/kernel/timers/timer_hpet.c4
-rw-r--r--arch/i386/kernel/traps.c16
-rw-r--r--arch/i386/kernel/vmlinux.lds.S1
-rw-r--r--arch/i386/mach-default/topology.c4
-rw-r--r--arch/i386/mm/discontig.c8
-rw-r--r--arch/i386/mm/fault.c4
-rw-r--r--arch/i386/mm/init.c2
-rw-r--r--arch/i386/oprofile/init.c12
-rw-r--r--arch/i386/oprofile/nmi_int.c4
-rw-r--r--arch/i386/oprofile/nmi_timer_int.c2
-rw-r--r--arch/ia64/Kconfig5
-rw-r--r--arch/ia64/hp/sim/simserial.c2
-rw-r--r--arch/ia64/ia32/ia32_entry.S2
-rw-r--r--arch/ia64/ia32/sys_ia32.c31
-rw-r--r--arch/ia64/kernel/Makefile2
-rw-r--r--arch/ia64/kernel/domain.c396
-rw-r--r--arch/ia64/kernel/irq.c39
-rw-r--r--arch/ia64/kernel/jprobes.S1
-rw-r--r--arch/ia64/kernel/kprobes.c124
-rw-r--r--arch/ia64/kernel/traps.c5
-rw-r--r--arch/ia64/kernel/vmlinux.lds.S1
-rw-r--r--arch/ia64/lib/flush.S1
-rw-r--r--arch/ia64/mm/fault.c3
-rw-r--r--arch/ia64/sn/kernel/io_init.c2
-rw-r--r--arch/ia64/sn/kernel/tiocx.c2
-rw-r--r--arch/ia64/sn/pci/tioca_provider.c8
-rw-r--r--arch/m32r/kernel/time.c7
-rw-r--r--arch/m68k/Kconfig5
-rw-r--r--arch/m68k/bvme6000/rtc.c5
-rw-r--r--arch/m68k/kernel/time.c5
-rw-r--r--arch/m68k/mvme16x/rtc.c4
-rw-r--r--arch/m68knommu/kernel/time.c7
-rw-r--r--arch/mips/Kconfig5
-rw-r--r--arch/mips/kernel/linux32.c16
-rw-r--r--arch/mips/kernel/sysirix.c5
-rw-r--r--arch/mips/kernel/time.c7
-rw-r--r--arch/mips/sgi-ip27/ip27-timer.c2
-rw-r--r--arch/parisc/Kconfig4
-rw-r--r--arch/parisc/kernel/time.c5
-rw-r--r--arch/ppc/Kconfig4
-rw-r--r--arch/ppc/kernel/time.c7
-rw-r--r--arch/ppc/syslib/ocp.c2
-rw-r--r--arch/ppc64/Kconfig4
-rw-r--r--arch/ppc64/Makefile2
-rw-r--r--arch/ppc64/kernel/kprobes.c40
-rw-r--r--arch/ppc64/kernel/misc.S6
-rw-r--r--arch/ppc64/kernel/pSeries_reconfig.c2
-rw-r--r--arch/ppc64/kernel/sys_ppc32.c31
-rw-r--r--arch/ppc64/kernel/time.c7
-rw-r--r--arch/ppc64/kernel/traps.c5
-rw-r--r--arch/ppc64/kernel/vmlinux.lds.S1
-rw-r--r--arch/ppc64/mm/fault.c5
-rw-r--r--arch/s390/kernel/time.c5
-rw-r--r--arch/sh/Kconfig4
-rw-r--r--arch/sh/kernel/time.c11
-rw-r--r--arch/sh64/kernel/time.c11
-rw-r--r--arch/sparc/Kconfig4
-rw-r--r--arch/sparc/kernel/pcic.c5
-rw-r--r--arch/sparc/kernel/time.c7
-rw-r--r--arch/sparc64/Kconfig4
-rw-r--r--arch/sparc64/kernel/kprobes.c36
-rw-r--r--arch/sparc64/kernel/sunos_ioctl32.c9
-rw-r--r--arch/sparc64/kernel/sys_sparc32.c24
-rw-r--r--arch/sparc64/kernel/time.c2
-rw-r--r--arch/sparc64/kernel/vmlinux.lds.S1
-rw-r--r--arch/sparc64/mm/fault.c8
-rw-r--r--arch/sparc64/mm/init.c3
-rw-r--r--arch/sparc64/mm/ultra.S2
-rw-r--r--arch/um/Makefile-i3862
-rw-r--r--arch/um/include/common-offsets.h1
-rw-r--r--arch/um/include/um_uaccess.h7
-rw-r--r--arch/um/kernel/mem.c2
-rw-r--r--arch/um/os-Linux/Makefile3
-rw-r--r--arch/um/os-Linux/elf_aux.c3
-rw-r--r--arch/um/sys-i386/kernel-offsets.c1
-rw-r--r--arch/um/sys-x86_64/kernel-offsets.c1
-rw-r--r--arch/v850/kernel/time.c7
-rw-r--r--arch/x86_64/Kconfig9
-rw-r--r--arch/x86_64/boot/setup.S2
-rw-r--r--arch/x86_64/boot/tools/build.c4
-rw-r--r--arch/x86_64/ia32/ia32entry.S2
-rw-r--r--arch/x86_64/ia32/sys_ia32.c26
-rw-r--r--arch/x86_64/kernel/e820.c2
-rw-r--r--arch/x86_64/kernel/entry.S12
-rw-r--r--arch/x86_64/kernel/genapic.c2
-rw-r--r--arch/x86_64/kernel/genapic_cluster.c6
-rw-r--r--arch/x86_64/kernel/io_apic.c112
-rw-r--r--arch/x86_64/kernel/kprobes.c41
-rw-r--r--arch/x86_64/kernel/nmi.c6
-rw-r--r--arch/x86_64/kernel/process.c1
-rw-r--r--arch/x86_64/kernel/setup.c2
-rw-r--r--arch/x86_64/kernel/setup64.c2
-rw-r--r--arch/x86_64/kernel/smpboot.c10
-rw-r--r--arch/x86_64/kernel/time.c8
-rw-r--r--arch/x86_64/kernel/traps.c14
-rw-r--r--arch/x86_64/kernel/vmlinux.lds.S1
-rw-r--r--arch/x86_64/mm/fault.c4
-rw-r--r--arch/x86_64/mm/numa.c6
-rw-r--r--arch/xtensa/kernel/time.c7
-rw-r--r--crypto/cipher.c12
-rw-r--r--drivers/base/firmware_class.c79
-rw-r--r--drivers/block/Kconfig2
-rw-r--r--drivers/block/aoe/aoedev.c2
-rw-r--r--drivers/block/cfq-iosched.c3
-rw-r--r--drivers/block/deadline-iosched.c12
-rw-r--r--drivers/block/genhd.c2
-rw-r--r--drivers/block/ll_rw_blk.c4
-rw-r--r--drivers/char/Kconfig2
-rw-r--r--drivers/char/digi1.h38
-rw-r--r--drivers/char/digiFep1.h154
-rw-r--r--drivers/char/epca.c1588
-rw-r--r--drivers/char/epca.h108
-rw-r--r--drivers/char/hpet.c4
-rw-r--r--drivers/char/ipmi/ipmi_bt_sm.c69
-rw-r--r--drivers/char/ipmi/ipmi_devintf.c101
-rw-r--r--drivers/char/ipmi/ipmi_kcs_sm.c3
-rw-r--r--drivers/char/ipmi/ipmi_msghandler.c336
-rw-r--r--drivers/char/ipmi/ipmi_poweroff.c168
-rw-r--r--drivers/char/ipmi/ipmi_si_intf.c395
-rw-r--r--drivers/char/ipmi/ipmi_smic_sm.c3
-rw-r--r--drivers/char/ipmi/ipmi_watchdog.c46
-rw-r--r--drivers/char/mbcs.c2
-rw-r--r--drivers/char/mem.c4
-rw-r--r--drivers/char/misc.c9
-rw-r--r--drivers/char/sonypi.c118
-rw-r--r--drivers/char/tpm/tpm_atmel.c3
-rw-r--r--drivers/char/tty_io.c6
-rw-r--r--drivers/char/vt.c21
-rw-r--r--drivers/firmware/Kconfig27
-rw-r--r--drivers/firmware/Makefile2
-rw-r--r--drivers/firmware/dcdbas.c596
-rw-r--r--drivers/firmware/dcdbas.h107
-rw-r--r--drivers/firmware/dell_rbu.c634
-rw-r--r--drivers/i2c/chips/isp1301_omap.c2
-rw-r--r--drivers/ieee1394/nodemgr.c8
-rw-r--r--drivers/infiniband/core/sysfs.c2
-rw-r--r--drivers/input/evdev.c8
-rw-r--r--drivers/input/gameport/emu10k1-gp.c2
-rw-r--r--drivers/input/gameport/fm801-gp.c2
-rw-r--r--drivers/input/gameport/ns558.c4
-rw-r--r--drivers/input/input.c11
-rw-r--r--drivers/input/joystick/a3d.c2
-rw-r--r--drivers/input/joystick/adi.c2
-rw-r--r--drivers/input/joystick/analog.c2
-rw-r--r--drivers/input/joystick/cobra.c2
-rw-r--r--drivers/input/joystick/db9.c2
-rw-r--r--drivers/input/joystick/gamecon.c2
-rw-r--r--drivers/input/joystick/gf2k.c2
-rw-r--r--drivers/input/joystick/grip.c2
-rw-r--r--drivers/input/joystick/grip_mp.c2
-rw-r--r--drivers/input/joystick/guillemot.c2
-rw-r--r--drivers/input/joystick/interact.c2
-rw-r--r--drivers/input/joystick/sidewinder.c2
-rw-r--r--drivers/input/joystick/tmdc.c2
-rw-r--r--drivers/input/joystick/turbografx.c2
-rw-r--r--drivers/input/keyboard/corgikbd.c104
-rw-r--r--drivers/input/mouse/psmouse-base.c2
-rw-r--r--drivers/input/serio/serport.c4
-rw-r--r--drivers/input/touchscreen/corgi_ts.c51
-rw-r--r--drivers/isdn/hisax/hisax.h3
-rw-r--r--drivers/isdn/i4l/isdn_v110.c4
-rw-r--r--drivers/md/dm-io.c6
-rw-r--r--drivers/md/dm.c6
-rw-r--r--drivers/media/dvb/bt8xx/Kconfig6
-rw-r--r--drivers/media/dvb/frontends/lgdt330x.c50
-rw-r--r--drivers/media/video/Makefile2
-rw-r--r--drivers/media/video/adv7170.c1
-rw-r--r--drivers/media/video/adv7175.c1
-rw-r--r--drivers/media/video/bt819.c1
-rw-r--r--drivers/media/video/bt856.c1
-rw-r--r--drivers/media/video/indycam.c412
-rw-r--r--drivers/media/video/indycam.h112
-rw-r--r--drivers/media/video/meye.c3
-rw-r--r--drivers/media/video/saa7111.c1
-rw-r--r--drivers/media/video/saa7114.c1
-rw-r--r--drivers/media/video/saa7185.c1
-rw-r--r--drivers/media/video/saa7191.c512
-rw-r--r--drivers/media/video/saa7191.h139
-rw-r--r--drivers/media/video/vino.c4273
-rw-r--r--drivers/media/video/vino.h61
-rw-r--r--drivers/misc/Kconfig2
-rw-r--r--drivers/misc/ibmasm/uart.c20
-rw-r--r--drivers/mmc/mmc.c518
-rw-r--r--drivers/mmc/mmc_block.c9
-rw-r--r--drivers/mmc/mmc_sysfs.c21
-rw-r--r--drivers/mmc/pxamci.c11
-rw-r--r--drivers/mmc/wbsd.c60
-rw-r--r--drivers/mmc/wbsd.h3
-rw-r--r--drivers/mtd/nand/nand_base.c1
-rw-r--r--drivers/net/3c59x.c19
-rw-r--r--drivers/net/Kconfig7
-rw-r--r--drivers/net/Makefile2
-rw-r--r--drivers/net/ac3200.c2
-rw-r--r--drivers/net/arcnet/arcnet.c25
-rw-r--r--drivers/net/atarilance.c2
-rw-r--r--drivers/net/dm9000.c2
-rw-r--r--drivers/net/forcedeth.c4
-rw-r--r--drivers/net/hamachi.c4
-rw-r--r--drivers/net/irda/smsc-ircc2.c1179
-rw-r--r--drivers/net/irda/smsc-ircc2.h50
-rw-r--r--drivers/net/iseries_veth.c1
-rw-r--r--drivers/net/s2io-regs.h13
-rw-r--r--drivers/net/s2io.c108
-rw-r--r--drivers/net/s2io.h5
-rw-r--r--drivers/net/smc91x.h2
-rw-r--r--drivers/net/spider_net.c2334
-rw-r--r--drivers/net/spider_net.h469
-rw-r--r--drivers/net/spider_net_ethtool.c126
-rw-r--r--drivers/net/sun3lance.c2
-rw-r--r--drivers/net/wireless/airo.c43
-rw-r--r--drivers/net/wireless/atmel.c17
-rw-r--r--drivers/net/wireless/ipw2200.c2270
-rw-r--r--drivers/net/wireless/ipw2200.h406
-rw-r--r--drivers/net/wireless/netwave_cs.c7
-rw-r--r--drivers/net/wireless/prism54/isl_ioctl.c3
-rw-r--r--drivers/net/wireless/prism54/islpci_dev.c3
-rw-r--r--drivers/net/wireless/ray_cs.c866
-rw-r--r--drivers/net/wireless/ray_cs.h7
-rw-r--r--drivers/net/wireless/wl3501.h1
-rw-r--r--drivers/net/wireless/wl3501_cs.c7
-rw-r--r--drivers/parport/parport_pc.c3
-rw-r--r--drivers/pci/msi.c17
-rw-r--r--drivers/pci/msi.h5
-rw-r--r--drivers/pcmcia/topic.h17
-rw-r--r--drivers/pcmcia/yenta_socket.c125
-rw-r--r--drivers/pcmcia/yenta_socket.h8
-rw-r--r--drivers/pnp/card.c7
-rw-r--r--drivers/pnp/driver.c7
-rw-r--r--drivers/pnp/isapnp/core.c33
-rw-r--r--drivers/pnp/manager.c7
-rw-r--r--drivers/pnp/pnpacpi/core.c16
-rw-r--r--drivers/pnp/pnpacpi/pnpacpi.h1
-rw-r--r--drivers/pnp/pnpacpi/rsparser.c18
-rw-r--r--drivers/pnp/pnpbios/core.c26
-rw-r--r--drivers/pnp/pnpbios/pnpbios.h1
-rw-r--r--drivers/pnp/pnpbios/proc.c8
-rw-r--r--drivers/pnp/pnpbios/rsparser.c16
-rw-r--r--drivers/pnp/quirks.c7
-rw-r--r--drivers/pnp/support.c7
-rw-r--r--drivers/s390/net/claw.c20
-rw-r--r--drivers/scsi/NCR5380.c9
-rw-r--r--drivers/scsi/NCR53c406a.c2
-rw-r--r--drivers/scsi/sata_qstor.c2
-rw-r--r--drivers/serial/68328serial.c1
-rw-r--r--drivers/serial/68360serial.c8
-rw-r--r--drivers/serial/crisv10.c1
-rw-r--r--drivers/serial/icom.c1
-rw-r--r--drivers/serial/mcfserial.c1
-rw-r--r--drivers/serial/serial_lh7a40x.c4
-rw-r--r--drivers/usb/atm/usbatm.c2
-rw-r--r--drivers/usb/core/hcd.c2
-rw-r--r--drivers/usb/host/ehci-sched.c2
-rw-r--r--drivers/usb/host/isp116x-hcd.c2
-rw-r--r--drivers/usb/host/sl811-hcd.c2
-rw-r--r--drivers/usb/input/acecad.c2
-rw-r--r--drivers/usb/input/itmtouch.c2
-rw-r--r--drivers/usb/input/pid.c2
-rw-r--r--drivers/video/backlight/Makefile1
-rw-r--r--drivers/video/backlight/locomolcd.c157
-rw-r--r--drivers/video/q40fb.c1
-rw-r--r--drivers/video/w100fb.c1912
-rw-r--r--drivers/video/w100fb.h777
-rw-r--r--fs/Kconfig21
-rw-r--r--fs/Makefile1
-rw-r--r--fs/bio.c32
-rw-r--r--fs/buffer.c40
-rw-r--r--fs/cifs/connect.c82
-rw-r--r--fs/cifs/dir.c27
-rw-r--r--fs/compat.c116
-rw-r--r--fs/cramfs/inode.c43
-rw-r--r--fs/ext2/super.c59
-rw-r--r--fs/ext3/super.c92
-rw-r--r--fs/fat/dir.c28
-rw-r--r--fs/file_table.c1
-rw-r--r--fs/freevxfs/vxfs_super.c2
-rw-r--r--fs/hfs/bnode.c21
-rw-r--r--fs/hfs/catalog.c35
-rw-r--r--fs/hfs/dir.c11
-rw-r--r--fs/hfs/hfs.h1
-rw-r--r--fs/hfs/hfs_fs.h8
-rw-r--r--fs/hfs/inode.c2
-rw-r--r--fs/hfs/mdb.c6
-rw-r--r--fs/hfs/super.c68
-rw-r--r--fs/hfs/trans.c116
-rw-r--r--fs/hfsplus/bnode.c21
-rw-r--r--fs/hfsplus/hfsplus_fs.h5
-rw-r--r--fs/hfsplus/options.c26
-rw-r--r--fs/hfsplus/super.c11
-rw-r--r--fs/hostfs/hostfs.h1
-rw-r--r--fs/inode.c3
-rw-r--r--fs/inotify.c16
-rw-r--r--fs/jbd/checkpoint.c2
-rw-r--r--fs/jbd/commit.c38
-rw-r--r--fs/jbd/journal.c38
-rw-r--r--fs/jbd/revoke.c5
-rw-r--r--fs/jbd/transaction.c39
-rw-r--r--fs/jffs/inode-v23.c3
-rw-r--r--fs/jffs2/file.c3
-rw-r--r--fs/jfs/jfs_filsys.h3
-rw-r--r--fs/jfs/super.c48
-rw-r--r--fs/namei.c52
-rw-r--r--fs/namespace.c6
-rw-r--r--fs/nfsd/export.c3
-rw-r--r--fs/nfsd/nfs4idmap.c8
-rw-r--r--fs/open.c19
-rw-r--r--fs/pipe.c13
-rw-r--r--fs/proc/base.c63
-rw-r--r--fs/proc/generic.c13
-rw-r--r--fs/read_write.c2
-rw-r--r--fs/reiserfs/journal.c4
-rw-r--r--fs/relayfs/Makefile4
-rw-r--r--fs/relayfs/buffers.c189
-rw-r--r--fs/relayfs/buffers.h12
-rw-r--r--fs/relayfs/inode.c609
-rw-r--r--fs/relayfs/relay.c431
-rw-r--r--fs/relayfs/relay.h12
-rw-r--r--fs/ufs/balloc.c12
-rw-r--r--fs/ufs/ialloc.c6
-rw-r--r--fs/ufs/truncate.c9
-rw-r--r--fs/umsdos/notes17
-rw-r--r--fs/xattr.c2
-rw-r--r--fs/xfs/Makefile151
-rw-r--r--fs/xfs/Makefile-linux-2.6141
-rw-r--r--fs/xfs/linux-2.6/kmem.c23
-rw-r--r--fs/xfs/linux-2.6/kmem.h23
-rw-r--r--fs/xfs/linux-2.6/spin.h3
-rw-r--r--fs/xfs/linux-2.6/xfs_aops.c259
-rw-r--r--fs/xfs/linux-2.6/xfs_aops.h50
-rw-r--r--fs/xfs/linux-2.6/xfs_buf.c117
-rw-r--r--fs/xfs/linux-2.6/xfs_buf.h12
-rw-r--r--fs/xfs/linux-2.6/xfs_file.c90
-rw-r--r--fs/xfs/linux-2.6/xfs_ioctl.c18
-rw-r--r--fs/xfs/linux-2.6/xfs_ioctl32.c65
-rw-r--r--fs/xfs/linux-2.6/xfs_iops.c15
-rw-r--r--fs/xfs/linux-2.6/xfs_linux.h13
-rw-r--r--fs/xfs/linux-2.6/xfs_lrw.c3
-rw-r--r--fs/xfs/linux-2.6/xfs_lrw.h7
-rw-r--r--fs/xfs/linux-2.6/xfs_super.c166
-rw-r--r--fs/xfs/linux-2.6/xfs_vfs.c1
-rw-r--r--fs/xfs/linux-2.6/xfs_vfs.h2
-rw-r--r--fs/xfs/linux-2.6/xfs_vnode.c251
-rw-r--r--fs/xfs/linux-2.6/xfs_vnode.h60
-rw-r--r--fs/xfs/quota/Makefile1
-rw-r--r--fs/xfs/quota/Makefile-linux-2.653
-rw-r--r--fs/xfs/quota/xfs_dquot.c43
-rw-r--r--fs/xfs/quota/xfs_dquot.h16
-rw-r--r--fs/xfs/quota/xfs_dquot_item.c1
-rw-r--r--fs/xfs/quota/xfs_qm.c26
-rw-r--r--fs/xfs/quota/xfs_qm.h2
-rw-r--r--fs/xfs/quota/xfs_qm_bhv.c44
-rw-r--r--fs/xfs/quota/xfs_qm_syscalls.c16
-rw-r--r--fs/xfs/support/debug.c1
-rw-r--r--fs/xfs/xfs_acl.c6
-rw-r--r--fs/xfs/xfs_bmap.c12
-rw-r--r--fs/xfs/xfs_buf_item.c4
-rw-r--r--fs/xfs/xfs_dmapi.h2
-rw-r--r--fs/xfs/xfs_extfree_item.c2
-rw-r--r--fs/xfs/xfs_iget.c35
-rw-r--r--fs/xfs/xfs_inode.c3
-rw-r--r--fs/xfs/xfs_inode_item.c9
-rw-r--r--fs/xfs/xfs_iomap.c22
-rw-r--r--fs/xfs/xfs_log.c215
-rw-r--r--fs/xfs/xfs_log.h38
-rw-r--r--fs/xfs/xfs_log_priv.h68
-rw-r--r--fs/xfs/xfs_log_recover.c2
-rw-r--r--fs/xfs/xfs_qmops.c78
-rw-r--r--fs/xfs/xfs_quota.h17
-rw-r--r--fs/xfs/xfs_trans.c3
-rw-r--r--fs/xfs/xfs_trans.h2
-rw-r--r--fs/xfs/xfs_trans_ail.c2
-rw-r--r--fs/xfs/xfs_trans_buf.c23
-rw-r--r--fs/xfs/xfs_vfsops.c62
-rw-r--r--fs/xfs/xfs_vnodeops.c92
-rw-r--r--include/asm-alpha/auxvec.h24
-rw-r--r--include/asm-alpha/elf.h22
-rw-r--r--include/asm-alpha/fcntl.h35
-rw-r--r--include/asm-alpha/futex.h53
-rw-r--r--include/asm-alpha/hdreg.h1
-rw-r--r--include/asm-alpha/uaccess.h6
-rw-r--r--include/asm-arm/arch-omap/board-h4.h3
-rw-r--r--include/asm-arm/arch-omap/board-innovator.h25
-rw-r--r--include/asm-arm/arch-omap/board-perseus2.h17
-rw-r--r--include/asm-arm/arch-omap/board-voiceblue.h5
-rw-r--r--include/asm-arm/arch-omap/board.h19
-rw-r--r--include/asm-arm/arch-omap/cpu.h187
-rw-r--r--include/asm-arm/arch-omap/debug-macro.S13
-rw-r--r--include/asm-arm/arch-omap/dma.h1
-rw-r--r--include/asm-arm/arch-omap/dmtimer.h92
-rw-r--r--include/asm-arm/arch-omap/dsp.h244
-rw-r--r--include/asm-arm/arch-omap/dsp_common.h37
-rw-r--r--include/asm-arm/arch-omap/entry-macro.S28
-rw-r--r--include/asm-arm/arch-omap/gpio.h28
-rw-r--r--include/asm-arm/arch-omap/hardware.h39
-rw-r--r--include/asm-arm/arch-omap/io.h26
-rw-r--r--include/asm-arm/arch-omap/irqs.h6
-rw-r--r--include/asm-arm/arch-omap/memory.h14
-rw-r--r--include/asm-arm/arch-omap/mtd-xip.h61
-rw-r--r--include/asm-arm/arch-omap/mux.h10
-rw-r--r--include/asm-arm/arch-omap/omap1510.h13
-rw-r--r--include/asm-arm/arch-omap/omap16xx.h16
-rw-r--r--include/asm-arm/arch-omap/omap24xx.h15
-rw-r--r--include/asm-arm/arch-omap/omap730.h4
-rw-r--r--include/asm-arm/arch-omap/pm.h55
-rw-r--r--include/asm-arm/arch-omap/serial.h37
-rw-r--r--include/asm-arm/arch-omap/uncompress.h10
-rw-r--r--include/asm-arm/arch-pxa/corgi.h8
-rw-r--r--include/asm-arm/arch-pxa/mmc.h1
-rw-r--r--include/asm-arm/arch-s3c2410/anubis-cpld.h24
-rw-r--r--include/asm-arm/arch-s3c2410/anubis-irq.h23
-rw-r--r--include/asm-arm/arch-s3c2410/anubis-map.h46
-rw-r--r--include/asm-arm/auxvec.h4
-rw-r--r--include/asm-arm/fcntl.h78
-rw-r--r--include/asm-arm/futex.h53
-rw-r--r--include/asm-arm/hdreg.h1
-rw-r--r--include/asm-arm/uaccess.h6
-rw-r--r--include/asm-arm26/auxvec.h4
-rw-r--r--include/asm-arm26/fcntl.h76
-rw-r--r--include/asm-arm26/futex.h53
-rw-r--r--include/asm-arm26/hdreg.h1
-rw-r--r--include/asm-arm26/uaccess.h6
-rw-r--r--include/asm-cris/auxvec.h4
-rw-r--r--include/asm-cris/fcntl.h91
-rw-r--r--include/asm-cris/futex.h53
-rw-r--r--include/asm-cris/irq.h5
-rw-r--r--include/asm-cris/uaccess.h7
-rw-r--r--include/asm-frv/auxvec.h4
-rw-r--r--include/asm-frv/fcntl.h89
-rw-r--r--include/asm-frv/futex.h53
-rw-r--r--include/asm-frv/uaccess.h6
-rw-r--r--include/asm-generic/fcntl.h149
-rw-r--r--include/asm-generic/hdreg.h8
-rw-r--r--include/asm-generic/sections.h1
-rw-r--r--include/asm-generic/unaligned.h12
-rw-r--r--include/asm-generic/vmlinux.lds.h6
-rw-r--r--include/asm-h8300/auxvec.h4
-rw-r--r--include/asm-h8300/fcntl.h78
-rw-r--r--include/asm-h8300/futex.h53
-rw-r--r--include/asm-h8300/hdreg.h15
-rw-r--r--include/asm-h8300/uaccess.h6
-rw-r--r--include/asm-i386/auxvec.h11
-rw-r--r--include/asm-i386/elf.h8
-rw-r--r--include/asm-i386/fcntl.h89
-rw-r--r--include/asm-i386/futex.h108
-rw-r--r--include/asm-i386/hdreg.h1
-rw-r--r--include/asm-i386/uaccess.h24
-rw-r--r--include/asm-ia64/auxvec.h11
-rw-r--r--include/asm-ia64/compat.h20
-rw-r--r--include/asm-ia64/elf.h8
-rw-r--r--include/asm-ia64/fcntl.h78
-rw-r--r--include/asm-ia64/futex.h53
-rw-r--r--include/asm-ia64/hdreg.h14
-rw-r--r--include/asm-ia64/hw_irq.h7
-rw-r--r--include/asm-ia64/irq.h11
-rw-r--r--include/asm-ia64/kprobes.h1
-rw-r--r--include/asm-ia64/processor.h3
-rw-r--r--include/asm-ia64/topology.h23
-rw-r--r--include/asm-ia64/uaccess.h7
-rw-r--r--include/asm-m32r/auxvec.h4
-rw-r--r--include/asm-m32r/fcntl.h93
-rw-r--r--include/asm-m32r/futex.h53
-rw-r--r--include/asm-m32r/hdreg.h1
-rw-r--r--include/asm-m32r/uaccess.h25
-rw-r--r--include/asm-m68k/auxvec.h4
-rw-r--r--include/asm-m68k/fcntl.h78
-rw-r--r--include/asm-m68k/futex.h53
-rw-r--r--include/asm-m68k/hdreg.h1
-rw-r--r--include/asm-m68k/uaccess.h6
-rw-r--r--include/asm-m68knommu/auxvec.h4
-rw-r--r--include/asm-m68knommu/futex.h53
-rw-r--r--include/asm-m68knommu/hdreg.h1
-rw-r--r--include/asm-m68knommu/uaccess.h6
-rw-r--r--include/asm-mips/auxvec.h4
-rw-r--r--include/asm-mips/compat.h10
-rw-r--r--include/asm-mips/fcntl.h75
-rw-r--r--include/asm-mips/futex.h53
-rw-r--r--include/asm-mips/hdreg.h1
-rw-r--r--include/asm-mips/uaccess.h23
-rw-r--r--include/asm-parisc/auxvec.h4
-rw-r--r--include/asm-parisc/compat.h10
-rw-r--r--include/asm-parisc/fcntl.h56
-rw-r--r--include/asm-parisc/futex.h53
-rw-r--r--include/asm-parisc/hdreg.h1
-rw-r--r--include/asm-parisc/irq.h5
-rw-r--r--include/asm-parisc/uaccess.h4
-rw-r--r--include/asm-powerpc/fcntl.h11
-rw-r--r--include/asm-ppc/auxvec.h14
-rw-r--r--include/asm-ppc/elf.h11
-rw-r--r--include/asm-ppc/fcntl.h93
-rw-r--r--include/asm-ppc/futex.h53
-rw-r--r--include/asm-ppc/ibm_ocp.h2
-rw-r--r--include/asm-ppc/irq.h5
-rw-r--r--include/asm-ppc/uaccess.h7
-rw-r--r--include/asm-ppc64/auxvec.h19
-rw-r--r--include/asm-ppc64/compat.h18
-rw-r--r--include/asm-ppc64/elf.h16
-rw-r--r--include/asm-ppc64/fcntl.h89
-rw-r--r--include/asm-ppc64/futex.h83
-rw-r--r--include/asm-ppc64/irq.h5
-rw-r--r--include/asm-ppc64/kprobes.h3
-rw-r--r--include/asm-ppc64/memory.h2
-rw-r--r--include/asm-ppc64/processor.h14
-rw-r--r--include/asm-ppc64/uaccess.h7
-rw-r--r--include/asm-s390/auxvec.h4
-rw-r--r--include/asm-s390/compat.h20
-rw-r--r--include/asm-s390/fcntl.h98
-rw-r--r--include/asm-s390/futex.h53
-rw-r--r--include/asm-s390/uaccess.h7
-rw-r--r--include/asm-sh/auxvec.h4
-rw-r--r--include/asm-sh/fcntl.h89
-rw-r--r--include/asm-sh/futex.h53
-rw-r--r--include/asm-sh/hdreg.h1
-rw-r--r--include/asm-sh/uaccess.h6
-rw-r--r--include/asm-sh64/auxvec.h4
-rw-r--r--include/asm-sh64/fcntl.h6
-rw-r--r--include/asm-sh64/futex.h53
-rw-r--r--include/asm-sh64/hdreg.h6
-rw-r--r--include/asm-sh64/uaccess.h6
-rw-r--r--include/asm-sparc/auxvec.h4
-rw-r--r--include/asm-sparc/fcntl.h59
-rw-r--r--include/asm-sparc/futex.h53
-rw-r--r--include/asm-sparc/hdreg.h1
-rw-r--r--include/asm-sparc/uaccess.h6
-rw-r--r--include/asm-sparc64/auxvec.h4
-rw-r--r--include/asm-sparc64/compat.h18
-rw-r--r--include/asm-sparc64/fcntl.h46
-rw-r--r--include/asm-sparc64/futex.h53
-rw-r--r--include/asm-sparc64/hdreg.h1
-rw-r--r--include/asm-sparc64/uaccess.h6
-rw-r--r--include/asm-um/auxvec.h4
-rw-r--r--include/asm-um/futex.h53
-rw-r--r--include/asm-um/hdreg.h6
-rw-r--r--include/asm-v850/auxvec.h4
-rw-r--r--include/asm-v850/fcntl.h78
-rw-r--r--include/asm-v850/futex.h53
-rw-r--r--include/asm-v850/uaccess.h6
-rw-r--r--include/asm-x86_64/auxvec.h4
-rw-r--r--include/asm-x86_64/compat.h20
-rw-r--r--include/asm-x86_64/fcntl.h77
-rw-r--r--include/asm-x86_64/futex.h98
-rw-r--r--include/asm-x86_64/hdreg.h1
-rw-r--r--include/asm-x86_64/processor.h2
-rw-r--r--include/asm-x86_64/uaccess.h7
-rw-r--r--include/asm-xtensa/auxvec.h4
-rw-r--r--include/asm-xtensa/fcntl.h48
-rw-r--r--include/asm-xtensa/hdreg.h17
-rw-r--r--include/linux/auxvec.h31
-rw-r--r--include/linux/bio.h2
-rw-r--r--include/linux/compat.h3
-rw-r--r--include/linux/cpuset.h11
-rw-r--r--include/linux/dcache.h4
-rw-r--r--include/linux/dmi.h36
-rw-r--r--include/linux/elf.h24
-rw-r--r--include/linux/ext2_fs.h3
-rw-r--r--include/linux/ext3_fs.h2
-rw-r--r--include/linux/firmware.h5
-rw-r--r--include/linux/fs.h13
-rw-r--r--include/linux/futex.h36
-rw-r--r--include/linux/gfp.h8
-rw-r--r--include/linux/inotify.h1
-rw-r--r--include/linux/input.h25
-rw-r--r--include/linux/ioctl32.h22
-rw-r--r--include/linux/ipmi.h34
-rw-r--r--include/linux/irq.h130
-rw-r--r--include/linux/isdn.h1
-rw-r--r--include/linux/jbd.h1
-rw-r--r--include/linux/kprobes.h3
-rw-r--r--include/linux/linkage.h7
-rw-r--r--include/linux/mmc/card.h15
-rw-r--r--include/linux/mmc/host.h14
-rw-r--r--include/linux/mmc/mmc.h2
-rw-r--r--include/linux/mmc/protocol.h7
-rw-r--r--include/linux/msg.h1
-rw-r--r--include/linux/netfilter_ipv4/ip_conntrack.h5
-rw-r--r--include/linux/netfilter_ipv4/ip_conntrack_core.h2
-rw-r--r--include/linux/netfilter_ipv4/ip_nat_rule.h5
-rw-r--r--include/linux/pci_ids.h1
-rw-r--r--include/linux/pipe_fs_i.h3
-rw-r--r--include/linux/pnp.h2
-rw-r--r--include/linux/ptrace.h1
-rw-r--r--include/linux/relayfs_fs.h255
-rw-r--r--include/linux/sched.h28
-rw-r--r--include/linux/sem.h1
-rw-r--r--include/linux/serial_core.h6
-rw-r--r--include/linux/skbuff.h4
-rw-r--r--include/linux/slab.h16
-rw-r--r--include/linux/sonypi.h2
-rw-r--r--include/linux/sunrpc/cache.h1
-rw-r--r--include/linux/sysctl.h6
-rw-r--r--include/linux/time.h1
-rw-r--r--include/linux/timex.h23
-rw-r--r--include/linux/topology.h23
-rw-r--r--include/linux/wireless.h38
-rw-r--r--include/net/ax25.h2
-rw-r--r--include/net/iw_handler.h123
-rw-r--r--include/sound/core.h2
-rw-r--r--include/video/w100fb.h138
-rw-r--r--init/main.c39
-rw-r--r--ipc/compat.c12
-rw-r--r--ipc/msg.c82
-rw-r--r--ipc/sem.c73
-rw-r--r--ipc/shm.c86
-rw-r--r--ipc/util.c156
-rw-r--r--ipc/util.h8
-rw-r--r--kernel/Makefile1
-rw-r--r--kernel/acct.c2
-rw-r--r--kernel/cpuset.c125
-rw-r--r--kernel/futex.c137
-rw-r--r--kernel/intermodule.c3
-rw-r--r--kernel/irq/handle.c2
-rw-r--r--kernel/irq/manage.c4
-rw-r--r--kernel/irq/proc.c14
-rw-r--r--kernel/kprobes.c94
-rw-r--r--kernel/module.c33
-rw-r--r--kernel/params.c4
-rw-r--r--kernel/posix-timers.c28
-rw-r--r--kernel/power/Kconfig2
-rw-r--r--kernel/power/pm.c3
-rw-r--r--kernel/power/swsusp.c1
-rw-r--r--kernel/printk.c13
-rw-r--r--kernel/ptrace.c41
-rw-r--r--kernel/resource.c3
-rw-r--r--kernel/sched.c339
-rw-r--r--kernel/signal.c83
-rw-r--r--kernel/softlockup.c151
-rw-r--r--kernel/sys.c6
-rw-r--r--kernel/timer.c18
-rw-r--r--kernel/workqueue.c5
-rw-r--r--lib/Kconfig.debug19
-rw-r--r--lib/radix-tree.c176
-rw-r--r--mm/mmap.c9
-rw-r--r--mm/oom_kill.c62
-rw-r--r--mm/page_alloc.c28
-rw-r--r--mm/readahead.c1
-rw-r--r--mm/shmem.c2
-rw-r--r--mm/slab.c18
-rw-r--r--mm/vmscan.c8
-rw-r--r--net/ax25/af_ax25.c7
-rw-r--r--net/ax25/ax25_addr.c3
-rw-r--r--net/ax25/ax25_route.c7
-rw-r--r--net/ax25/ax25_uid.c4
-rw-r--r--net/core/sock.c9
-rw-r--r--net/core/wireless.c58
-rw-r--r--net/ieee80211/ieee80211_crypt.c27
-rw-r--r--net/ieee80211/ieee80211_crypt_ccmp.c47
-rw-r--r--net/ieee80211/ieee80211_crypt_tkip.c133
-rw-r--r--net/ieee80211/ieee80211_crypt_wep.c30
-rw-r--r--net/ieee80211/ieee80211_module.c40
-rw-r--r--net/ieee80211/ieee80211_rx.c310
-rw-r--r--net/ieee80211/ieee80211_tx.c66
-rw-r--r--net/ieee80211/ieee80211_wx.c73
-rw-r--r--net/ipv4/ip_fragment.c2
-rw-r--r--net/ipv4/netfilter/Kconfig20
-rw-r--r--net/ipv4/netfilter/Makefile1
-rw-r--r--net/ipv4/netfilter/ip_conntrack_amanda.c1
-rw-r--r--net/ipv4/netfilter/ip_conntrack_core.c38
-rw-r--r--net/ipv4/netfilter/ip_conntrack_ftp.c1
-rw-r--r--net/ipv4/netfilter/ip_conntrack_irc.c1
-rw-r--r--net/ipv4/netfilter/ip_conntrack_netbios_ns.c131
-rw-r--r--net/ipv4/netfilter/ip_conntrack_netlink.c13
-rw-r--r--net/ipv4/netfilter/ip_conntrack_proto_tcp.c1
-rw-r--r--net/ipv4/netfilter/ip_conntrack_standalone.c2
-rw-r--r--net/ipv4/netfilter/ip_conntrack_tftp.c1
-rw-r--r--net/ipv4/netfilter/ip_nat_rule.c21
-rw-r--r--net/ipv4/netfilter/ip_nat_standalone.c8
-rw-r--r--net/netfilter/nfnetlink_queue.c8
-rw-r--r--net/netlink/af_netlink.c59
-rw-r--r--net/netrom/af_netrom.c7
-rw-r--r--net/netrom/nr_route.c8
-rw-r--r--net/packet/af_packet.c6
-rw-r--r--net/rose/af_rose.c6
-rw-r--r--net/rose/rose_route.c14
-rw-r--r--net/rose/rose_subr.c5
-rw-r--r--net/socket.c22
-rw-r--r--net/sunrpc/auth_gss/svcauth_gss.c8
-rw-r--r--net/sunrpc/cache.c8
-rw-r--r--net/sunrpc/stats.c16
-rw-r--r--net/sunrpc/sunrpc_syms.c6
-rw-r--r--net/sunrpc/svcauth.c1
-rw-r--r--net/sunrpc/svcauth_unix.c1
-rw-r--r--scripts/kallsyms.c427
-rwxr-xr-xscripts/ver_linux6
-rw-r--r--sound/arm/Makefile16
-rw-r--r--sound/arm/aaci.c4
-rw-r--r--sound/arm/aaci.h6
-rw-r--r--sound/core/memory.c14
-rw-r--r--sound/isa/Kconfig19
-rw-r--r--sound/oss/os.h3
-rw-r--r--sound/pci/ali5451/ali5451.c2
758 files changed, 28713 insertions, 13530 deletions
diff --git a/Documentation/IPMI.txt b/Documentation/IPMI.txt
index 84d3d4d10c17..bf1cf98d2a27 100644
--- a/Documentation/IPMI.txt
+++ b/Documentation/IPMI.txt
@@ -605,12 +605,13 @@ is in the ipmi_poweroff module. When the system requests a powerdown,
 it will send the proper IPMI commands to do this.  This is supported on
 several platforms.
 
-There is a module parameter named "poweroff_control" that may either be zero
-(do a power down) or 2 (do a power cycle, power the system off, then power
-it on in a few seconds).  Setting ipmi_poweroff.poweroff_control=x will do
-the same thing on the kernel command line.  The parameter is also available
-via the proc filesystem in /proc/ipmi/poweroff_control.  Note that if the
-system does not support power cycling, it will always to the power off.
+There is a module parameter named "poweroff_powercycle" that may
+either be zero (do a power down) or non-zero (do a power cycle, power
+the system off, then power it on in a few seconds).  Setting
+ipmi_poweroff.poweroff_control=x will do the same thing on the kernel
+command line.  The parameter is also available via the proc filesystem
+in /proc/sys/dev/ipmi/poweroff_powercycle.  Note that if the system
+does not support power cycling, it will always do the power off.
 
 Note that if you have ACPI enabled, the system will prefer using ACPI to
 power off.
diff --git a/Documentation/RCU/NMI-RCU.txt b/Documentation/RCU/NMI-RCU.txt
new file mode 100644
index 000000000000..d0634a5c3445
--- /dev/null
+++ b/Documentation/RCU/NMI-RCU.txt
@@ -0,0 +1,112 @@
1Using RCU to Protect Dynamic NMI Handlers
2
3
4Although RCU is usually used to protect read-mostly data structures,
5it is possible to use RCU to provide dynamic non-maskable interrupt
6handlers, as well as dynamic irq handlers. This document describes
7how to do this, drawing loosely from Zwane Mwaikambo's NMI-timer
8work in "arch/i386/oprofile/nmi_timer_int.c" and in
9"arch/i386/kernel/traps.c".
10
11The relevant pieces of code are listed below, each followed by a
12brief explanation.
13
14 static int dummy_nmi_callback(struct pt_regs *regs, int cpu)
15 {
16 return 0;
17 }
18
19The dummy_nmi_callback() function is a "dummy" NMI handler that does
20nothing, but returns zero, thus saying that it did nothing, allowing
21the NMI handler to take the default machine-specific action.
22
23 static nmi_callback_t nmi_callback = dummy_nmi_callback;
24
25This nmi_callback variable is a global function pointer to the current
26NMI handler.
27
28 fastcall void do_nmi(struct pt_regs * regs, long error_code)
29 {
30 int cpu;
31
32 nmi_enter();
33
34 cpu = smp_processor_id();
35 ++nmi_count(cpu);
36
37 if (!rcu_dereference(nmi_callback)(regs, cpu))
38 default_do_nmi(regs);
39
40 nmi_exit();
41 }
42
43The do_nmi() function processes each NMI. It first disables preemption
44in the same way that a hardware irq would, then increments the per-CPU
45count of NMIs. It then invokes the NMI handler stored in the nmi_callback
46function pointer. If this handler returns zero, do_nmi() invokes the
47default_do_nmi() function to handle a machine-specific NMI. Finally,
48preemption is restored.
49
50Strictly speaking, rcu_dereference() is not needed, since this code runs
51only on i386, which does not need rcu_dereference() anyway. However,
52it is a good documentation aid, particularly for anyone attempting to
53do something similar on Alpha.
54
55Quick Quiz: Why might the rcu_dereference() be necessary on Alpha,
56 given that the code referenced by the pointer is read-only?
57
58
59Back to the discussion of NMI and RCU...
60
61 void set_nmi_callback(nmi_callback_t callback)
62 {
63 rcu_assign_pointer(nmi_callback, callback);
64 }
65
66The set_nmi_callback() function registers an NMI handler. Note that any
67data that is to be used by the callback must be initialized up -before-
68the call to set_nmi_callback(). On architectures that do not order
69writes, the rcu_assign_pointer() ensures that the NMI handler sees the
70initialized values.
71
72 void unset_nmi_callback(void)
73 {
74 rcu_assign_pointer(nmi_callback, dummy_nmi_callback);
75 }
76
77This function unregisters an NMI handler, restoring the original
78dummy_nmi_callback(). However, there may well be an NMI handler
79currently executing on some other CPU. We therefore cannot free
80up any data structures used by the old NMI handler until execution
81of it completes on all other CPUs.
82
83One way to accomplish this is via synchronize_sched(), perhaps as
84follows:
85
86 unset_nmi_callback();
87 synchronize_sched();
88 kfree(my_nmi_data);
89
90This works because synchronize_sched() blocks until all CPUs complete
91any preemption-disabled segments of code that they were executing.
92Since NMI handlers disable preemption, synchronize_sched() is guaranteed
93not to return until all ongoing NMI handlers exit. It is therefore safe
94to free up the handler's data as soon as synchronize_sched() returns.
95
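For illustration only (this sketch is not part of the patch; the my_nmi_handler()
and my_nmi_data names, and the assumption that the declarations live in
<asm/nmi.h>, are mine), a module combining registration, RCU-protected data and
the cleanup sequence above might look roughly like:

	/* Illustrative sketch only -- not from this patch. */
	#include <linux/module.h>
	#include <linux/slab.h>
	#include <linux/rcupdate.h>
	#include <asm/nmi.h>

	static struct my_nmi_data {
		unsigned long events;
	} *my_nmi_data;

	static int my_nmi_handler(struct pt_regs *regs, int cpu)
	{
		my_nmi_data->events++;	/* data was initialized before registration */
		return 1;		/* non-zero: this NMI was handled here */
	}

	static int __init my_nmi_init(void)
	{
		my_nmi_data = kmalloc(sizeof(*my_nmi_data), GFP_KERNEL);
		if (!my_nmi_data)
			return -ENOMEM;
		my_nmi_data->events = 0;	/* initialize -before- publishing */
		set_nmi_callback(my_nmi_handler);
		return 0;
	}

	static void __exit my_nmi_exit(void)
	{
		unset_nmi_callback();
		synchronize_sched();	/* wait for NMI handlers on all CPUs */
		kfree(my_nmi_data);
	}

	module_init(my_nmi_init);
	module_exit(my_nmi_exit);
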
96
97Answer to Quick Quiz
98
99 Why might the rcu_dereference() be necessary on Alpha, given
100 that the code referenced by the pointer is read-only?
101
102 Answer: The caller to set_nmi_callback() might well have
103 initialized some data that is to be used by the
104 new NMI handler. In this case, the rcu_dereference()
105 would be needed, because otherwise a CPU that received
106 an NMI just after the new handler was set might see
107 the pointer to the new NMI handler, but the old
108 pre-initialized version of the handler's data.
109
110 More important, the rcu_dereference() makes it clear
111 to someone reading the code that the pointer is being
112 protected by RCU.
diff --git a/Documentation/cdrom/sonycd535 b/Documentation/cdrom/sonycd535
index 59581a4b302a..b81e109970aa 100644
--- a/Documentation/cdrom/sonycd535
+++ b/Documentation/cdrom/sonycd535
@@ -68,7 +68,8 @@ it a better device citizen. Further thanks to Joel Katz
 Porfiri Claudio <C.Porfiri@nisms.tei.ericsson.se> for patches
 to make the driver work with the older CDU-510/515 series, and
 Heiko Eissfeldt <heiko@colossus.escape.de> for pointing out that
-the verify_area() checks were ignoring the results of said checks.
+the verify_area() checks were ignoring the results of said checks
+(note: verify_area() has since been replaced by access_ok()).
 
 (Acknowledgments from Ron Jeppesen in the 0.3 release:)
 Thanks to Corey Minyard who wrote the original CDU-31A driver on which
diff --git a/Documentation/cpusets.txt b/Documentation/cpusets.txt
index ad944c060312..47f4114fbf54 100644
--- a/Documentation/cpusets.txt
+++ b/Documentation/cpusets.txt
@@ -60,6 +60,18 @@ all of the cpus in the system. This removes any overhead due to
 load balancing code trying to pull tasks outside of the cpu exclusive
 cpuset only to be prevented by the tasks' cpus_allowed mask.
 
+A cpuset that is mem_exclusive restricts kernel allocations for
+page, buffer and other data commonly shared by the kernel across
+multiple users.  All cpusets, whether mem_exclusive or not, restrict
+allocations of memory for user space.  This enables configuring a
+system so that several independent jobs can share common kernel
+data, such as file system pages, while isolating each job's user
+allocation in its own cpuset.  To do this, construct a large
+mem_exclusive cpuset to hold all the jobs, and construct child,
+non-mem_exclusive cpusets for each individual job.  Only a small
+amount of typical kernel memory, such as requests from interrupt
+handlers, is allowed to be taken outside even a mem_exclusive cpuset.
+
 User level code may create and destroy cpusets by name in the cpuset
 virtual file system, manage the attributes and permissions of these
 cpusets and which CPUs and Memory Nodes are assigned to each cpuset,
diff --git a/Documentation/dcdbas.txt b/Documentation/dcdbas.txt
new file mode 100644
index 000000000000..e1c52e2dc361
--- /dev/null
+++ b/Documentation/dcdbas.txt
@@ -0,0 +1,91 @@
1Overview
2
3The Dell Systems Management Base Driver provides a sysfs interface for
4systems management software such as Dell OpenManage to perform system
5management interrupts and host control actions (system power cycle or
6power off after OS shutdown) on certain Dell systems.
7
8Dell OpenManage requires this driver on the following Dell PowerEdge systems:
9300, 1300, 1400, 400SC, 500SC, 1500SC, 1550, 600SC, 1600SC, 650, 1655MC,
10700, and 750. Other Dell software such as the open source libsmbios project
11is expected to make use of this driver, and it may include the use of this
12driver on other Dell systems.
13
14The Dell libsmbios project aims towards providing access to as much BIOS
15information as possible. See http://linux.dell.com/libsmbios/main/ for
16more information about the libsmbios project.
17
18
19System Management Interrupt
20
21On some Dell systems, systems management software must access certain
22management information via a system management interrupt (SMI). The SMI data
23buffer must reside in 32-bit address space, and the physical address of the
24buffer is required for the SMI. The driver maintains the memory required for
25the SMI and provides a way for the application to generate the SMI.
26The driver creates the following sysfs entries for systems management
27software to perform these system management interrupts:
28
29/sys/devices/platform/dcdbas/smi_data
30/sys/devices/platform/dcdbas/smi_data_buf_phys_addr
31/sys/devices/platform/dcdbas/smi_data_buf_size
32/sys/devices/platform/dcdbas/smi_request
33
34Systems management software must perform the following steps to execute
35a SMI using this driver:
36
371) Lock smi_data.
382) Write system management command to smi_data.
393) Write "1" to smi_request to generate a calling interface SMI or
40 "2" to generate a raw SMI.
414) Read system management command response from smi_data.
425) Unlock smi_data.
43
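For illustration only (this helper is not part of the driver or this patch),
a user-space program might perform the steps above roughly as follows. The
command payload, its length, and the use of flock() for the locking step are
assumptions made for the sketch:

	#include <fcntl.h>
	#include <sys/file.h>
	#include <unistd.h>

	#define SMI_DATA    "/sys/devices/platform/dcdbas/smi_data"
	#define SMI_REQUEST "/sys/devices/platform/dcdbas/smi_request"

	int main(void)
	{
		unsigned char cmd[16] = { 0 };	/* hypothetical SMI command buffer */
		unsigned char resp[16];
		int data_fd = open(SMI_DATA, O_RDWR);
		int req_fd = open(SMI_REQUEST, O_WRONLY);

		if (data_fd < 0 || req_fd < 0)
			return 1;

		flock(data_fd, LOCK_EX);		/* step 1: lock smi_data (assumed mechanism) */
		write(data_fd, cmd, sizeof(cmd));	/* step 2: write the management command */
		write(req_fd, "1", 1);			/* step 3: request a calling interface SMI */
		lseek(data_fd, 0, SEEK_SET);
		read(data_fd, resp, sizeof(resp));	/* step 4: read back the response */
		flock(data_fd, LOCK_UN);		/* step 5: unlock smi_data */

		close(req_fd);
		close(data_fd);
		return 0;
	}
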
44
45Host Control Action
46
47Dell OpenManage supports a host control feature that allows the administrator
48to perform a power cycle or power off of the system after the OS has finished
49shutting down. On some Dell systems, this host control feature requires that
50a driver perform a SMI after the OS has finished shutting down.
51
52The driver creates the following sysfs entries for systems management software
53to schedule the driver to perform a power cycle or power off host control
54action after the system has finished shutting down:
55
56/sys/devices/platform/dcdbas/host_control_action
57/sys/devices/platform/dcdbas/host_control_smi_type
58/sys/devices/platform/dcdbas/host_control_on_shutdown
59
60Dell OpenManage performs the following steps to execute a power cycle or
61power off host control action using this driver:
62
631) Write host control action to be performed to host_control_action.
642) Write type of SMI that driver needs to perform to host_control_smi_type.
653) Write "1" to host_control_on_shutdown to enable host control action.
664) Initiate OS shutdown.
67 (Driver will perform host control SMI when it is notified that the OS
68 has finished shutting down.)
69
70
71Host Control SMI Type
72
73The following table shows the value to write to host_control_smi_type to
74perform a power cycle or power off host control action:
75
76PowerEdge System Host Control SMI Type
77---------------- ---------------------
78 300 HC_SMITYPE_TYPE1
79 1300 HC_SMITYPE_TYPE1
80 1400 HC_SMITYPE_TYPE2
81 500SC HC_SMITYPE_TYPE2
82 1500SC HC_SMITYPE_TYPE2
83 1550 HC_SMITYPE_TYPE2
84 600SC HC_SMITYPE_TYPE2
85 1600SC HC_SMITYPE_TYPE2
86 650 HC_SMITYPE_TYPE2
87 1655MC HC_SMITYPE_TYPE2
88 700 HC_SMITYPE_TYPE3
89 750 HC_SMITYPE_TYPE3
90
91
diff --git a/Documentation/dell_rbu.txt b/Documentation/dell_rbu.txt
new file mode 100644
index 000000000000..bcfa5c35036b
--- /dev/null
+++ b/Documentation/dell_rbu.txt
@@ -0,0 +1,74 @@
1Purpose:
2Demonstrate the usage of the new open sourced rbu (Remote BIOS Update) driver
3for updating BIOS images on Dell servers and desktops.
4
5Scope:
6This document discusses the functionality of the rbu driver only.
7It does not cover the support needed from applications to enable the BIOS to
8update itself with the image downloaded into memory.
9
10Overview:
11This driver works with Dell OpenManage or Dell Update Packages for updating
12the BIOS on Dell servers (starting from servers sold since 1999), desktops
13and notebooks (starting from those sold in 2005).
14Please go to http://support.dell.com and register; there you can find info on
15OpenManage and Dell Update packages (DUP).
16
17Dell_RBU driver supports BIOS update using the monolithic image and packetized
18image methods. In case of monolithic, the driver allocates a contiguous chunk
19of physical pages having the BIOS image. In case of packetized, the app
20using the driver breaks the image into packets of fixed sizes and the driver
21would place each packet in contiguous physical memory. The driver also
22maintains a linked list of packets for reading them back.
23If the dell_rbu driver is unloaded all the allocated memory is freed.
24
25The rbu driver needs to have an application which will inform the BIOS to
26enable the update in the next system reboot.
27
28The user should not unload the rbu driver after downloading the BIOS image
29or updating.
30
31The driver load creates the following directories under the /sys file system.
32/sys/class/firmware/dell_rbu/loading
33/sys/class/firmware/dell_rbu/data
34/sys/devices/platform/dell_rbu/image_type
35/sys/devices/platform/dell_rbu/data
36
37The driver supports two types of update mechanism; monolithic and packetized.
38These update mechanisms depend upon the BIOS currently running on the system.
39Most of the Dell systems support a monolithic update where the BIOS image is
40copied to a single contiguous block of physical memory.
41In case of the packet mechanism the single memory block can be broken into smaller chunks
42of contiguous memory and the BIOS image is scattered in these packets.
43
44By default the driver uses the monolithic update type. This can be
45changed to packet at driver load time by specifying the load
46parameter image_type=packet. This can also be changed later as below:
47echo packet > /sys/devices/platform/dell_rbu/image_type
48
49Do the steps below to download the BIOS image.
501) echo 1 > /sys/class/firmware/dell_rbu/loading
512) cp bios_image.hdr /sys/class/firmware/dell_rbu/data
523) echo 0 > /sys/class/firmware/dell_rbu/loading
53
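As a purely illustrative sketch (not part of the driver), the same monolithic
download could also be driven from a small C program; the image file name used
here is an assumption:

	#include <fcntl.h>
	#include <unistd.h>

	#define LOADING "/sys/class/firmware/dell_rbu/loading"
	#define DATA    "/sys/class/firmware/dell_rbu/data"

	int main(void)
	{
		char buf[4096];
		ssize_t n;
		int img = open("bios_image.hdr", O_RDONLY);	/* hypothetical BIOS image */
		int load = open(LOADING, O_WRONLY);
		int data = open(DATA, O_WRONLY);

		if (img < 0 || load < 0 || data < 0)
			return 1;

		write(load, "1", 1);			/* step 1: start loading */
		while ((n = read(img, buf, sizeof(buf))) > 0)
			write(data, buf, n);		/* step 2: copy the BIOS image */
		write(load, "0", 1);			/* step 3: finish loading */

		close(img);
		close(data);
		close(load);
		return 0;
	}
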
54The /sys/class/firmware/dell_rbu/ entries will remain till the following is
55done.
56echo -1 > /sys/class/firmware/dell_rbu/loading
57
58Until this step is completed the driver cannot be unloaded.
59
60Also the driver provides /sys/devices/platform/dell_rbu/data readonly file to
61read back the image downloaded. This is useful in case of packet update
62mechanism where the above steps 1, 2, 3 will be repeated for every packet.
63By reading the /sys/devices/platform/dell_rbu/data file all packet data
64downloaded can be verified in a single file.
65The packets are arranged in this file one after the other in a FIFO order.
66
67NOTE:
68This driver requires a patch for firmware_class.c which has the addition
69of the request_firmware_nowait_nohotplug function in order to work.
70Also, after updating the BIOS image, a user mode application needs to execute
71code which messages the BIOS update request to the BIOS. So on the next reboot
72the BIOS knows about the new image downloaded and it updates itself.
73Also don't unload the rbu driver if the image has to be updated.
74
diff --git a/Documentation/dvb/bt8xx.txt b/Documentation/dvb/bt8xx.txt
index e6b8d05bc08d..4b8c326c6aac 100644
--- a/Documentation/dvb/bt8xx.txt
+++ b/Documentation/dvb/bt8xx.txt
@@ -16,7 +16,7 @@ Enable the following options:
16"Device drivers" => "Multimedia devices" 16"Device drivers" => "Multimedia devices"
17 => "Video For Linux" => "BT848 Video For Linux" 17 => "Video For Linux" => "BT848 Video For Linux"
18"Device drivers" => "Multimedia devices" => "Digital Video Broadcasting Devices" 18"Device drivers" => "Multimedia devices" => "Digital Video Broadcasting Devices"
19 => "DVB for Linux" "DVB Core Support" "Nebula/Pinnacle PCTV/TwinHan PCI Cards" 19 => "DVB for Linux" "DVB Core Support" "BT8xx based PCI cards"
20 20
213) Loading Modules, described by two approaches 213) Loading Modules, described by two approaches
22=============================================== 22===============================================
diff --git a/Documentation/exception.txt b/Documentation/exception.txt
index f1d436993eb1..3cb39ade290e 100644
--- a/Documentation/exception.txt
+++ b/Documentation/exception.txt
@@ -7,7 +7,7 @@ To protect itself the kernel has to verify this address.
 
 In older versions of Linux this was done with the
 int verify_area(int type, const void * addr, unsigned long size)
-function.
+function (which has since been replaced by access_ok()).
 
 This function verified that the memory area starting at address
 addr and of size size was accessible for the operation specified
diff --git a/Documentation/feature-removal-schedule.txt b/Documentation/feature-removal-schedule.txt
index 363909056e46..2e0a01b21fe0 100644
--- a/Documentation/feature-removal-schedule.txt
+++ b/Documentation/feature-removal-schedule.txt
@@ -51,14 +51,6 @@ Who: Adrian Bunk <bunk@stusta.de>
 
 ---------------------------
 
-What:	register_ioctl32_conversion() / unregister_ioctl32_conversion()
-When:	April 2005
-Why:	Replaced by ->compat_ioctl in file_operations and other method
-	vecors.
-Who:	Andi Kleen <ak@muc.de>, Christoph Hellwig <hch@lst.de>
-
----------------------------
-
 What:	RCU API moves to EXPORT_SYMBOL_GPL
 When:	April 2006
 Files:	include/linux/rcupdate.h, kernel/rcupdate.c
@@ -74,14 +66,6 @@ Who: Paul E. McKenney <paulmck@us.ibm.com>
 
 ---------------------------
 
-What:	remove verify_area()
-When:	July 2006
-Files:	Various uaccess.h headers.
-Why:	Deprecated and redundant. access_ok() should be used instead.
-Who:	Jesper Juhl <juhl-lkml@dif.dk>
-
----------------------------
-
 What:	IEEE1394 Audio and Music Data Transmission Protocol driver,
 	Connection Management Procedures driver
 When:	November 2005
diff --git a/Documentation/filesystems/relayfs.txt b/Documentation/filesystems/relayfs.txt
new file mode 100644
index 000000000000..d24e1b0d4f39
--- /dev/null
+++ b/Documentation/filesystems/relayfs.txt
@@ -0,0 +1,362 @@
1
2relayfs - a high-speed data relay filesystem
3============================================
4
5relayfs is a filesystem designed to provide an efficient mechanism for
6tools and facilities to relay large and potentially sustained streams
7of data from kernel space to user space.
8
9The main abstraction of relayfs is the 'channel'. A channel consists
10of a set of per-cpu kernel buffers each represented by a file in the
11relayfs filesystem. Kernel clients write into a channel using
12efficient write functions which automatically log to the current cpu's
13channel buffer. User space applications mmap() the per-cpu files and
14retrieve the data as it becomes available.
15
16The format of the data logged into the channel buffers is completely
17up to the relayfs client; relayfs does however provide hooks which
18allow clients to impose some structure on the buffer data. Nor does
19relayfs implement any form of data filtering - this also is left to
20the client. The purpose is to keep relayfs as simple as possible.
21
22This document provides an overview of the relayfs API. The details of
23the function parameters are documented along with the functions in the
24filesystem code - please see that for details.
25
26Semantics
27=========
28
29Each relayfs channel has one buffer per CPU, each buffer has one or
30more sub-buffers. Messages are written to the first sub-buffer until
31it is too full to contain a new message, in which case it is
32written to the next (if available). Messages are never split across
33sub-buffers. At this point, userspace can be notified so it empties
34the first sub-buffer, while the kernel continues writing to the next.
35
36When notified that a sub-buffer is full, the kernel knows how many
37bytes of it are padding i.e. unused. Userspace can use this knowledge
38to copy only valid data.
39
40After copying it, userspace can notify the kernel that a sub-buffer
41has been consumed.
42
43relayfs can operate in a mode where it will overwrite data not yet
44collected by userspace, and not wait for it to consume it.
45
46relayfs itself does not provide for communication of such data between
47userspace and kernel, allowing the kernel side to remain simple and not
48impose a single interface on userspace. It does provide a separate
49helper though, described below.
50
51klog, relay-app & librelay
52==========================
53
54relayfs itself is ready to use, but to make things easier, two
55additional systems are provided. klog is a simple wrapper to make
56writing formatted text or raw data to a channel simpler, regardless of
57whether a channel to write into exists or not, or whether relayfs is
58compiled into the kernel or is configured as a module. relay-app is
59the kernel counterpart of userspace librelay.c; combined, these two
60files provide glue to easily stream data to disk, without having to
61bother with housekeeping. klog and relay-app can be used together,
62with klog providing high-level logging functions to the kernel and
63relay-app taking care of kernel-user control and disk-logging chores.
64
65It is possible to use relayfs without relay-app & librelay, but you'll
66have to implement communication between userspace and kernel, allowing
67both to convey the state of buffers (full, empty, amount of padding).
68
69klog, relay-app and librelay can be found in the relay-apps tarball on
70http://relayfs.sourceforge.net
71
72The relayfs user space API
73==========================
74
75relayfs implements basic file operations for user space access to
76relayfs channel buffer data. Here are the file operations that are
77available and some comments regarding their behavior:
78
79open() enables user to open an _existing_ buffer.
80
81mmap() results in channel buffer being mapped into the caller's
82 memory space. Note that you can't do a partial mmap - you must
83 map the entire file, which is NRBUF * SUBBUFSIZE.
84
85read() reads the contents of a channel buffer. The bytes read are
86 'consumed' by the reader i.e. they won't be available again
87 to subsequent reads. If the channel is being used in
88 no-overwrite mode (the default), it can be read at any time
89 even if there's an active kernel writer. If the channel is
90 being used in overwrite mode and there are active channel
91 writers, results may be unpredictable - users should make
92 sure that all logging to the channel has ended before using
93 read() with overwrite mode.
94
95poll() POLLIN/POLLRDNORM/POLLERR supported. User applications are
96 notified when sub-buffer boundaries are crossed.
97
98close() decrements the channel buffer's refcount. When the refcount
99 reaches 0 i.e. when no process or kernel client has the buffer
100 open, the channel buffer is freed.
101
102
103In order for a user application to make use of relayfs files, the
104relayfs filesystem must be mounted. For example,
105
106 mount -t relayfs relayfs /mnt/relay
107
108NOTE: relayfs doesn't need to be mounted for kernel clients to create
109 or use channels - it only needs to be mounted when user space
110 applications need access to the buffer data.
111
112
113The relayfs kernel API
114======================
115
116Here's a summary of the API relayfs provides to in-kernel clients:
117
118
119 channel management functions:
120
121 relay_open(base_filename, parent, subbuf_size, n_subbufs,
122 callbacks)
123 relay_close(chan)
124 relay_flush(chan)
125 relay_reset(chan)
126 relayfs_create_dir(name, parent)
127 relayfs_remove_dir(dentry)
128
129 channel management typically called on instigation of userspace:
130
131 relay_subbufs_consumed(chan, cpu, subbufs_consumed)
132
133 write functions:
134
135 relay_write(chan, data, length)
136 __relay_write(chan, data, length)
137 relay_reserve(chan, length)
138
139 callbacks:
140
141 subbuf_start(buf, subbuf, prev_subbuf, prev_padding)
142 buf_mapped(buf, filp)
143 buf_unmapped(buf, filp)
144
145 helper functions:
146
147 relay_buf_full(buf)
148 subbuf_start_reserve(buf, length)
149
150
151Creating a channel
152------------------
153
154relay_open() is used to create a channel, along with its per-cpu
155channel buffers. Each channel buffer will have an associated file
156created for it in the relayfs filesystem, which can be opened and
157mmapped from user space if desired. The files are named
158basename0...basenameN-1 where N is the number of online cpus, and by
159default will be created in the root of the filesystem. If you want a
160directory structure to contain your relayfs files, you can create it
161with relayfs_create_dir() and pass the parent directory to
162relay_open(). Clients are responsible for cleaning up any directory
163structure they create when the channel is closed - use
164relayfs_remove_dir() for that.
165
166The total size of each per-cpu buffer is calculated by multiplying the
167number of sub-buffers by the sub-buffer size passed into relay_open().
168The idea behind sub-buffers is that they're basically an extension of
169double-buffering to N buffers, and they also allow applications to
170easily implement random-access-on-buffer-boundary schemes, which can
171be important for some high-volume applications. The number and size
172of sub-buffers is completely dependent on the application and even for
173the same application, different conditions will warrant different
174values for these parameters at different times. Typically, the right
175values to use are best decided after some experimentation; in general,
176though, it's safe to assume that having only 1 sub-buffer is a bad
177idea - you're guaranteed to either overwrite data or lose events
178depending on the channel mode being used.
179
180Channel 'modes'
181---------------
182
183relayfs channels can be used in either of two modes - 'overwrite' or
184'no-overwrite'. The mode is entirely determined by the implementation
185of the subbuf_start() callback, as described below. In 'overwrite'
186mode, also known as 'flight recorder' mode, writes continuously cycle
187around the buffer and will never fail, but will unconditionally
188overwrite old data regardless of whether it's actually been consumed.
189In no-overwrite mode, writes will fail i.e. data will be lost, if the
190number of unconsumed sub-buffers equals the total number of
191sub-buffers in the channel. It should be clear that if there is no
192consumer or if the consumer can't consume sub-buffers fast enough,
193data will be lost in either case; the only difference is whether data
194is lost from the beginning or the end of a buffer.
195
196As explained above, a relayfs channel is made up of one or more
197per-cpu channel buffers, each implemented as a circular buffer
198subdivided into one or more sub-buffers. Messages are written into
199the current sub-buffer of the channel's current per-cpu buffer via the
200write functions described below. Whenever a message can't fit into
201the current sub-buffer, because there's no room left for it, the
202client is notified via the subbuf_start() callback that a switch to a
203new sub-buffer is about to occur. The client uses this callback to 1)
204initialize the next sub-buffer if appropriate, 2) finalize the previous
205sub-buffer if appropriate, and 3) return a boolean value indicating
206whether or not to actually go ahead with the sub-buffer switch.
207
208To implement 'no-overwrite' mode, the kernel client would provide
209an implementation of the subbuf_start() callback something like the
210following:
211
212static int subbuf_start(struct rchan_buf *buf,
213 void *subbuf,
214 void *prev_subbuf,
215 unsigned int prev_padding)
216{
217 if (prev_subbuf)
218 *((unsigned *)prev_subbuf) = prev_padding;
219
220 if (relay_buf_full(buf))
221 return 0;
222
223 subbuf_start_reserve(buf, sizeof(unsigned int));
224
225 return 1;
226}
227
228If the current buffer is full, i.e. all sub-buffers remain unconsumed,
229the callback returns 0 to indicate that the buffer switch should not
230occur yet, i.e. not until the consumer has had a chance to read the
231current set of ready sub-buffers. For the relay_buf_full() function to
232make sense, the consumer is responsible for notifying relayfs when
233sub-buffers have been consumed via relay_subbufs_consumed(). Any
234subsequent attempts to write into the buffer will again invoke the
235subbuf_start() callback with the same parameters; only when the
236consumer has consumed one or more of the ready sub-buffers will
237relay_buf_full() return 0, in which case the buffer switch can
238continue.
239
240The implementation of the subbuf_start() callback for 'overwrite' mode
241would be very similar:
242
243static int subbuf_start(struct rchan_buf *buf,
244 void *subbuf,
245 void *prev_subbuf,
246 unsigned int prev_padding)
247{
248 if (prev_subbuf)
249 *((unsigned *)prev_subbuf) = prev_padding;
250
251 subbuf_start_reserve(buf, sizeof(unsigned int));
252
253 return 1;
254}
255
256In this case, the relay_buf_full() check is meaningless and the
257callback always returns 1, causing the buffer switch to occur
258unconditionally. It's also meaningless for the client to use the
259relay_subbufs_consumed() function in this mode, as it's never
260consulted.
261
262The default subbuf_start() implementation, used if the client doesn't
263define any callbacks, or doesn't define the subbuf_start() callback,
264implements the simplest possible 'no-overwrite' mode i.e. it does
265nothing but return 0.
266
267Header information can be reserved at the beginning of each sub-buffer
268by calling the subbuf_start_reserve() helper function from within the
269subbuf_start() callback. This reserved area can be used to store
270whatever information the client wants. In the example above, room is
271reserved in each sub-buffer to store the padding count for that
272sub-buffer. This is filled in for the previous sub-buffer in the
273subbuf_start() implementation; the padding value for the previous
274sub-buffer is passed into the subbuf_start() callback along with a
275pointer to the previous sub-buffer, since the padding value isn't
276known until a sub-buffer is filled. The subbuf_start() callback is
277also called for the first sub-buffer when the channel is opened, to
278give the client a chance to reserve space in it. In this case the
279previous sub-buffer pointer passed into the callback will be NULL, so
280the client should check the value of the prev_subbuf pointer before
281writing into the previous sub-buffer.
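
To illustrate how such a header might be used on the consumer side,
here is a sketch of a userspace reader working on an mmapped buffer
(mmap_base, subbuf_size, n_subbufs and process() are assumed to come
from the reader application, and the layout matches the subbuf_start()
examples above):

unsigned int i;

for (i = 0; i < n_subbufs; i++) {
	char *subbuf = mmap_base + i * subbuf_size;
	unsigned int padding = *(unsigned int *)subbuf;
	size_t data_len = subbuf_size - sizeof(unsigned int) - padding;

	/* skip the reserved header, stop before the end-of-subbuf padding */
	process(subbuf + sizeof(unsigned int), data_len);
}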
282
283Writing to a channel
284--------------------
285
286Kernel clients write data into the current cpu's channel buffer using
287relay_write() or __relay_write(). relay_write() is the main logging
288function - it uses local_irq_save() to protect the buffer and should be
289used if you might be logging from interrupt context. If you know
290you'll never be logging from interrupt context, you can use
291__relay_write(), which only disables preemption. These functions
292don't return a value, so you can't determine whether or not they
293failed - the assumption is that you wouldn't want to check a return
294value in the fast logging path anyway, and that they'll always succeed
295unless the buffer is full and no-overwrite mode is being used, in
296which case you can detect a failed write in the subbuf_start()
297callback by calling the relay_buf_full() helper function.
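
For illustration (the event structure and timestamp source are
assumptions of this sketch, not part of the relayfs API), a kernel
client's logging path might look like:

struct my_event {			/* hypothetical client-defined record */
	u32 type;
	u64 timestamp;
};

static void log_event(struct rchan *chan, u32 type)
{
	struct my_event ev;

	ev.type = type;
	ev.timestamp = sched_clock();	/* any timestamp source will do */
	relay_write(chan, &ev, sizeof(ev));
}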
298
299relay_reserve() is used to reserve a slot in a channel buffer which
300can be written to later. This would typically be used in applications
301that need to write directly into a channel buffer without having to
302stage data in a temporary buffer beforehand. Because the actual write
303may not happen immediately after the slot is reserved, applications
304using relay_reserve() can keep a count of the number of bytes actually
305written, either in space reserved in the sub-buffers themselves or as
306a separate array. See the 'reserve' example in the relay-apps tarball
307at http://relayfs.sourceforge.net for an example of how this can be
308done. Because the write is under control of the client and is
309separated from the reserve, relay_reserve() doesn't protect the buffer
310at all - it's up to the client to provide the appropriate
311synchronization when using relay_reserve().
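
A minimal sketch of the reserve-then-write pattern, reusing the
hypothetical my_event record from the previous example (all
synchronization is left to the client, as noted above):

struct my_event *ev;

ev = relay_reserve(chan, sizeof(*ev));
if (ev) {				/* NULL means the reserve failed */
	ev->type = type;
	ev->timestamp = sched_clock();
}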
312
313Closing a channel
314-----------------
315
316The client calls relay_close() when it's finished using the channel.
317The channel and its associated buffers are destroyed when there are no
318longer any references to any of the channel buffers. relay_flush()
319forces a sub-buffer switch on all the channel buffers, and can be used
320to finalize and process the last sub-buffers before the channel is
321closed.
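
A typical shutdown sequence might therefore be (dir being the dentry
returned by relayfs_create_dir() in the earlier sketch, if one was
used):

relay_flush(chan);		/* flush the final, partially-filled sub-buffers */
relay_close(chan);
relayfs_remove_dir(dir);	/* clean up any directory we created */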
322
323Misc
324----
325
326Some applications may want to keep a channel around and re-use it
327rather than open and close a new channel for each use. relay_reset()
328can be used for this purpose - it resets a channel to its initial
329state without reallocating channel buffer memory or destroying
330existing mappings. It should however only be called when it's safe to
331do so i.e. when the channel isn't currently being written to.
332
333Finally, there are a couple of utility callbacks that can be used for
334different purposes. buf_mapped() is called whenever a channel buffer
335is mmapped from user space and buf_unmapped() is called when it's
336unmapped. The client can use this notification to trigger actions
337within the kernel application, such as enabling/disabling logging to
338the channel.
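
Tying the callbacks together, a client's callback table might look
something like the following sketch (the mapped-count bookkeeping and
the idea of using it to gate logging are assumptions of this example,
and the table assumes the rchan_callbacks structure declared in the
relayfs headers):

static atomic_t mapped_count = ATOMIC_INIT(0);	/* checked by the logging path */

static void buf_mapped(struct rchan_buf *buf, struct file *filp)
{
	atomic_inc(&mapped_count);
}

static void buf_unmapped(struct rchan_buf *buf, struct file *filp)
{
	atomic_dec(&mapped_count);
}

static struct rchan_callbacks my_app_callbacks = {
	.subbuf_start	= subbuf_start,
	.buf_mapped	= buf_mapped,
	.buf_unmapped	= buf_unmapped,
};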
339
340
341Resources
342=========
343
344For news, example code, mailing list, etc. see the relayfs homepage:
345
346 http://relayfs.sourceforge.net
347
348
349Credits
350=======
351
352The ideas and specs for relayfs came about as a result of discussions
353on tracing involving the following:
354
355Michel Dagenais <michel.dagenais@polymtl.ca>
356Richard Moore <richardj_moore@uk.ibm.com>
357Bob Wisniewski <bob@watson.ibm.com>
358Karim Yaghmour <karim@opersys.com>
359Tom Zanussi <zanussi@us.ibm.com>
360
361Also thanks to Hubertus Franke for a lot of useful suggestions and bug
362reports.
diff --git a/Documentation/i386/boot.txt b/Documentation/i386/boot.txt
index 1c48f0eba6fb..10312bebe55d 100644
--- a/Documentation/i386/boot.txt
+++ b/Documentation/i386/boot.txt
@@ -2,7 +2,7 @@
2 ---------------------------- 2 ----------------------------
3 3
4 H. Peter Anvin <hpa@zytor.com> 4 H. Peter Anvin <hpa@zytor.com>
5 Last update 2002-01-01 5 Last update 2005-09-02
6 6
7On the i386 platform, the Linux kernel uses a rather complicated boot 7On the i386 platform, the Linux kernel uses a rather complicated boot
8convention. This has evolved partially due to historical aspects, as 8convention. This has evolved partially due to historical aspects, as
@@ -34,6 +34,8 @@ Protocol 2.02: (Kernel 2.4.0-test3-pre3) New command line protocol.
34Protocol 2.03: (Kernel 2.4.18-pre1) Explicitly makes the highest possible 34Protocol 2.03: (Kernel 2.4.18-pre1) Explicitly makes the highest possible
35 initrd address available to the bootloader. 35 initrd address available to the bootloader.
36 36
37Protocol 2.04: (Kernel 2.6.14) Extend the syssize field to four bytes.
38
37 39
38**** MEMORY LAYOUT 40**** MEMORY LAYOUT
39 41
@@ -103,10 +105,9 @@ The header looks like:
103Offset Proto Name Meaning 105Offset Proto Name Meaning
104/Size 106/Size
105 107
10601F1/1 ALL setup_sects The size of the setup in sectors 10801F1/1 ALL(1 setup_sects The size of the setup in sectors
10701F2/2 ALL root_flags If set, the root is mounted readonly 10901F2/2 ALL root_flags If set, the root is mounted readonly
10801F4/2 ALL syssize DO NOT USE - for bootsect.S use only 11001F4/4 2.04+(2 syssize The size of the 32-bit code in 16-byte paras
10901F6/2 ALL swap_dev DO NOT USE - obsolete
11001F8/2 ALL ram_size DO NOT USE - for bootsect.S use only 11101F8/2 ALL ram_size DO NOT USE - for bootsect.S use only
11101FA/2 ALL vid_mode Video mode control 11201FA/2 ALL vid_mode Video mode control
11201FC/2 ALL root_dev Default root device number 11301FC/2 ALL root_dev Default root device number
@@ -129,8 +130,12 @@ Offset Proto Name Meaning
1290228/4 2.02+ cmd_line_ptr 32-bit pointer to the kernel command line 1300228/4 2.02+ cmd_line_ptr 32-bit pointer to the kernel command line
130022C/4 2.03+ initrd_addr_max Highest legal initrd address 131022C/4 2.03+ initrd_addr_max Highest legal initrd address
131 132
132For backwards compatibility, if the setup_sects field contains 0, the 133(1) For backwards compatibility, if the setup_sects field contains 0, the
133real value is 4. 134 real value is 4.
135
136(2) For boot protocol prior to 2.04, the upper two bytes of the syssize
137 field are unusable, which means the size of a bzImage kernel
138 cannot be determined.
134 139
135If the "HdrS" (0x53726448) magic number is not found at offset 0x202, 140If the "HdrS" (0x53726448) magic number is not found at offset 0x202,
136the boot protocol version is "old". Loading an old kernel, the 141the boot protocol version is "old". Loading an old kernel, the
@@ -230,12 +235,16 @@ loader to communicate with the kernel. Some of its options are also
230relevant to the boot loader itself, see "special command line options" 235relevant to the boot loader itself, see "special command line options"
231below. 236below.
232 237
233The kernel command line is a null-terminated string up to 255 238The kernel command line is a null-terminated string currently up to
234characters long, plus the final null. 239255 characters long, plus the final null. A string that is too long
240will be automatically truncated by the kernel, a boot loader may allow
241a longer command line to be passed to permit future kernels to extend
242this limit.
235 243
236If the boot protocol version is 2.02 or later, the address of the 244If the boot protocol version is 2.02 or later, the address of the
237kernel command line is given by the header field cmd_line_ptr (see 245kernel command line is given by the header field cmd_line_ptr (see
238above.) 246above.) This address can be anywhere between the end of the setup
247heap and 0xA0000.
239 248
240If the protocol version is *not* 2.02 or higher, the kernel 249If the protocol version is *not* 2.02 or higher, the kernel
241command line is entered using the following protocol: 250command line is entered using the following protocol:
@@ -255,7 +264,7 @@ command line is entered using the following protocol:
255**** SAMPLE BOOT CONFIGURATION 264**** SAMPLE BOOT CONFIGURATION
256 265
257As a sample configuration, assume the following layout of the real 266As a sample configuration, assume the following layout of the real
258mode segment: 267mode segment (this is a typical, and recommended layout):
259 268
260 0x0000-0x7FFF Real mode kernel 269 0x0000-0x7FFF Real mode kernel
261 0x8000-0x8FFF Stack and heap 270 0x8000-0x8FFF Stack and heap
@@ -312,9 +321,9 @@ Such a boot loader should enter the following fields in the header:
312 321
313**** LOADING THE REST OF THE KERNEL 322**** LOADING THE REST OF THE KERNEL
314 323
315The non-real-mode kernel starts at offset (setup_sects+1)*512 in the 324The 32-bit (non-real-mode) kernel starts at offset (setup_sects+1)*512
316kernel file (again, if setup_sects == 0 the real value is 4.) It 325in the kernel file (again, if setup_sects == 0 the real value is 4.)
317should be loaded at address 0x10000 for Image/zImage kernels and 326It should be loaded at address 0x10000 for Image/zImage kernels and
3180x100000 for bzImage kernels. 3270x100000 for bzImage kernels.
319 328
320The kernel is a bzImage kernel if the protocol >= 2.00 and the 0x01 329The kernel is a bzImage kernel if the protocol >= 2.00 and the 0x01
diff --git a/Documentation/kernel-parameters.txt b/Documentation/kernel-parameters.txt
index 3d5cd7a09b2f..d2f0c67ba1fb 100644
--- a/Documentation/kernel-parameters.txt
+++ b/Documentation/kernel-parameters.txt
@@ -1174,6 +1174,11 @@ running once the system is up.
1174 New name for the ramdisk parameter. 1174 New name for the ramdisk parameter.
1175 See Documentation/ramdisk.txt. 1175 See Documentation/ramdisk.txt.
1176 1176
1177 rdinit= [KNL]
1178 Format: <full_path>
1179 Run specified binary instead of /init from the ramdisk,
1180 used for early userspace startup. See initrd.
1181
1177 reboot= [BUGS=IA-32,BUGS=ARM,BUGS=IA-64] Rebooting mode 1182 reboot= [BUGS=IA-32,BUGS=ARM,BUGS=IA-64] Rebooting mode
1178 Format: <reboot_mode>[,<reboot_mode2>[,...]] 1183 Format: <reboot_mode>[,<reboot_mode2>[,...]]
1179 See arch/*/kernel/reboot.c. 1184 See arch/*/kernel/reboot.c.
diff --git a/Documentation/power/swsusp.txt b/Documentation/power/swsusp.txt
index ddf907fbcc05..b0d50840788e 100644
--- a/Documentation/power/swsusp.txt
+++ b/Documentation/power/swsusp.txt
@@ -1,22 +1,20 @@
1From kernel/suspend.c: 1Some warnings, first.
2 2
3 * BIG FAT WARNING ********************************************************* 3 * BIG FAT WARNING *********************************************************
4 * 4 *
5 * If you have unsupported (*) devices using DMA...
6 * ...say goodbye to your data.
7 *
8 * If you touch anything on disk between suspend and resume... 5 * If you touch anything on disk between suspend and resume...
9 * ...kiss your data goodbye. 6 * ...kiss your data goodbye.
10 * 7 *
11 * If your disk driver does not support suspend... (IDE does) 8 * If you do resume from initrd after your filesystems are mounted...
12 * ...you'd better find out how to get along 9 * ...bye bye root partition.
13 * without your data. 10 * [this is actually same case as above]
14 *
15 * If you change kernel command line between suspend and resume...
16 * ...prepare for nasty fsck or worse.
17 * 11 *
18 * If you change your hardware while system is suspended... 12 * If you have unsupported (*) devices using DMA, you may have some
19 * ...well, it was not good idea. 13 * problems. If your disk driver does not support suspend... (IDE does),
14 * it may cause some problems, too. If you change kernel command line
15 * between suspend and resume, it may do something wrong. If you change
16 * your hardware while system is suspended... well, it was not good idea;
17 * but it will probably only crash.
20 * 18 *
21 * (*) suspend/resume support is needed to make it safe. 19 * (*) suspend/resume support is needed to make it safe.
22 20
@@ -30,6 +28,13 @@ echo shutdown > /sys/power/disk; echo disk > /sys/power/state
30echo platform > /sys/power/disk; echo disk > /sys/power/state 28echo platform > /sys/power/disk; echo disk > /sys/power/state
31 29
32 30
31Encrypted suspend image:
32------------------------
33If you want to store your suspend image encrypted with a temporary
34key to prevent data gathering after resume you must compile
35crypto and the aes algorithm into the kernel - modules won't work
36as they cannot be loaded at resume time.
37
33 38
34Article about goals and implementation of Software Suspend for Linux 39Article about goals and implementation of Software Suspend for Linux
35~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ 40~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
@@ -85,11 +90,6 @@ resume.
85You have your server on UPS. Power died, and UPS is indicating 30 90You have your server on UPS. Power died, and UPS is indicating 30
86seconds to failure. What do you do? Suspend to disk. 91seconds to failure. What do you do? Suspend to disk.
87 92
88Ethernet card in your server died. You want to replace it. Your
89server is not hotplug capable. What do you do? Suspend to disk,
90replace ethernet card, resume. If you are fast your users will not
91even see broken connections.
92
93 93
94Q: Maybe I'm missing something, but why don't the regular I/O paths work? 94Q: Maybe I'm missing something, but why don't the regular I/O paths work?
95 95
@@ -117,31 +117,6 @@ Q: Does linux support ACPI S4?
117 117
118A: Yes. That's what echo platform > /sys/power/disk does. 118A: Yes. That's what echo platform > /sys/power/disk does.
119 119
120Q: My machine doesn't work with ACPI. How can I use swsusp than ?
121
122A: Do a reboot() syscall with right parameters. Warning: glibc gets in
123its way, so check with strace:
124
125reboot(LINUX_REBOOT_MAGIC1, LINUX_REBOOT_MAGIC2, 0xd000fce2)
126
127(Thanks to Peter Osterlund:)
128
129#include <unistd.h>
130#include <syscall.h>
131
132#define LINUX_REBOOT_MAGIC1 0xfee1dead
133#define LINUX_REBOOT_MAGIC2 672274793
134#define LINUX_REBOOT_CMD_SW_SUSPEND 0xD000FCE2
135
136int main()
137{
138 syscall(SYS_reboot, LINUX_REBOOT_MAGIC1, LINUX_REBOOT_MAGIC2,
139 LINUX_REBOOT_CMD_SW_SUSPEND, 0);
140 return 0;
141}
142
143Also /sys/ interface should be still present.
144
145Q: What is 'suspend2'? 120Q: What is 'suspend2'?
146 121
147A: suspend2 is 'Software Suspend 2', a forked implementation of 122A: suspend2 is 'Software Suspend 2', a forked implementation of
@@ -312,9 +287,45 @@ system is shut down or suspended. Additionally use the encrypted
312suspend image to prevent sensitive data from being stolen after 287suspend image to prevent sensitive data from being stolen after
313resume. 288resume.
314 289
315Q: Why we cannot suspend to a swap file? 290Q: Why can't we suspend to a swap file?
316 291
317A: Because accessing swap file needs the filesystem mounted, and 292A: Because accessing swap file needs the filesystem mounted, and
318filesystem might do something wrong (like replaying the journal) 293filesystem might do something wrong (like replaying the journal)
319during mount. [Probably could be solved by modifying every filesystem 294during mount.
320to support some kind of "really read-only!" option. Patches welcome.] 295
296There are a few ways to get that fixed:
297
2981) Probably could be solved by modifying every filesystem to support
299some kind of "really read-only!" option. Patches welcome.
300
3012) suspend2 gets around that by storing absolute positions in on-disk
302image (and blocksize), with resume parameter pointing directly to
303suspend header.
304
305Q: Is there a maximum system RAM size that is supported by swsusp?
306
307A: It should work okay with highmem.
308
309Q: Does swsusp (to disk) use only one swap partition or can it use
310multiple swap partitions (aggregate them into one logical space)?
311
312A: Only one swap partition, sorry.
313
314Q: If my application(s) causes lots of memory & swap space to be used
315(over half of the total system RAM), is it correct that it is likely
316to be useless to try to suspend to disk while that app is running?
317
318A: No, it should work okay, as long as your app does not mlock()
319it. Just prepare big enough swap partition.
320
321Q: What information is useful for debugging suspend-to-disk problems?
322
323A: Well, last messages on the screen are always useful. If something
324is broken, it is usually some kernel driver, therefore trying with as
325few modules loaded as possible helps a lot. I also prefer people to
326suspend from console, preferably without X running. Booting with
327init=/bin/bash, then swapon and starting the suspend sequence manually
328usually does the trick. Then it is a good idea to try with the latest
329vanilla kernel.
330
331
diff --git a/Documentation/power/video.txt b/Documentation/power/video.txt
index 1a44e8acb54c..526d6dd267ea 100644
--- a/Documentation/power/video.txt
+++ b/Documentation/power/video.txt
@@ -120,6 +120,7 @@ IBM ThinkPad T42p (2373-GTG) s3_bios (2)
120IBM TP X20 ??? (*) 120IBM TP X20 ??? (*)
121IBM TP X30 s3_bios (2) 121IBM TP X30 s3_bios (2)
122IBM TP X31 / Type 2672-XXH none (1), use radeontool (http://fdd.com/software/radeon/) to turn off backlight. 122IBM TP X31 / Type 2672-XXH none (1), use radeontool (http://fdd.com/software/radeon/) to turn off backlight.
123IBM TP X32 none (1), but backlight is on and video is trashed after long suspend
123IBM Thinkpad X40 Type 2371-7JG s3_bios,s3_mode (4) 124IBM Thinkpad X40 Type 2371-7JG s3_bios,s3_mode (4)
124Medion MD4220 ??? (*) 125Medion MD4220 ??? (*)
125Samsung P35 vbetool needed (6) 126Samsung P35 vbetool needed (6)
diff --git a/Documentation/sonypi.txt b/Documentation/sonypi.txt
index 0f3b2405d09e..c1237a925505 100644
--- a/Documentation/sonypi.txt
+++ b/Documentation/sonypi.txt
@@ -99,6 +99,7 @@ statically linked into the kernel). Those options are:
99 SONYPI_MEYE_MASK 0x0400 99 SONYPI_MEYE_MASK 0x0400
100 SONYPI_MEMORYSTICK_MASK 0x0800 100 SONYPI_MEMORYSTICK_MASK 0x0800
101 SONYPI_BATTERY_MASK 0x1000 101 SONYPI_BATTERY_MASK 0x1000
102 SONYPI_WIRELESS_MASK 0x2000
102 103
103 useinput: if set (which is the default) two input devices are 104 useinput: if set (which is the default) two input devices are
104 created, one which interprets the jogdial events as 105 created, one which interprets the jogdial events as
@@ -137,6 +138,15 @@ Bugs:
137 speed handling etc). Use ACPI instead of APM if it works on your 138 speed handling etc). Use ACPI instead of APM if it works on your
138 laptop. 139 laptop.
139 140
141 - sonypi lacks the ability to distinguish between certain key
142 events on some models.
143
144 - some models with the nvidia card (geforce go 6200 tc) use a
145 different way to adjust the backlighting of the screen. There
146 is a userspace utility to adjust the brightness on those models,
147 which can be downloaded from
148 http://www.acc.umu.se/~erikw/program/smartdimmer-0.1.tar.bz2
149
140 - since all development was done by reverse engineering, there is 150 - since all development was done by reverse engineering, there is
141 _absolutely no guarantee_ that this driver will not crash your 151 _absolutely no guarantee_ that this driver will not crash your
142 laptop. Permanently. 152 laptop. Permanently.
diff --git a/MAINTAINERS b/MAINTAINERS
index 7e1f67130a16..2af78e965dd7 100644
--- a/MAINTAINERS
+++ b/MAINTAINERS
@@ -202,13 +202,6 @@ P: Colin Leroy
202M: colin@colino.net 202M: colin@colino.net
203S: Maintained 203S: Maintained
204 204
205ADVANSYS SCSI DRIVER
206P: Bob Frey
207M: linux@advansys.com
208W: http://www.advansys.com/linux.html
209L: linux-scsi@vger.kernel.org
210S: Maintained
211
212AEDSP16 DRIVER 205AEDSP16 DRIVER
213P: Riccardo Facchetti 206P: Riccardo Facchetti
214M: fizban@tin.it 207M: fizban@tin.it
@@ -696,6 +689,11 @@ M: dz@debian.org
696W: http://www.debian.org/~dz/i8k/ 689W: http://www.debian.org/~dz/i8k/
697S: Maintained 690S: Maintained
698 691
692DELL SYSTEMS MANAGEMENT BASE DRIVER (dcdbas)
693P: Doug Warzecha
694M: Douglas_Warzecha@dell.com
695S: Maintained
696
699DEVICE-MAPPER 697DEVICE-MAPPER
700P: Alasdair Kergon 698P: Alasdair Kergon
701L: dm-devel@redhat.com 699L: dm-devel@redhat.com
@@ -879,7 +877,7 @@ S: Maintained
879 877
880FILESYSTEMS (VFS and infrastructure) 878FILESYSTEMS (VFS and infrastructure)
881P: Alexander Viro 879P: Alexander Viro
882M: viro@parcelfarce.linux.theplanet.co.uk 880M: viro@zeniv.linux.org.uk
883S: Maintained 881S: Maintained
884 882
885FIRMWARE LOADER (request_firmware) 883FIRMWARE LOADER (request_firmware)
@@ -1967,7 +1965,6 @@ S: Supported
1967 1965
1968ROCKETPORT DRIVER 1966ROCKETPORT DRIVER
1969P: Comtrol Corp. 1967P: Comtrol Corp.
1970M: support@comtrol.com
1971W: http://www.comtrol.com 1968W: http://www.comtrol.com
1972S: Maintained 1969S: Maintained
1973 1970
diff --git a/arch/alpha/Kconfig b/arch/alpha/Kconfig
index 189d5eababa8..786491f9ceb2 100644
--- a/arch/alpha/Kconfig
+++ b/arch/alpha/Kconfig
@@ -479,6 +479,9 @@ config EISA
479 depends on ALPHA_GENERIC || ALPHA_JENSEN || ALPHA_ALCOR || ALPHA_MIKASA || ALPHA_SABLE || ALPHA_LYNX || ALPHA_NORITAKE || ALPHA_RAWHIDE 479 depends on ALPHA_GENERIC || ALPHA_JENSEN || ALPHA_ALCOR || ALPHA_MIKASA || ALPHA_SABLE || ALPHA_LYNX || ALPHA_NORITAKE || ALPHA_RAWHIDE
480 default y 480 default y
481 481
482config ARCH_MAY_HAVE_PC_FDC
483 def_bool y
484
482config SMP 485config SMP
483 bool "Symmetric multi-processing support" 486 bool "Symmetric multi-processing support"
484 depends on ALPHA_SABLE || ALPHA_LYNX || ALPHA_RAWHIDE || ALPHA_DP264 || ALPHA_WILDFIRE || ALPHA_TITAN || ALPHA_GENERIC || ALPHA_SHARK || ALPHA_MARVEL 487 depends on ALPHA_SABLE || ALPHA_LYNX || ALPHA_RAWHIDE || ALPHA_DP264 || ALPHA_WILDFIRE || ALPHA_TITAN || ALPHA_GENERIC || ALPHA_SHARK || ALPHA_MARVEL
diff --git a/arch/alpha/kernel/time.c b/arch/alpha/kernel/time.c
index 8226c5cd788c..67be50b7d80a 100644
--- a/arch/alpha/kernel/time.c
+++ b/arch/alpha/kernel/time.c
@@ -149,7 +149,7 @@ irqreturn_t timer_interrupt(int irq, void *dev, struct pt_regs * regs)
149 * CMOS clock accordingly every ~11 minutes. Set_rtc_mmss() has to be 149 * CMOS clock accordingly every ~11 minutes. Set_rtc_mmss() has to be
150 * called as close as possible to 500 ms before the new second starts. 150 * called as close as possible to 500 ms before the new second starts.
151 */ 151 */
152 if ((time_status & STA_UNSYNC) == 0 152 if (ntp_synced()
153 && xtime.tv_sec > state.last_rtc_update + 660 153 && xtime.tv_sec > state.last_rtc_update + 660
154 && xtime.tv_nsec >= 500000 - ((unsigned) TICK_SIZE) / 2 154 && xtime.tv_nsec >= 500000 - ((unsigned) TICK_SIZE) / 2
155 && xtime.tv_nsec <= 500000 + ((unsigned) TICK_SIZE) / 2) { 155 && xtime.tv_nsec <= 500000 + ((unsigned) TICK_SIZE) / 2) {
@@ -502,10 +502,7 @@ do_settimeofday(struct timespec *tv)
502 set_normalized_timespec(&xtime, sec, nsec); 502 set_normalized_timespec(&xtime, sec, nsec);
503 set_normalized_timespec(&wall_to_monotonic, wtm_sec, wtm_nsec); 503 set_normalized_timespec(&wall_to_monotonic, wtm_sec, wtm_nsec);
504 504
505 time_adjust = 0; /* stop active adjtime() */ 505 ntp_clear();
506 time_status |= STA_UNSYNC;
507 time_maxerror = NTP_PHASE_LIMIT;
508 time_esterror = NTP_PHASE_LIMIT;
509 506
510 write_sequnlock_irq(&xtime_lock); 507 write_sequnlock_irq(&xtime_lock);
511 clock_was_set(); 508 clock_was_set();
diff --git a/arch/arm/Kconfig b/arch/arm/Kconfig
index 68dfdba71d74..0f2899b4159d 100644
--- a/arch/arm/Kconfig
+++ b/arch/arm/Kconfig
@@ -64,6 +64,9 @@ config GENERIC_CALIBRATE_DELAY
64config GENERIC_BUST_SPINLOCK 64config GENERIC_BUST_SPINLOCK
65 bool 65 bool
66 66
67config ARCH_MAY_HAVE_PC_FDC
68 bool
69
67config GENERIC_ISA_DMA 70config GENERIC_ISA_DMA
68 bool 71 bool
69 72
@@ -150,6 +153,7 @@ config ARCH_RPC
150 select ARCH_ACORN 153 select ARCH_ACORN
151 select FIQ 154 select FIQ
152 select TIMER_ACORN 155 select TIMER_ACORN
156 select ARCH_MAY_HAVE_PC_FDC
153 help 157 help
154 On the Acorn Risc-PC, Linux can support the internal IDE disk and 158 On the Acorn Risc-PC, Linux can support the internal IDE disk and
155 CD-ROM interface, serial and parallel port, and the floppy drive. 159 CD-ROM interface, serial and parallel port, and the floppy drive.
diff --git a/arch/arm/boot/compressed/head-sharpsl.S b/arch/arm/boot/compressed/head-sharpsl.S
index d6bf8a2b090d..59ad69640d6b 100644
--- a/arch/arm/boot/compressed/head-sharpsl.S
+++ b/arch/arm/boot/compressed/head-sharpsl.S
@@ -7,7 +7,8 @@
7 * so we have to figure out the machine for ourselves... 7 * so we have to figure out the machine for ourselves...
8 * 8 *
9 * Support for Poodle, Corgi (SL-C700), Shepherd (SL-C750) 9 * Support for Poodle, Corgi (SL-C700), Shepherd (SL-C750)
10 * and Husky (SL-C760). 10 * Husky (SL-C760), Tosa (SL-C6000), Spitz (SL-C3000),
11 * Akita (SL-C1000) and Borzoi (SL-C3100).
11 * 12 *
12 */ 13 */
13 14
@@ -23,6 +24,22 @@
23 24
24__SharpSL_start: 25__SharpSL_start:
25 26
27/* Check for TC6393 - if found we have a Tosa */
28 ldr r7, .TOSAID
29 mov r1, #0x10000000 @ Base address of TC6393 chip
30 mov r6, #0x03
31 ldrh r3, [r1, #8] @ Load TC6393XB Revison: This is 0x0003
32 cmp r6, r3
33 beq .SHARPEND @ Success -> tosa
34
35/* Check for pxa270 - if found, branch */
36 mrc p15, 0, r4, c0, c0 @ Get Processor ID
37 and r4, r4, #0xffffff00
38 ldr r3, .PXA270ID
39 cmp r4, r3
40 beq .PXA270
41
42/* Check for w100 - if not found we have a Poodle */
26 ldr r1, .W100ADDR @ Base address of w100 chip + regs offset 43 ldr r1, .W100ADDR @ Base address of w100 chip + regs offset
27 44
28 mov r6, #0x31 @ Load Magic Init value 45 mov r6, #0x31 @ Load Magic Init value
@@ -30,7 +47,7 @@ __SharpSL_start:
30 mov r5, #0x3000 47 mov r5, #0x3000
31.W100LOOP: 48.W100LOOP:
32 subs r5, r5, #1 49 subs r5, r5, #1
33 bne .W100LOOP 50 bne .W100LOOP
34 mov r6, #0x30 @ Load 2nd Magic Init value 51 mov r6, #0x30 @ Load 2nd Magic Init value
35 str r6, [r1, #0x280] @ to SCRATCH_UMSK 52 str r6, [r1, #0x280] @ to SCRATCH_UMSK
36 53
@@ -40,45 +57,52 @@ __SharpSL_start:
40 cmp r6, r3 57 cmp r6, r3
41 bne .SHARPEND @ We have no w100 - Poodle 58 bne .SHARPEND @ We have no w100 - Poodle
42 59
43 mrc p15, 0, r6, c0, c0 @ Get Processor ID 60/* Check for pxa250 - if found we have a Corgi */
44 and r6, r6, #0xffffff00
45 ldr r7, .CORGIID 61 ldr r7, .CORGIID
46 ldr r3, .PXA255ID 62 ldr r3, .PXA255ID
47 cmp r6, r3 63 cmp r4, r3
48 blo .SHARPEND @ We have a PXA250 - Corgi 64 blo .SHARPEND @ We have a PXA250 - Corgi
49 65
50 mov r1, #0x0c000000 @ Base address of NAND chip 66/* Check for 64MiB flash - if found we have a Shepherd */
51 ldrb r3, [r1, #24] @ Load FLASHCTL 67 bl get_flash_ids
52 bic r3, r3, #0x11 @ SET NCE
53 orr r3, r3, #0x0a @ SET CLR + FLWP
54 strb r3, [r1, #24] @ Save to FLASHCTL
55 mov r2, #0x90 @ Command "readid"
56 strb r2, [r1, #20] @ Save to FLASHIO
57 bic r3, r3, #2 @ CLR CLE
58 orr r3, r3, #4 @ SET ALE
59 strb r3, [r1, #24] @ Save to FLASHCTL
60 mov r2, #0 @ Address 0x00
61 strb r2, [r1, #20] @ Save to FLASHIO
62 bic r3, r3, #4 @ CLR ALE
63 strb r3, [r1, #24] @ Save to FLASHCTL
64.SHARP1:
65 ldrb r3, [r1, #24] @ Load FLASHCTL
66 tst r3, #32 @ Is chip ready?
67 beq .SHARP1
68 ldrb r2, [r1, #20] @ NAND Manufacturer ID
69 ldrb r3, [r1, #20] @ NAND Chip ID
70 ldr r7, .SHEPHERDID 68 ldr r7, .SHEPHERDID
71 cmp r3, #0x76 @ 64MiB flash 69 cmp r3, #0x76 @ 64MiB flash
72 beq .SHARPEND @ We have Shepherd 70 beq .SHARPEND @ We have Shepherd
71
72/* Must be a Husky */
73 ldr r7, .HUSKYID @ Must be Husky 73 ldr r7, .HUSKYID @ Must be Husky
74 b .SHARPEND 74 b .SHARPEND
75 75
76.PXA270:
77/* Check for 16MiB flash - if found we have Spitz */
78 bl get_flash_ids
79 ldr r7, .SPITZID
80 cmp r3, #0x73 @ 16MiB flash
81 beq .SHARPEND @ We have Spitz
82
83/* Check for a second SCOOP chip - if found we have Borzoi */
84 ldr r1, .SCOOP2ADDR
85 ldr r7, .BORZOIID
86 mov r6, #0x0140
87 strh r6, [r1]
88 ldrh r6, [r1]
89 cmp r6, #0x0140
90 beq .SHARPEND @ We have Borzoi
91
92/* Must be Akita */
93 ldr r7, .AKITAID
94 b .SHARPEND @ We have Borzoi
95
76.PXA255ID: 96.PXA255ID:
77 .word 0x69052d00 @ PXA255 Processor ID 97 .word 0x69052d00 @ PXA255 Processor ID
98.PXA270ID:
99 .word 0x69054100 @ PXA270 Processor ID
78.W100ID: 100.W100ID:
79 .word 0x57411002 @ w100 Chip ID 101 .word 0x57411002 @ w100 Chip ID
80.W100ADDR: 102.W100ADDR:
81 .word 0x08010000 @ w100 Chip ID Reg Address 103 .word 0x08010000 @ w100 Chip ID Reg Address
104.SCOOP2ADDR:
105 .word 0x08800040
82.POODLEID: 106.POODLEID:
83 .word MACH_TYPE_POODLE 107 .word MACH_TYPE_POODLE
84.CORGIID: 108.CORGIID:
@@ -87,6 +111,41 @@ __SharpSL_start:
87 .word MACH_TYPE_SHEPHERD 111 .word MACH_TYPE_SHEPHERD
88.HUSKYID: 112.HUSKYID:
89 .word MACH_TYPE_HUSKY 113 .word MACH_TYPE_HUSKY
90.SHARPEND: 114.TOSAID:
115 .word MACH_TYPE_TOSA
116.SPITZID:
117 .word MACH_TYPE_SPITZ
118.AKITAID:
119 .word MACH_TYPE_AKITA
120.BORZOIID:
121 .word MACH_TYPE_BORZOI
91 122
123/*
124 * Return: r2 - NAND Manufacturer ID
125 * r3 - NAND Chip ID
126 * Corrupts: r1
127 */
128get_flash_ids:
129 mov r1, #0x0c000000 @ Base address of NAND chip
130 ldrb r3, [r1, #24] @ Load FLASHCTL
131 bic r3, r3, #0x11 @ SET NCE
132 orr r3, r3, #0x0a @ SET CLR + FLWP
133 strb r3, [r1, #24] @ Save to FLASHCTL
134 mov r2, #0x90 @ Command "readid"
135 strb r2, [r1, #20] @ Save to FLASHIO
136 bic r3, r3, #2 @ CLR CLE
137 orr r3, r3, #4 @ SET ALE
138 strb r3, [r1, #24] @ Save to FLASHCTL
139 mov r2, #0 @ Address 0x00
140 strb r2, [r1, #20] @ Save to FLASHIO
141 bic r3, r3, #4 @ CLR ALE
142 strb r3, [r1, #24] @ Save to FLASHCTL
143.fids1:
144 ldrb r3, [r1, #24] @ Load FLASHCTL
145 tst r3, #32 @ Is chip ready?
146 beq .fids1
147 ldrb r2, [r1, #20] @ NAND Manufacturer ID
148 ldrb r3, [r1, #20] @ NAND Chip ID
149 mov pc, lr
92 150
151.SHARPEND:
diff --git a/arch/arm/configs/omap_h2_1610_defconfig b/arch/arm/configs/omap_h2_1610_defconfig
index 24955263b096..4198677cd394 100644
--- a/arch/arm/configs/omap_h2_1610_defconfig
+++ b/arch/arm/configs/omap_h2_1610_defconfig
@@ -1,7 +1,7 @@
1# 1#
2# Automatically generated make config: don't edit 2# Automatically generated make config: don't edit
3# Linux kernel version: 2.6.13-rc2 3# Linux kernel version: 2.6.13
4# Fri Jul 8 04:49:34 2005 4# Mon Sep 5 18:07:12 2005
5# 5#
6CONFIG_ARM=y 6CONFIG_ARM=y
7CONFIG_MMU=y 7CONFIG_MMU=y
@@ -102,9 +102,11 @@ CONFIG_OMAP_MUX_WARNINGS=y
102# CONFIG_OMAP_MPU_TIMER is not set 102# CONFIG_OMAP_MPU_TIMER is not set
103CONFIG_OMAP_32K_TIMER=y 103CONFIG_OMAP_32K_TIMER=y
104CONFIG_OMAP_32K_TIMER_HZ=128 104CONFIG_OMAP_32K_TIMER_HZ=128
105# CONFIG_OMAP_DM_TIMER is not set
105CONFIG_OMAP_LL_DEBUG_UART1=y 106CONFIG_OMAP_LL_DEBUG_UART1=y
106# CONFIG_OMAP_LL_DEBUG_UART2 is not set 107# CONFIG_OMAP_LL_DEBUG_UART2 is not set
107# CONFIG_OMAP_LL_DEBUG_UART3 is not set 108# CONFIG_OMAP_LL_DEBUG_UART3 is not set
109CONFIG_OMAP_SERIAL_WAKE=y
108 110
109# 111#
110# OMAP Core Type 112# OMAP Core Type
@@ -166,7 +168,6 @@ CONFIG_ISA_DMA_API=y
166# 168#
167# Kernel Features 169# Kernel Features
168# 170#
169# CONFIG_SMP is not set
170CONFIG_PREEMPT=y 171CONFIG_PREEMPT=y
171CONFIG_NO_IDLE_HZ=y 172CONFIG_NO_IDLE_HZ=y
172# CONFIG_ARCH_DISCONTIGMEM_ENABLE is not set 173# CONFIG_ARCH_DISCONTIGMEM_ENABLE is not set
@@ -230,91 +231,82 @@ CONFIG_PM=y
230# CONFIG_APM is not set 231# CONFIG_APM is not set
231 232
232# 233#
233# Device Drivers 234# Networking
234#
235
236#
237# Generic Driver Options
238#
239CONFIG_STANDALONE=y
240CONFIG_PREVENT_FIRMWARE_BUILD=y
241# CONFIG_FW_LOADER is not set
242
243#
244# Memory Technology Devices (MTD)
245# 235#
246CONFIG_MTD=y 236CONFIG_NET=y
247CONFIG_MTD_DEBUG=y
248CONFIG_MTD_DEBUG_VERBOSE=3
249# CONFIG_MTD_CONCAT is not set
250CONFIG_MTD_PARTITIONS=y
251# CONFIG_MTD_REDBOOT_PARTS is not set
252CONFIG_MTD_CMDLINE_PARTS=y
253# CONFIG_MTD_AFS_PARTS is not set
254 237
255# 238#
256# User Modules And Translation Layers 239# Networking options
257# 240#
258CONFIG_MTD_CHAR=y 241CONFIG_PACKET=y
259CONFIG_MTD_BLOCK=y 242# CONFIG_PACKET_MMAP is not set
260# CONFIG_FTL is not set 243CONFIG_UNIX=y
261# CONFIG_NFTL is not set 244# CONFIG_NET_KEY is not set
262# CONFIG_INFTL is not set 245CONFIG_INET=y
246# CONFIG_IP_MULTICAST is not set
247# CONFIG_IP_ADVANCED_ROUTER is not set
248CONFIG_IP_FIB_HASH=y
249CONFIG_IP_PNP=y
250CONFIG_IP_PNP_DHCP=y
251CONFIG_IP_PNP_BOOTP=y
252# CONFIG_IP_PNP_RARP is not set
253# CONFIG_NET_IPIP is not set
254# CONFIG_NET_IPGRE is not set
255# CONFIG_ARPD is not set
256# CONFIG_SYN_COOKIES is not set
257# CONFIG_INET_AH is not set
258# CONFIG_INET_ESP is not set
259# CONFIG_INET_IPCOMP is not set
260# CONFIG_INET_TUNNEL is not set
261CONFIG_IP_TCPDIAG=y
262# CONFIG_IP_TCPDIAG_IPV6 is not set
263# CONFIG_TCP_CONG_ADVANCED is not set
264CONFIG_TCP_CONG_BIC=y
265# CONFIG_IPV6 is not set
266# CONFIG_NETFILTER is not set
263 267
264# 268#
265# RAM/ROM/Flash chip drivers 269# SCTP Configuration (EXPERIMENTAL)
266# 270#
267CONFIG_MTD_CFI=y 271# CONFIG_IP_SCTP is not set
268# CONFIG_MTD_JEDECPROBE is not set 272# CONFIG_ATM is not set
269CONFIG_MTD_GEN_PROBE=y 273# CONFIG_BRIDGE is not set
270# CONFIG_MTD_CFI_ADV_OPTIONS is not set 274# CONFIG_VLAN_8021Q is not set
271CONFIG_MTD_MAP_BANK_WIDTH_1=y 275# CONFIG_DECNET is not set
272CONFIG_MTD_MAP_BANK_WIDTH_2=y 276# CONFIG_LLC2 is not set
273CONFIG_MTD_MAP_BANK_WIDTH_4=y 277# CONFIG_IPX is not set
274# CONFIG_MTD_MAP_BANK_WIDTH_8 is not set 278# CONFIG_ATALK is not set
275# CONFIG_MTD_MAP_BANK_WIDTH_16 is not set 279# CONFIG_X25 is not set
276# CONFIG_MTD_MAP_BANK_WIDTH_32 is not set 280# CONFIG_LAPB is not set
277CONFIG_MTD_CFI_I1=y 281# CONFIG_NET_DIVERT is not set
278CONFIG_MTD_CFI_I2=y 282# CONFIG_ECONET is not set
279# CONFIG_MTD_CFI_I4 is not set 283# CONFIG_WAN_ROUTER is not set
280# CONFIG_MTD_CFI_I8 is not set 284# CONFIG_NET_SCHED is not set
281CONFIG_MTD_CFI_INTELEXT=y 285# CONFIG_NET_CLS_ROUTE is not set
282# CONFIG_MTD_CFI_AMDSTD is not set
283# CONFIG_MTD_CFI_STAA is not set
284CONFIG_MTD_CFI_UTIL=y
285# CONFIG_MTD_RAM is not set
286# CONFIG_MTD_ROM is not set
287# CONFIG_MTD_ABSENT is not set
288# CONFIG_MTD_XIP is not set
289 286
290# 287#
291# Mapping drivers for chip access 288# Network testing
292# 289#
293# CONFIG_MTD_COMPLEX_MAPPINGS is not set 290# CONFIG_NET_PKTGEN is not set
294# CONFIG_MTD_PHYSMAP is not set 291# CONFIG_HAMRADIO is not set
295# CONFIG_MTD_ARM_INTEGRATOR is not set 292# CONFIG_IRDA is not set
296# CONFIG_MTD_EDB7312 is not set 293# CONFIG_BT is not set
297 294
298# 295#
299# Self-contained MTD device drivers 296# Device Drivers
300# 297#
301# CONFIG_MTD_SLRAM is not set
302# CONFIG_MTD_PHRAM is not set
303# CONFIG_MTD_MTDRAM is not set
304# CONFIG_MTD_BLKMTD is not set
305# CONFIG_MTD_BLOCK2MTD is not set
306 298
307# 299#
308# Disk-On-Chip Device Drivers 300# Generic Driver Options
309# 301#
310# CONFIG_MTD_DOC2000 is not set 302CONFIG_STANDALONE=y
311# CONFIG_MTD_DOC2001 is not set 303CONFIG_PREVENT_FIRMWARE_BUILD=y
312# CONFIG_MTD_DOC2001PLUS is not set 304# CONFIG_FW_LOADER is not set
313 305
314# 306#
315# NAND Flash Device Drivers 307# Memory Technology Devices (MTD)
316# 308#
317# CONFIG_MTD_NAND is not set 309# CONFIG_MTD is not set
318 310
319# 311#
320# Parallel port support 312# Parallel port support
@@ -403,72 +395,8 @@ CONFIG_SCSI_PROC_FS=y
403# 395#
404 396
405# 397#
406# Networking support 398# Network device support
407#
408CONFIG_NET=y
409
410#
411# Networking options
412#
413CONFIG_PACKET=y
414# CONFIG_PACKET_MMAP is not set
415CONFIG_UNIX=y
416# CONFIG_NET_KEY is not set
417CONFIG_INET=y
418# CONFIG_IP_MULTICAST is not set
419# CONFIG_IP_ADVANCED_ROUTER is not set
420CONFIG_IP_FIB_HASH=y
421CONFIG_IP_PNP=y
422CONFIG_IP_PNP_DHCP=y
423CONFIG_IP_PNP_BOOTP=y
424# CONFIG_IP_PNP_RARP is not set
425# CONFIG_NET_IPIP is not set
426# CONFIG_NET_IPGRE is not set
427# CONFIG_ARPD is not set
428# CONFIG_SYN_COOKIES is not set
429# CONFIG_INET_AH is not set
430# CONFIG_INET_ESP is not set
431# CONFIG_INET_IPCOMP is not set
432# CONFIG_INET_TUNNEL is not set
433CONFIG_IP_TCPDIAG=y
434# CONFIG_IP_TCPDIAG_IPV6 is not set
435# CONFIG_TCP_CONG_ADVANCED is not set
436CONFIG_TCP_CONG_BIC=y
437# CONFIG_IPV6 is not set
438# CONFIG_NETFILTER is not set
439
440#
441# SCTP Configuration (EXPERIMENTAL)
442# 399#
443# CONFIG_IP_SCTP is not set
444# CONFIG_ATM is not set
445# CONFIG_BRIDGE is not set
446# CONFIG_VLAN_8021Q is not set
447# CONFIG_DECNET is not set
448# CONFIG_LLC2 is not set
449# CONFIG_IPX is not set
450# CONFIG_ATALK is not set
451# CONFIG_X25 is not set
452# CONFIG_LAPB is not set
453# CONFIG_NET_DIVERT is not set
454# CONFIG_ECONET is not set
455# CONFIG_WAN_ROUTER is not set
456
457#
458# QoS and/or fair queueing
459#
460# CONFIG_NET_SCHED is not set
461# CONFIG_NET_CLS_ROUTE is not set
462
463#
464# Network testing
465#
466# CONFIG_NET_PKTGEN is not set
467# CONFIG_NETPOLL is not set
468# CONFIG_NET_POLL_CONTROLLER is not set
469# CONFIG_HAMRADIO is not set
470# CONFIG_IRDA is not set
471# CONFIG_BT is not set
472CONFIG_NETDEVICES=y 400CONFIG_NETDEVICES=y
473# CONFIG_DUMMY is not set 401# CONFIG_DUMMY is not set
474# CONFIG_BONDING is not set 402# CONFIG_BONDING is not set
@@ -518,6 +446,8 @@ CONFIG_SLIP_COMPRESSED=y
518# CONFIG_SLIP_MODE_SLIP6 is not set 446# CONFIG_SLIP_MODE_SLIP6 is not set
519# CONFIG_SHAPER is not set 447# CONFIG_SHAPER is not set
520# CONFIG_NETCONSOLE is not set 448# CONFIG_NETCONSOLE is not set
449# CONFIG_NETPOLL is not set
450# CONFIG_NET_POLL_CONTROLLER is not set
521 451
522# 452#
523# ISDN subsystem 453# ISDN subsystem
@@ -615,77 +545,15 @@ CONFIG_WATCHDOG_NOWAYOUT=y
615# 545#
616# I2C support 546# I2C support
617# 547#
618CONFIG_I2C=y 548# CONFIG_I2C is not set
619CONFIG_I2C_CHARDEV=y 549# CONFIG_I2C_SENSOR is not set
620 550CONFIG_ISP1301_OMAP=y
621#
622# I2C Algorithms
623#
624# CONFIG_I2C_ALGOBIT is not set
625# CONFIG_I2C_ALGOPCF is not set
626# CONFIG_I2C_ALGOPCA is not set
627
628#
629# I2C Hardware Bus support
630#
631# CONFIG_I2C_ISA is not set
632# CONFIG_I2C_PARPORT_LIGHT is not set
633# CONFIG_I2C_STUB is not set
634# CONFIG_I2C_PCA_ISA is not set
635 551
636# 552#
637# Hardware Sensors Chip support 553# Hardware Monitoring support
638# 554#
639# CONFIG_I2C_SENSOR is not set 555CONFIG_HWMON=y
640# CONFIG_SENSORS_ADM1021 is not set 556# CONFIG_HWMON_DEBUG_CHIP is not set
641# CONFIG_SENSORS_ADM1025 is not set
642# CONFIG_SENSORS_ADM1026 is not set
643# CONFIG_SENSORS_ADM1031 is not set
644# CONFIG_SENSORS_ADM9240 is not set
645# CONFIG_SENSORS_ASB100 is not set
646# CONFIG_SENSORS_ATXP1 is not set
647# CONFIG_SENSORS_DS1621 is not set
648# CONFIG_SENSORS_FSCHER is not set
649# CONFIG_SENSORS_FSCPOS is not set
650# CONFIG_SENSORS_GL518SM is not set
651# CONFIG_SENSORS_GL520SM is not set
652# CONFIG_SENSORS_IT87 is not set
653# CONFIG_SENSORS_LM63 is not set
654# CONFIG_SENSORS_LM75 is not set
655# CONFIG_SENSORS_LM77 is not set
656# CONFIG_SENSORS_LM78 is not set
657# CONFIG_SENSORS_LM80 is not set
658# CONFIG_SENSORS_LM83 is not set
659# CONFIG_SENSORS_LM85 is not set
660# CONFIG_SENSORS_LM87 is not set
661# CONFIG_SENSORS_LM90 is not set
662# CONFIG_SENSORS_LM92 is not set
663# CONFIG_SENSORS_MAX1619 is not set
664# CONFIG_SENSORS_PC87360 is not set
665# CONFIG_SENSORS_SMSC47B397 is not set
666# CONFIG_SENSORS_SMSC47M1 is not set
667# CONFIG_SENSORS_W83781D is not set
668# CONFIG_SENSORS_W83L785TS is not set
669# CONFIG_SENSORS_W83627HF is not set
670# CONFIG_SENSORS_W83627EHF is not set
671
672#
673# Other I2C Chip support
674#
675# CONFIG_SENSORS_DS1337 is not set
676# CONFIG_SENSORS_DS1374 is not set
677# CONFIG_SENSORS_EEPROM is not set
678# CONFIG_SENSORS_PCF8574 is not set
679# CONFIG_SENSORS_PCA9539 is not set
680# CONFIG_SENSORS_PCF8591 is not set
681# CONFIG_SENSORS_RTC8564 is not set
682CONFIG_ISP1301_OMAP=y
683CONFIG_TPS65010=y
684# CONFIG_SENSORS_MAX6875 is not set
685# CONFIG_I2C_DEBUG_CORE is not set
686# CONFIG_I2C_DEBUG_ALGO is not set
687# CONFIG_I2C_DEBUG_BUS is not set
688# CONFIG_I2C_DEBUG_CHIP is not set
689 557
690# 558#
691# Misc devices 559# Misc devices
@@ -756,15 +624,9 @@ CONFIG_SOUND=y
756# Open Sound System 624# Open Sound System
757# 625#
758CONFIG_SOUND_PRIME=y 626CONFIG_SOUND_PRIME=y
759# CONFIG_SOUND_BT878 is not set
760# CONFIG_SOUND_FUSION is not set
761# CONFIG_SOUND_CS4281 is not set
762# CONFIG_SOUND_SONICVIBES is not set
763# CONFIG_SOUND_TRIDENT is not set
764# CONFIG_SOUND_MSNDCLAS is not set 627# CONFIG_SOUND_MSNDCLAS is not set
765# CONFIG_SOUND_MSNDPIN is not set 628# CONFIG_SOUND_MSNDPIN is not set
766# CONFIG_SOUND_OSS is not set 629# CONFIG_SOUND_OSS is not set
767# CONFIG_SOUND_TVMIXER is not set
768# CONFIG_SOUND_AD1980 is not set 630# CONFIG_SOUND_AD1980 is not set
769 631
770# 632#
@@ -810,6 +672,7 @@ CONFIG_EXT2_FS=y
810# CONFIG_JBD is not set 672# CONFIG_JBD is not set
811# CONFIG_REISERFS_FS is not set 673# CONFIG_REISERFS_FS is not set
812# CONFIG_JFS_FS is not set 674# CONFIG_JFS_FS is not set
675# CONFIG_FS_POSIX_ACL is not set
813 676
814# 677#
815# XFS support 678# XFS support
@@ -817,6 +680,7 @@ CONFIG_EXT2_FS=y
817# CONFIG_XFS_FS is not set 680# CONFIG_XFS_FS is not set
818# CONFIG_MINIX_FS is not set 681# CONFIG_MINIX_FS is not set
819CONFIG_ROMFS_FS=y 682CONFIG_ROMFS_FS=y
683CONFIG_INOTIFY=y
820# CONFIG_QUOTA is not set 684# CONFIG_QUOTA is not set
821CONFIG_DNOTIFY=y 685CONFIG_DNOTIFY=y
822# CONFIG_AUTOFS_FS is not set 686# CONFIG_AUTOFS_FS is not set
@@ -857,15 +721,6 @@ CONFIG_RAMFS=y
857# CONFIG_BEFS_FS is not set 721# CONFIG_BEFS_FS is not set
858# CONFIG_BFS_FS is not set 722# CONFIG_BFS_FS is not set
859# CONFIG_EFS_FS is not set 723# CONFIG_EFS_FS is not set
860# CONFIG_JFFS_FS is not set
861CONFIG_JFFS2_FS=y
862CONFIG_JFFS2_FS_DEBUG=2
863# CONFIG_JFFS2_FS_NAND is not set
864# CONFIG_JFFS2_FS_NOR_ECC is not set
865# CONFIG_JFFS2_COMPRESSION_OPTIONS is not set
866CONFIG_JFFS2_ZLIB=y
867CONFIG_JFFS2_RTIME=y
868# CONFIG_JFFS2_RUBIN is not set
869CONFIG_CRAMFS=y 724CONFIG_CRAMFS=y
870# CONFIG_VXFS_FS is not set 725# CONFIG_VXFS_FS is not set
871# CONFIG_HPFS_FS is not set 726# CONFIG_HPFS_FS is not set
@@ -1007,4 +862,3 @@ CONFIG_CRYPTO_DES=y
1007CONFIG_CRC32=y 862CONFIG_CRC32=y
1008# CONFIG_LIBCRC32C is not set 863# CONFIG_LIBCRC32C is not set
1009CONFIG_ZLIB_INFLATE=y 864CONFIG_ZLIB_INFLATE=y
1010CONFIG_ZLIB_DEFLATE=y
diff --git a/arch/arm/kernel/time.c b/arch/arm/kernel/time.c
index 8880482dcbff..69449a818dcc 100644
--- a/arch/arm/kernel/time.c
+++ b/arch/arm/kernel/time.c
@@ -102,7 +102,7 @@ static unsigned long next_rtc_update;
102 */ 102 */
103static inline void do_set_rtc(void) 103static inline void do_set_rtc(void)
104{ 104{
105 if (time_status & STA_UNSYNC || set_rtc == NULL) 105 if (!ntp_synced() || set_rtc == NULL)
106 return; 106 return;
107 107
108 if (next_rtc_update && 108 if (next_rtc_update &&
@@ -292,10 +292,7 @@ int do_settimeofday(struct timespec *tv)
292 set_normalized_timespec(&xtime, sec, nsec); 292 set_normalized_timespec(&xtime, sec, nsec);
293 set_normalized_timespec(&wall_to_monotonic, wtm_sec, wtm_nsec); 293 set_normalized_timespec(&wall_to_monotonic, wtm_sec, wtm_nsec);
294 294
295 time_adjust = 0; /* stop active adjtime() */ 295 ntp_clear();
296 time_status |= STA_UNSYNC;
297 time_maxerror = NTP_PHASE_LIMIT;
298 time_esterror = NTP_PHASE_LIMIT;
299 write_sequnlock_irq(&xtime_lock); 296 write_sequnlock_irq(&xtime_lock);
300 clock_was_set(); 297 clock_was_set();
301 return 0; 298 return 0;
diff --git a/arch/arm/mach-footbridge/Kconfig b/arch/arm/mach-footbridge/Kconfig
index 324d9edeec38..bdd257921cfb 100644
--- a/arch/arm/mach-footbridge/Kconfig
+++ b/arch/arm/mach-footbridge/Kconfig
@@ -87,6 +87,7 @@ config FOOTBRIDGE_ADDIN
87 87
88# EBSA285 board in either host or addin mode 88# EBSA285 board in either host or addin mode
89config ARCH_EBSA285 89config ARCH_EBSA285
90 select ARCH_MAY_HAVE_PC_FDC
90 bool 91 bool
91 92
92endif 93endif
diff --git a/arch/arm/mach-iop3xx/iop321-time.c b/arch/arm/mach-iop3xx/iop321-time.c
index d53af1669502..0039793b694a 100644
--- a/arch/arm/mach-iop3xx/iop321-time.c
+++ b/arch/arm/mach-iop3xx/iop321-time.c
@@ -60,7 +60,7 @@ static unsigned long iop321_gettimeoffset(void)
60 /* 60 /*
61 * Now convert them to usec. 61 * Now convert them to usec.
62 */ 62 */
63 usec = (unsigned long)(elapsed * (tick_nsec / 1000)) / LATCH; 63 usec = (unsigned long)(elapsed / (CLOCK_TICK_RATE/1000000));
64 64
65 return usec; 65 return usec;
66} 66}
diff --git a/arch/arm/mach-iop3xx/iop331-time.c b/arch/arm/mach-iop3xx/iop331-time.c
index 1a6d9d661e4b..8eddfac7e2b0 100644
--- a/arch/arm/mach-iop3xx/iop331-time.c
+++ b/arch/arm/mach-iop3xx/iop331-time.c
@@ -58,7 +58,7 @@ static unsigned long iop331_gettimeoffset(void)
58 /* 58 /*
59 * Now convert them to usec. 59 * Now convert them to usec.
60 */ 60 */
61 usec = (unsigned long)(elapsed * (tick_nsec / 1000)) / LATCH; 61 usec = (unsigned long)(elapsed / (CLOCK_TICK_RATE/1000000));
62 62
63 return usec; 63 return usec;
64} 64}
diff --git a/arch/arm/mach-ixp2000/core.c b/arch/arm/mach-ixp2000/core.c
index 781d10ae00b7..098c817a7fb8 100644
--- a/arch/arm/mach-ixp2000/core.c
+++ b/arch/arm/mach-ixp2000/core.c
@@ -382,7 +382,7 @@ static void ixp2000_GPIO_irq_unmask(unsigned int irq)
382static struct irqchip ixp2000_GPIO_irq_chip = { 382static struct irqchip ixp2000_GPIO_irq_chip = {
383 .ack = ixp2000_GPIO_irq_mask_ack, 383 .ack = ixp2000_GPIO_irq_mask_ack,
384 .mask = ixp2000_GPIO_irq_mask, 384 .mask = ixp2000_GPIO_irq_mask,
385 .unmask = ixp2000_GPIO_irq_unmask 385 .unmask = ixp2000_GPIO_irq_unmask,
386 .set_type = ixp2000_GPIO_irq_type, 386 .set_type = ixp2000_GPIO_irq_type,
387}; 387};
388 388
diff --git a/arch/arm/mach-ixp4xx/common.c b/arch/arm/mach-ixp4xx/common.c
index 0422e906cc9a..52ad11328e96 100644
--- a/arch/arm/mach-ixp4xx/common.c
+++ b/arch/arm/mach-ixp4xx/common.c
@@ -179,17 +179,17 @@ static void ixp4xx_irq_level_unmask(unsigned int irq)
179} 179}
180 180
181static struct irqchip ixp4xx_irq_level_chip = { 181static struct irqchip ixp4xx_irq_level_chip = {
182 .ack = ixp4xx_irq_mask, 182 .ack = ixp4xx_irq_mask,
183 .mask = ixp4xx_irq_mask, 183 .mask = ixp4xx_irq_mask,
184 .unmask = ixp4xx_irq_level_unmask, 184 .unmask = ixp4xx_irq_level_unmask,
185 .type = ixp4xx_set_irq_type 185 .set_type = ixp4xx_set_irq_type,
186}; 186};
187 187
188static struct irqchip ixp4xx_irq_edge_chip = { 188static struct irqchip ixp4xx_irq_edge_chip = {
189 .ack = ixp4xx_irq_ack, 189 .ack = ixp4xx_irq_ack,
190 .mask = ixp4xx_irq_mask, 190 .mask = ixp4xx_irq_mask,
191 .unmask = ixp4xx_irq_unmask, 191 .unmask = ixp4xx_irq_unmask,
192 .type = ixp4xx_set_irq_type 192 .set_type = ixp4xx_set_irq_type,
193}; 193};
194 194
195static void ixp4xx_config_irq(unsigned irq, enum ixp4xx_irq_type type) 195static void ixp4xx_config_irq(unsigned irq, enum ixp4xx_irq_type type)
diff --git a/arch/arm/mach-omap1/irq.c b/arch/arm/mach-omap1/irq.c
index a11b6d807352..afd5d67e4ae7 100644
--- a/arch/arm/mach-omap1/irq.c
+++ b/arch/arm/mach-omap1/irq.c
@@ -165,10 +165,10 @@ static struct omap_irq_bank omap1610_irq_banks[] = {
165#endif 165#endif
166 166
167static struct irqchip omap_irq_chip = { 167static struct irqchip omap_irq_chip = {
168 .ack = omap_mask_ack_irq, 168 .ack = omap_mask_ack_irq,
169 .mask = omap_mask_irq, 169 .mask = omap_mask_irq,
170 .unmask = omap_unmask_irq, 170 .unmask = omap_unmask_irq,
171 .wake = omap_wake_irq, 171 .set_wake = omap_wake_irq,
172}; 172};
173 173
174void __init omap_init_irq(void) 174void __init omap_init_irq(void)
diff --git a/arch/arm/mach-pxa/Makefile b/arch/arm/mach-pxa/Makefile
index efc2f657184e..33dae99ec2d8 100644
--- a/arch/arm/mach-pxa/Makefile
+++ b/arch/arm/mach-pxa/Makefile
@@ -11,7 +11,7 @@ obj-$(CONFIG_PXA27x) += pxa27x.o
11obj-$(CONFIG_ARCH_LUBBOCK) += lubbock.o 11obj-$(CONFIG_ARCH_LUBBOCK) += lubbock.o
12obj-$(CONFIG_MACH_MAINSTONE) += mainstone.o 12obj-$(CONFIG_MACH_MAINSTONE) += mainstone.o
13obj-$(CONFIG_ARCH_PXA_IDP) += idp.o 13obj-$(CONFIG_ARCH_PXA_IDP) += idp.o
14obj-$(CONFIG_PXA_SHARP_C7xx) += corgi.o corgi_ssp.o ssp.o 14obj-$(CONFIG_PXA_SHARP_C7xx) += corgi.o corgi_ssp.o corgi_lcd.o ssp.o
15obj-$(CONFIG_MACH_POODLE) += poodle.o 15obj-$(CONFIG_MACH_POODLE) += poodle.o
16 16
17# Support for blinky lights 17# Support for blinky lights
diff --git a/arch/arm/mach-pxa/corgi.c b/arch/arm/mach-pxa/corgi.c
index 06ea730e8675..29185acdd9e1 100644
--- a/arch/arm/mach-pxa/corgi.c
+++ b/arch/arm/mach-pxa/corgi.c
@@ -39,7 +39,6 @@
39 39
40#include <asm/mach/sharpsl_param.h> 40#include <asm/mach/sharpsl_param.h>
41#include <asm/hardware/scoop.h> 41#include <asm/hardware/scoop.h>
42#include <video/w100fb.h>
43 42
44#include "generic.h" 43#include "generic.h"
45 44
@@ -87,7 +86,7 @@ struct platform_device corgiscoop_device = {
87 * also use scoop functions and this makes the power up/down order 86 * also use scoop functions and this makes the power up/down order
88 * work correctly. 87 * work correctly.
89 */ 88 */
90static struct platform_device corgissp_device = { 89struct platform_device corgissp_device = {
91 .name = "corgi-ssp", 90 .name = "corgi-ssp",
92 .dev = { 91 .dev = {
93 .parent = &corgiscoop_device.dev, 92 .parent = &corgiscoop_device.dev,
@@ -97,41 +96,33 @@ static struct platform_device corgissp_device = {
97 96
98 97
99/* 98/*
100 * Corgi w100 Frame Buffer Device 99 * Corgi Backlight Device
101 */ 100 */
102static struct w100fb_mach_info corgi_fb_info = { 101static struct platform_device corgibl_device = {
103 .w100fb_ssp_send = corgi_ssp_lcdtg_send, 102 .name = "corgi-bl",
104 .comadj = -1, 103 .dev = {
105 .phadadj = -1, 104 .parent = &corgifb_device.dev,
106};
107
108static struct resource corgi_fb_resources[] = {
109 [0] = {
110 .start = 0x08000000,
111 .end = 0x08ffffff,
112 .flags = IORESOURCE_MEM,
113 }, 105 },
106 .id = -1,
114}; 107};
115 108
116static struct platform_device corgifb_device = { 109
117 .name = "w100fb", 110/*
111 * Corgi Keyboard Device
112 */
113static struct platform_device corgikbd_device = {
114 .name = "corgi-keyboard",
118 .id = -1, 115 .id = -1,
119 .dev = {
120 .platform_data = &corgi_fb_info,
121 .parent = &corgissp_device.dev,
122 },
123 .num_resources = ARRAY_SIZE(corgi_fb_resources),
124 .resource = corgi_fb_resources,
125}; 116};
126 117
127 118
128/* 119/*
129 * Corgi Backlight Device 120 * Corgi Touch Screen Device
130 */ 121 */
131static struct platform_device corgibl_device = { 122static struct platform_device corgits_device = {
132 .name = "corgi-bl", 123 .name = "corgi-ts",
133 .dev = { 124 .dev = {
134 .parent = &corgifb_device.dev, 125 .parent = &corgissp_device.dev,
135 }, 126 },
136 .id = -1, 127 .id = -1,
137}; 128};
@@ -199,6 +190,11 @@ static void corgi_mci_setpower(struct device *dev, unsigned int vdd)
199 } 190 }
200} 191}
201 192
193static int corgi_mci_get_ro(struct device *dev)
194{
195 return GPLR(CORGI_GPIO_nSD_WP) & GPIO_bit(CORGI_GPIO_nSD_WP);
196}
197
202static void corgi_mci_exit(struct device *dev, void *data) 198static void corgi_mci_exit(struct device *dev, void *data)
203{ 199{
204 free_irq(CORGI_IRQ_GPIO_nSD_DETECT, data); 200 free_irq(CORGI_IRQ_GPIO_nSD_DETECT, data);
@@ -208,11 +204,13 @@ static void corgi_mci_exit(struct device *dev, void *data)
208static struct pxamci_platform_data corgi_mci_platform_data = { 204static struct pxamci_platform_data corgi_mci_platform_data = {
209 .ocr_mask = MMC_VDD_32_33|MMC_VDD_33_34, 205 .ocr_mask = MMC_VDD_32_33|MMC_VDD_33_34,
210 .init = corgi_mci_init, 206 .init = corgi_mci_init,
207 .get_ro = corgi_mci_get_ro,
211 .setpower = corgi_mci_setpower, 208 .setpower = corgi_mci_setpower,
212 .exit = corgi_mci_exit, 209 .exit = corgi_mci_exit,
213}; 210};
214 211
215 212
213
216/* 214/*
217 * USB Device Controller 215 * USB Device Controller
218 */ 216 */
@@ -238,14 +236,13 @@ static struct platform_device *devices[] __initdata = {
238 &corgiscoop_device, 236 &corgiscoop_device,
239 &corgissp_device, 237 &corgissp_device,
240 &corgifb_device, 238 &corgifb_device,
239 &corgikbd_device,
241 &corgibl_device, 240 &corgibl_device,
241 &corgits_device,
242}; 242};
243 243
244static void __init corgi_init(void) 244static void __init corgi_init(void)
245{ 245{
246 corgi_fb_info.comadj=sharpsl_param.comadj;
247 corgi_fb_info.phadadj=sharpsl_param.phadadj;
248
249 pxa_gpio_mode(CORGI_GPIO_USB_PULLUP | GPIO_OUT); 246 pxa_gpio_mode(CORGI_GPIO_USB_PULLUP | GPIO_OUT);
250 pxa_set_udc_info(&udc_info); 247 pxa_set_udc_info(&udc_info);
251 pxa_set_mci_info(&corgi_mci_platform_data); 248 pxa_set_mci_info(&corgi_mci_platform_data);
diff --git a/arch/arm/mach-pxa/corgi_lcd.c b/arch/arm/mach-pxa/corgi_lcd.c
new file mode 100644
index 000000000000..deac29c00290
--- /dev/null
+++ b/arch/arm/mach-pxa/corgi_lcd.c
@@ -0,0 +1,396 @@
1/*
2 * linux/drivers/video/w100fb.c
3 *
4 * Corgi LCD Specific Code for ATI Imageon w100 (Wallaby)
5 *
6 * Copyright (C) 2005 Richard Purdie
7 *
8 * This program is free software; you can redistribute it and/or modify
9 * it under the terms of the GNU General Public License version 2 as
10 * published by the Free Software Foundation.
11 *
12 */
13
14#include <linux/delay.h>
15#include <linux/kernel.h>
16#include <linux/device.h>
17#include <asm/arch/corgi.h>
18#include <asm/mach/sharpsl_param.h>
19#include <video/w100fb.h>
20
21/* Register Addresses */
22#define RESCTL_ADRS 0x00
23#define PHACTRL_ADRS 0x01
24#define DUTYCTRL_ADRS 0x02
25#define POWERREG0_ADRS 0x03
26#define POWERREG1_ADRS 0x04
27#define GPOR3_ADRS 0x05
28#define PICTRL_ADRS 0x06
29#define POLCTRL_ADRS 0x07
30
31/* Register Bit Definitions */
32#define RESCTL_QVGA 0x01
33#define RESCTL_VGA 0x00
34
35#define POWER1_VW_ON 0x01 /* VW Supply FET ON */
36#define POWER1_GVSS_ON 0x02 /* GVSS(-8V) Power Supply ON */
37#define POWER1_VDD_ON 0x04 /* VDD(8V),SVSS(-4V) Power Supply ON */
38
39#define POWER1_VW_OFF 0x00 /* VW Supply FET OFF */
40#define POWER1_GVSS_OFF 0x00 /* GVSS(-8V) Power Supply OFF */
41#define POWER1_VDD_OFF 0x00 /* VDD(8V),SVSS(-4V) Power Supply OFF */
42
43#define POWER0_COM_DCLK 0x01 /* COM Voltage DC Bias DAC Serial Data Clock */
44#define POWER0_COM_DOUT 0x02 /* COM Voltage DC Bias DAC Serial Data Out */
45#define POWER0_DAC_ON 0x04 /* DAC Power Supply ON */
46#define POWER0_COM_ON 0x08 /* COM Power Supply ON */
47#define POWER0_VCC5_ON 0x10 /* VCC5 Power Supply ON */
48
49#define POWER0_DAC_OFF 0x00 /* DAC Power Supply OFF */
50#define POWER0_COM_OFF 0x00 /* COM Power Supply OFF */
51#define POWER0_VCC5_OFF 0x00 /* VCC5 Power Supply OFF */
52
53#define PICTRL_INIT_STATE 0x01
54#define PICTRL_INIOFF 0x02
55#define PICTRL_POWER_DOWN 0x04
56#define PICTRL_COM_SIGNAL_OFF 0x08
57#define PICTRL_DAC_SIGNAL_OFF 0x10
58
59#define POLCTRL_SYNC_POL_FALL 0x01
60#define POLCTRL_EN_POL_FALL 0x02
61#define POLCTRL_DATA_POL_FALL 0x04
62#define POLCTRL_SYNC_ACT_H 0x08
63#define POLCTRL_EN_ACT_L 0x10
64
65#define POLCTRL_SYNC_POL_RISE 0x00
66#define POLCTRL_EN_POL_RISE 0x00
67#define POLCTRL_DATA_POL_RISE 0x00
68#define POLCTRL_SYNC_ACT_L 0x00
69#define POLCTRL_EN_ACT_H 0x00
70
71#define PHACTRL_PHASE_MANUAL 0x01
72#define DEFAULT_PHAD_QVGA (9)
73#define DEFAULT_COMADJ (125)
74
75/*
76 * This is only a pseudo I2C interface. We can't use the standard kernel
77 * routines as the interface is write only. We just assume the data is acked...
78 */
79static void lcdtg_ssp_i2c_send(u8 data)
80{
81 corgi_ssp_lcdtg_send(POWERREG0_ADRS, data);
82 udelay(10);
83}
84
85static void lcdtg_i2c_send_bit(u8 data)
86{
87 lcdtg_ssp_i2c_send(data);
88 lcdtg_ssp_i2c_send(data | POWER0_COM_DCLK);
89 lcdtg_ssp_i2c_send(data);
90}
91
92static void lcdtg_i2c_send_start(u8 base)
93{
94 lcdtg_ssp_i2c_send(base | POWER0_COM_DCLK | POWER0_COM_DOUT);
95 lcdtg_ssp_i2c_send(base | POWER0_COM_DCLK);
96 lcdtg_ssp_i2c_send(base);
97}
98
99static void lcdtg_i2c_send_stop(u8 base)
100{
101 lcdtg_ssp_i2c_send(base);
102 lcdtg_ssp_i2c_send(base | POWER0_COM_DCLK);
103 lcdtg_ssp_i2c_send(base | POWER0_COM_DCLK | POWER0_COM_DOUT);
104}
105
106static void lcdtg_i2c_send_byte(u8 base, u8 data)
107{
108 int i;
109 for (i = 0; i < 8; i++) {
110 if (data & 0x80)
111 lcdtg_i2c_send_bit(base | POWER0_COM_DOUT);
112 else
113 lcdtg_i2c_send_bit(base);
114 data <<= 1;
115 }
116}
117
118static void lcdtg_i2c_wait_ack(u8 base)
119{
120 lcdtg_i2c_send_bit(base);
121}
122
123static void lcdtg_set_common_voltage(u8 base_data, u8 data)
124{
125 /* Set Common Voltage to M62332FP via I2C */
126 lcdtg_i2c_send_start(base_data);
127 lcdtg_i2c_send_byte(base_data, 0x9c);
128 lcdtg_i2c_wait_ack(base_data);
129 lcdtg_i2c_send_byte(base_data, 0x00);
130 lcdtg_i2c_wait_ack(base_data);
131 lcdtg_i2c_send_byte(base_data, data);
132 lcdtg_i2c_wait_ack(base_data);
133 lcdtg_i2c_send_stop(base_data);
134}
135
136/* Set Phase Adjust */
137static void lcdtg_set_phadadj(struct w100fb_par *par)
138{
139 int adj;
140 switch(par->xres) {
141 case 480:
142 case 640:
143 /* Setting for VGA */
144 adj = sharpsl_param.phadadj;
145 if (adj < 0) {
146 adj = PHACTRL_PHASE_MANUAL;
147 } else {
148 adj = ((adj & 0x0f) << 1) | PHACTRL_PHASE_MANUAL;
149 }
150 break;
151 case 240:
152 case 320:
153 default:
154 /* Setting for QVGA */
155 adj = (DEFAULT_PHAD_QVGA << 1) | PHACTRL_PHASE_MANUAL;
156 break;
157 }
158
159 corgi_ssp_lcdtg_send(PHACTRL_ADRS, adj);
160}
161
162static int lcd_inited;
163
164static void lcdtg_hw_init(struct w100fb_par *par)
165{
166 if (!lcd_inited) {
167 int comadj;
168
169 /* Initialize Internal Logic & Port */
170 corgi_ssp_lcdtg_send(PICTRL_ADRS, PICTRL_POWER_DOWN | PICTRL_INIOFF | PICTRL_INIT_STATE
171 | PICTRL_COM_SIGNAL_OFF | PICTRL_DAC_SIGNAL_OFF);
172
173 corgi_ssp_lcdtg_send(POWERREG0_ADRS, POWER0_COM_DCLK | POWER0_COM_DOUT | POWER0_DAC_OFF
174 | POWER0_COM_OFF | POWER0_VCC5_OFF);
175
176 corgi_ssp_lcdtg_send(POWERREG1_ADRS, POWER1_VW_OFF | POWER1_GVSS_OFF | POWER1_VDD_OFF);
177
178 /* VDD(+8V), SVSS(-4V) ON */
179 corgi_ssp_lcdtg_send(POWERREG1_ADRS, POWER1_VW_OFF | POWER1_GVSS_OFF | POWER1_VDD_ON);
180 mdelay(3);
181
182 /* DAC ON */
183 corgi_ssp_lcdtg_send(POWERREG0_ADRS, POWER0_COM_DCLK | POWER0_COM_DOUT | POWER0_DAC_ON
184 | POWER0_COM_OFF | POWER0_VCC5_OFF);
185
186 /* INIB = H, INI = L */
187 /* PICTL[0] = H , PICTL[1] = PICTL[2] = PICTL[4] = L */
188 corgi_ssp_lcdtg_send(PICTRL_ADRS, PICTRL_INIT_STATE | PICTRL_COM_SIGNAL_OFF);
189
190 /* Set Common Voltage */
191 comadj = sharpsl_param.comadj;
192 if (comadj < 0)
193 comadj = DEFAULT_COMADJ;
194 lcdtg_set_common_voltage((POWER0_DAC_ON | POWER0_COM_OFF | POWER0_VCC5_OFF), comadj);
195
196 /* VCC5 ON, DAC ON */
197 corgi_ssp_lcdtg_send(POWERREG0_ADRS, POWER0_COM_DCLK | POWER0_COM_DOUT | POWER0_DAC_ON |
198 POWER0_COM_OFF | POWER0_VCC5_ON);
199
200 /* GVSS(-8V) ON, VDD ON */
201 corgi_ssp_lcdtg_send(POWERREG1_ADRS, POWER1_VW_OFF | POWER1_GVSS_ON | POWER1_VDD_ON);
202 mdelay(2);
203
204 /* COM SIGNAL ON (PICTL[3] = L) */
205 corgi_ssp_lcdtg_send(PICTRL_ADRS, PICTRL_INIT_STATE);
206
207 /* COM ON, DAC ON, VCC5_ON */
208 corgi_ssp_lcdtg_send(POWERREG0_ADRS, POWER0_COM_DCLK | POWER0_COM_DOUT | POWER0_DAC_ON
209 | POWER0_COM_ON | POWER0_VCC5_ON);
210
211 /* VW ON, GVSS ON, VDD ON */
212 corgi_ssp_lcdtg_send(POWERREG1_ADRS, POWER1_VW_ON | POWER1_GVSS_ON | POWER1_VDD_ON);
213
214 /* Signals output enable */
215 corgi_ssp_lcdtg_send(PICTRL_ADRS, 0);
216
217 /* Set Phase Adjust */
218 lcdtg_set_phadadj(par);
219
220 /* Initialize for Input Signals from ATI */
221 corgi_ssp_lcdtg_send(POLCTRL_ADRS, POLCTRL_SYNC_POL_RISE | POLCTRL_EN_POL_RISE
222 | POLCTRL_DATA_POL_RISE | POLCTRL_SYNC_ACT_L | POLCTRL_EN_ACT_H);
223 udelay(1000);
224
225 lcd_inited=1;
226 } else {
227 lcdtg_set_phadadj(par);
228 }
229
230 switch(par->xres) {
231 case 480:
232 case 640:
233 /* Set Lcd Resolution (VGA) */
234 corgi_ssp_lcdtg_send(RESCTL_ADRS, RESCTL_VGA);
235 break;
236 case 240:
237 case 320:
238 default:
239 /* Set Lcd Resolution (QVGA) */
240 corgi_ssp_lcdtg_send(RESCTL_ADRS, RESCTL_QVGA);
241 break;
242 }
243}
244
245static void lcdtg_suspend(struct w100fb_par *par)
246{
247 /* 60Hz x 2 frame = 16.7msec x 2 = 33.4 msec */
248 mdelay(34);
249
250 /* (1)VW OFF */
251 corgi_ssp_lcdtg_send(POWERREG1_ADRS, POWER1_VW_OFF | POWER1_GVSS_ON | POWER1_VDD_ON);
252
253 /* (2)COM OFF */
254 corgi_ssp_lcdtg_send(PICTRL_ADRS, PICTRL_COM_SIGNAL_OFF);
255 corgi_ssp_lcdtg_send(POWERREG0_ADRS, POWER0_DAC_ON | POWER0_COM_OFF | POWER0_VCC5_ON);
256
257 /* (3)Set Common Voltage Bias 0V */
258 lcdtg_set_common_voltage(POWER0_DAC_ON | POWER0_COM_OFF | POWER0_VCC5_ON, 0);
259
260 /* (4)GVSS OFF */
261 corgi_ssp_lcdtg_send(POWERREG1_ADRS, POWER1_VW_OFF | POWER1_GVSS_OFF | POWER1_VDD_ON);
262
263 /* (5)VCC5 OFF */
264 corgi_ssp_lcdtg_send(POWERREG0_ADRS, POWER0_DAC_ON | POWER0_COM_OFF | POWER0_VCC5_OFF);
265
266 /* (6)Set PDWN, INIOFF, DACOFF */
267 corgi_ssp_lcdtg_send(PICTRL_ADRS, PICTRL_INIOFF | PICTRL_DAC_SIGNAL_OFF |
268 PICTRL_POWER_DOWN | PICTRL_COM_SIGNAL_OFF);
269
270 /* (7)DAC OFF */
271 corgi_ssp_lcdtg_send(POWERREG0_ADRS, POWER0_DAC_OFF | POWER0_COM_OFF | POWER0_VCC5_OFF);
272
273 /* (8)VDD OFF */
274 corgi_ssp_lcdtg_send(POWERREG1_ADRS, POWER1_VW_OFF | POWER1_GVSS_OFF | POWER1_VDD_OFF);
275
276 lcd_inited = 0;
277}
278
279static struct w100_tg_info corgi_lcdtg_info = {
280 .change=lcdtg_hw_init,
281 .suspend=lcdtg_suspend,
282 .resume=lcdtg_hw_init,
283};
284
285/*
286 * Corgi w100 Frame Buffer Device
287 */
288
289static struct w100_mem_info corgi_fb_mem = {
290 .ext_cntl = 0x00040003,
291 .sdram_mode_reg = 0x00650021,
292 .ext_timing_cntl = 0x10002a4a,
293 .io_cntl = 0x7ff87012,
294 .size = 0x1fffff,
295};
296
297static struct w100_gen_regs corgi_fb_regs = {
298 .lcd_format = 0x00000003,
299 .lcdd_cntl1 = 0x01CC0000,
300 .lcdd_cntl2 = 0x0003FFFF,
301 .genlcd_cntl1 = 0x00FFFF0D,
302 .genlcd_cntl2 = 0x003F3003,
303 .genlcd_cntl3 = 0x000102aa,
304};
305
306static struct w100_gpio_regs corgi_fb_gpio = {
307 .init_data1 = 0x000000bf,
308 .init_data2 = 0x00000000,
309 .gpio_dir1 = 0x00000000,
310 .gpio_oe1 = 0x03c0feff,
311 .gpio_dir2 = 0x00000000,
312 .gpio_oe2 = 0x00000000,
313};
314
315static struct w100_mode corgi_fb_modes[] = {
316{
317 .xres = 480,
318 .yres = 640,
319 .left_margin = 0x56,
320 .right_margin = 0x55,
321 .upper_margin = 0x03,
322 .lower_margin = 0x00,
323 .crtc_ss = 0x82360056,
324 .crtc_ls = 0xA0280000,
325 .crtc_gs = 0x80280028,
326 .crtc_vpos_gs = 0x02830002,
327 .crtc_rev = 0x00400008,
328 .crtc_dclk = 0xA0000000,
329 .crtc_gclk = 0x8015010F,
330 .crtc_goe = 0x80100110,
331 .crtc_ps1_active = 0x41060010,
332 .pll_freq = 75,
333 .fast_pll_freq = 100,
334 .sysclk_src = CLK_SRC_PLL,
335 .sysclk_divider = 0,
336 .pixclk_src = CLK_SRC_PLL,
337 .pixclk_divider = 2,
338 .pixclk_divider_rotated = 6,
339},{
340 .xres = 240,
341 .yres = 320,
342 .left_margin = 0x27,
343 .right_margin = 0x2e,
344 .upper_margin = 0x01,
345 .lower_margin = 0x00,
346 .crtc_ss = 0x81170027,
347 .crtc_ls = 0xA0140000,
348 .crtc_gs = 0xC0140014,
349 .crtc_vpos_gs = 0x00010141,
350 .crtc_rev = 0x00400008,
351 .crtc_dclk = 0xA0000000,
352 .crtc_gclk = 0x8015010F,
353 .crtc_goe = 0x80100110,
354 .crtc_ps1_active = 0x41060010,
355 .pll_freq = 0,
356 .fast_pll_freq = 0,
357 .sysclk_src = CLK_SRC_XTAL,
358 .sysclk_divider = 0,
359 .pixclk_src = CLK_SRC_XTAL,
360 .pixclk_divider = 1,
361 .pixclk_divider_rotated = 1,
362},
363
364};
365
366static struct w100fb_mach_info corgi_fb_info = {
367 .tg = &corgi_lcdtg_info,
368 .init_mode = INIT_MODE_ROTATED,
369 .mem = &corgi_fb_mem,
370 .regs = &corgi_fb_regs,
371 .modelist = &corgi_fb_modes[0],
372 .num_modes = 2,
373 .gpio = &corgi_fb_gpio,
374 .xtal_freq = 12500000,
375 .xtal_dbl = 0,
376};
377
378static struct resource corgi_fb_resources[] = {
379 [0] = {
380 .start = 0x08000000,
381 .end = 0x08ffffff,
382 .flags = IORESOURCE_MEM,
383 },
384};
385
386struct platform_device corgifb_device = {
387 .name = "w100fb",
388 .id = -1,
389 .num_resources = ARRAY_SIZE(corgi_fb_resources),
390 .resource = corgi_fb_resources,
391 .dev = {
392 .platform_data = &corgi_fb_info,
393 .parent = &corgissp_device.dev,
394 },
395
396};
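The new file exports corgifb_device with corgi_fb_info as its platform data, and the timing-generator hooks (change/suspend/resume) are reached through w100fb_mach_info. A minimal, hypothetical sketch of how a w100-style framebuffer driver could pick this up follows; corgi_tg_example() and its calling context are illustrative, not taken from the real driver.

#include <linux/device.h>
#include <video/w100fb.h>

/* Hypothetical sketch: fetch the board data registered above and run the
 * board-supplied timing-generator hook (lcdtg_hw_init() on Corgi). */
static void corgi_tg_example(struct device *dev, struct w100fb_par *par)
{
	struct w100fb_mach_info *inf = dev->platform_data;

	if (inf->tg && inf->tg->change)
		inf->tg->change(par);
}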
diff --git a/arch/arm/mach-s3c2410/Kconfig b/arch/arm/mach-s3c2410/Kconfig
index d4d03d0daaec..06807c6ee68a 100644
--- a/arch/arm/mach-s3c2410/Kconfig
+++ b/arch/arm/mach-s3c2410/Kconfig
@@ -2,6 +2,13 @@ if ARCH_S3C2410
2 2
3menu "S3C24XX Implementations" 3menu "S3C24XX Implementations"
4 4
5config MACH_ANUBIS
6 bool "Simtec Electronics ANUBIS"
7 select CPU_S3C2440
8 help
9 Say Y here if you are using the Simtec Electronics ANUBIS
10 development system
11
5config ARCH_BAST 12config ARCH_BAST
6 bool "Simtec Electronics BAST (EB2410ITX)" 13 bool "Simtec Electronics BAST (EB2410ITX)"
7 select CPU_S3C2410 14 select CPU_S3C2410
@@ -11,6 +18,14 @@ config ARCH_BAST
11 18
12 Product page: <http://www.simtec.co.uk/products/EB2410ITX/>. 19 Product page: <http://www.simtec.co.uk/products/EB2410ITX/>.
13 20
21config BAST_PC104_IRQ
22 bool "BAST PC104 IRQ support"
23 depends on ARCH_BAST
24 default y
25 help
26 Say Y here to enable the PC104 IRQ routing on the
27 Simtec BAST (EB2410ITX)
28
14config ARCH_H1940 29config ARCH_H1940
15 bool "IPAQ H1940" 30 bool "IPAQ H1940"
16 select CPU_S3C2410 31 select CPU_S3C2410
diff --git a/arch/arm/mach-s3c2410/Makefile b/arch/arm/mach-s3c2410/Makefile
index 55ed7c7e57da..b4f1e051c768 100644
--- a/arch/arm/mach-s3c2410/Makefile
+++ b/arch/arm/mach-s3c2410/Makefile
@@ -26,8 +26,13 @@ obj-$(CONFIG_CPU_S3C2440) += s3c2440.o s3c2440-dsc.o
26obj-$(CONFIG_CPU_S3C2440) += s3c2440-irq.o 26obj-$(CONFIG_CPU_S3C2440) += s3c2440-irq.o
27obj-$(CONFIG_CPU_S3C2440) += s3c2440-clock.o 27obj-$(CONFIG_CPU_S3C2440) += s3c2440-clock.o
28 28
29# bast extras
30
31obj-$(CONFIG_BAST_PC104_IRQ) += bast-irq.o
32
29# machine specific support 33# machine specific support
30 34
35obj-$(CONFIG_MACH_ANUBIS) += mach-anubis.o
31obj-$(CONFIG_ARCH_BAST) += mach-bast.o usb-simtec.o 36obj-$(CONFIG_ARCH_BAST) += mach-bast.o usb-simtec.o
32obj-$(CONFIG_ARCH_H1940) += mach-h1940.o 37obj-$(CONFIG_ARCH_H1940) += mach-h1940.o
33obj-$(CONFIG_MACH_N30) += mach-n30.o 38obj-$(CONFIG_MACH_N30) += mach-n30.o
diff --git a/arch/arm/mach-s3c2410/bast-irq.c b/arch/arm/mach-s3c2410/bast-irq.c
index 49914709fa09..fbbeb0553006 100644
--- a/arch/arm/mach-s3c2410/bast-irq.c
+++ b/arch/arm/mach-s3c2410/bast-irq.c
@@ -1,6 +1,6 @@
1/* linux/arch/arm/mach-s3c2410/bast-irq.c 1/* linux/arch/arm/mach-s3c2410/bast-irq.c
2 * 2 *
3 * Copyright (c) 2004 Simtec Electronics 3 * Copyright (c) 2003,2005 Simtec Electronics
4 * Ben Dooks <ben@simtec.co.uk> 4 * Ben Dooks <ben@simtec.co.uk>
5 * 5 *
6 * http://www.simtec.co.uk/products/EB2410ITX/ 6 * http://www.simtec.co.uk/products/EB2410ITX/
@@ -21,7 +21,8 @@
21 * 21 *
22 * Modifications: 22 * Modifications:
23 * 08-Jan-2003 BJD Moved from central IRQ code 23 * 08-Jan-2003 BJD Moved from central IRQ code
24 */ 24 * 21-Aug-2005 BJD Fixed missing code and compile errors
25*/
25 26
26 27
27#include <linux/init.h> 28#include <linux/init.h>
@@ -30,12 +31,19 @@
30#include <linux/ptrace.h> 31#include <linux/ptrace.h>
31#include <linux/sysdev.h> 32#include <linux/sysdev.h>
32 33
34#include <asm/mach-types.h>
35
33#include <asm/hardware.h> 36#include <asm/hardware.h>
34#include <asm/irq.h> 37#include <asm/irq.h>
35#include <asm/io.h> 38#include <asm/io.h>
36 39
37#include <asm/mach/irq.h> 40#include <asm/mach/irq.h>
38#include <asm/hardware/s3c2410/irq.h> 41
42#include <asm/arch/regs-irq.h>
43#include <asm/arch/bast-map.h>
44#include <asm/arch/bast-irq.h>
45
46#include "irq.h"
39 47
40#if 0 48#if 0
41#include <asm/debug-ll.h> 49#include <asm/debug-ll.h>
@@ -79,15 +87,15 @@ bast_pc104_mask(unsigned int irqno)
79 temp = __raw_readb(BAST_VA_PC104_IRQMASK); 87 temp = __raw_readb(BAST_VA_PC104_IRQMASK);
80 temp &= ~bast_pc104_irqmasks[irqno]; 88 temp &= ~bast_pc104_irqmasks[irqno];
81 __raw_writeb(temp, BAST_VA_PC104_IRQMASK); 89 __raw_writeb(temp, BAST_VA_PC104_IRQMASK);
82
83 if (temp == 0)
84 bast_extint_mask(IRQ_ISA);
85} 90}
86 91
87static void 92static void
88bast_pc104_ack(unsigned int irqno) 93bast_pc104_maskack(unsigned int irqno)
89{ 94{
90 bast_extint_ack(IRQ_ISA); 95 struct irqdesc *desc = irq_desc + IRQ_ISA;
96
97 bast_pc104_mask(irqno);
98 desc->chip->ack(IRQ_ISA);
91} 99}
92 100
93static void 101static void
@@ -98,14 +106,12 @@ bast_pc104_unmask(unsigned int irqno)
98 temp = __raw_readb(BAST_VA_PC104_IRQMASK); 106 temp = __raw_readb(BAST_VA_PC104_IRQMASK);
99 temp |= bast_pc104_irqmasks[irqno]; 107 temp |= bast_pc104_irqmasks[irqno];
100 __raw_writeb(temp, BAST_VA_PC104_IRQMASK); 108 __raw_writeb(temp, BAST_VA_PC104_IRQMASK);
101
102 bast_extint_unmask(IRQ_ISA);
103} 109}
104 110
105static struct bast_pc104_chip = { 111static struct irqchip bast_pc104_chip = {
106 .mask = bast_pc104_mask, 112 .mask = bast_pc104_mask,
107 .unmask = bast_pc104_unmask, 113 .unmask = bast_pc104_unmask,
108 .ack = bast_pc104_ack 114 .ack = bast_pc104_maskack
109}; 115};
110 116
111static void 117static void
@@ -119,14 +125,49 @@ bast_irq_pc104_demux(unsigned int irq,
119 125
120 stat = __raw_readb(BAST_VA_PC104_IRQREQ) & 0xf; 126 stat = __raw_readb(BAST_VA_PC104_IRQREQ) & 0xf;
121 127
122 for (i = 0; i < 4 && stat != 0; i++) { 128 if (unlikely(stat == 0)) {
123 if (stat & 1) { 129 /* ack if we get an irq with nothing (ie, startup) */
124 irqno = bast_pc104_irqs[i]; 130
125 desc = irq_desc + irqno; 131 desc = irq_desc + IRQ_ISA;
132 desc->chip->ack(IRQ_ISA);
133 } else {
134 /* handle the IRQ */
135
136 for (i = 0; stat != 0; i++, stat >>= 1) {
137 if (stat & 1) {
138 irqno = bast_pc104_irqs[i];
126 139
127 desc_handle_irq(irqno, desc, regs); 140 desc_handle_irq(irqno, irq_desc + irqno, regs);
141 }
128 } 142 }
143 }
144}
129 145
130 stat >>= 1; 146static __init int bast_irq_init(void)
147{
148 unsigned int i;
149
150 if (machine_is_bast()) {
151 printk(KERN_INFO "BAST PC104 IRQ routing, (c) 2005 Simtec Electronics\n");
152
153 /* zap all the IRQs */
154
155 __raw_writeb(0x0, BAST_VA_PC104_IRQMASK);
156
157 set_irq_chained_handler(IRQ_ISA, bast_irq_pc104_demux);
158
159 /* register our IRQs */
160
161 for (i = 0; i < 4; i++) {
162 unsigned int irqno = bast_pc104_irqs[i];
163
164 set_irq_chip(irqno, &bast_pc104_chip);
165 set_irq_handler(irqno, do_level_IRQ);
166 set_irq_flags(irqno, IRQF_VALID);
167 }
131 } 168 }
169
170 return 0;
132} 171}
172
173arch_initcall(bast_irq_init);
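Once bast_irq_init() has registered the four PC104 lines behind the chained IRQ_ISA handler, a driver for an expansion card claims one with the normal request_irq() path. The sketch below is purely illustrative; IRQ_PC104_A stands in for one of the bast_pc104_irqs[] entries and the handler names are invented.

#include <linux/interrupt.h>

static irqreturn_t pc104_card_isr(int irq, void *dev_id, struct pt_regs *regs)
{
	/* service the expansion-card interrupt; demux and ack are done above */
	return IRQ_HANDLED;
}

static int pc104_card_attach(void *dev)
{
	/* IRQ_PC104_A is a placeholder for one of bast_pc104_irqs[] */
	return request_irq(IRQ_PC104_A, pc104_card_isr, 0, "pc104-card", dev);
}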
diff --git a/arch/arm/mach-s3c2410/mach-anubis.c b/arch/arm/mach-s3c2410/mach-anubis.c
new file mode 100644
index 000000000000..f87aa0b669ad
--- /dev/null
+++ b/arch/arm/mach-s3c2410/mach-anubis.c
@@ -0,0 +1,270 @@
1/* linux/arch/arm/mach-s3c2410/mach-anubis.c
2 *
3 * Copyright (c) 2003-2005 Simtec Electronics
4 * http://armlinux.simtec.co.uk/
5 * Ben Dooks <ben@simtec.co.uk>
6 *
7 *
8 *
9 * This program is free software; you can redistribute it and/or modify
10 * it under the terms of the GNU General Public License version 2 as
11 * published by the Free Software Foundation.
12 *
13 * Modifications:
14 * 02-May-2005 BJD Copied from mach-bast.c
15*/
16
17#include <linux/kernel.h>
18#include <linux/types.h>
19#include <linux/interrupt.h>
20#include <linux/list.h>
21#include <linux/timer.h>
22#include <linux/init.h>
23#include <linux/device.h>
24
25#include <asm/mach/arch.h>
26#include <asm/mach/map.h>
27#include <asm/mach/irq.h>
28
29#include <asm/arch/anubis-map.h>
30#include <asm/arch/anubis-irq.h>
31#include <asm/arch/anubis-cpld.h>
32
33#include <asm/hardware.h>
34#include <asm/io.h>
35#include <asm/irq.h>
36#include <asm/mach-types.h>
37
38#include <asm/arch/regs-serial.h>
39#include <asm/arch/regs-gpio.h>
40#include <asm/arch/regs-mem.h>
41#include <asm/arch/regs-lcd.h>
42#include <asm/arch/nand.h>
43
44#include <linux/mtd/mtd.h>
45#include <linux/mtd/nand.h>
46#include <linux/mtd/nand_ecc.h>
47#include <linux/mtd/partitions.h>
48
49#include "clock.h"
50#include "devs.h"
51#include "cpu.h"
52
53#define COPYRIGHT ", (c) 2005 Simtec Electronics"
54
55static struct map_desc anubis_iodesc[] __initdata = {
56 /* ISA IO areas */
57
58 { (u32)S3C24XX_VA_ISA_BYTE, 0x0, SZ_16M, MT_DEVICE },
59 { (u32)S3C24XX_VA_ISA_WORD, 0x0, SZ_16M, MT_DEVICE },
60
61 /* we could possibly compress the next set down into a set of smaller
62 * pagetables, but that would mean using an L2 section, and it still means
63 * we cannot actually feed the same register to an LDR due to 16K spacing
64 */
65
66 /* CPLD control registers */
67
68 { (u32)ANUBIS_VA_CTRL1, ANUBIS_PA_CTRL1, SZ_4K, MT_DEVICE },
69 { (u32)ANUBIS_VA_CTRL2, ANUBIS_PA_CTRL2, SZ_4K, MT_DEVICE },
70
71 /* IDE drives */
72
73 { (u32)ANUBIS_IDEPRI, S3C2410_CS3, SZ_1M, MT_DEVICE },
74 { (u32)ANUBIS_IDEPRIAUX, S3C2410_CS3+(1<<26), SZ_1M, MT_DEVICE },
75
76 { (u32)ANUBIS_IDESEC, S3C2410_CS4, SZ_1M, MT_DEVICE },
77 { (u32)ANUBIS_IDESECAUX, S3C2410_CS4+(1<<26), SZ_1M, MT_DEVICE },
78};
79
80#define UCON S3C2410_UCON_DEFAULT | S3C2410_UCON_UCLK
81#define ULCON S3C2410_LCON_CS8 | S3C2410_LCON_PNONE | S3C2410_LCON_STOPB
82#define UFCON S3C2410_UFCON_RXTRIG8 | S3C2410_UFCON_FIFOMODE
83
84static struct s3c24xx_uart_clksrc anubis_serial_clocks[] = {
85 [0] = {
86 .name = "uclk",
87 .divisor = 1,
88 .min_baud = 0,
89 .max_baud = 0,
90 },
91 [1] = {
92 .name = "pclk",
93 .divisor = 1,
94 .min_baud = 0,
95 .max_baud = 0,
96 }
97};
98
99
100static struct s3c2410_uartcfg anubis_uartcfgs[] = {
101 [0] = {
102 .hwport = 0,
103 .flags = 0,
104 .ucon = UCON,
105 .ulcon = ULCON,
106 .ufcon = UFCON,
107 .clocks = anubis_serial_clocks,
108 .clocks_size = ARRAY_SIZE(anubis_serial_clocks)
109 },
110 [1] = {
111 .hwport = 2,
112 .flags = 0,
113 .ucon = UCON,
114 .ulcon = ULCON,
115 .ufcon = UFCON,
116 .clocks = anubis_serial_clocks,
117 .clocks_size = ARRAY_SIZE(anubis_serial_clocks)
118 },
119};
120
121/* NAND Flash on Anubis board */
122
123static int external_map[] = { 2 };
124static int chip0_map[] = { 0 };
125static int chip1_map[] = { 1 };
126
127struct mtd_partition anubis_default_nand_part[] = {
128 [0] = {
129 .name = "Boot Agent",
130 .size = SZ_16K,
131 .offset = 0
132 },
133 [1] = {
134 .name = "/boot",
135 .size = SZ_4M - SZ_16K,
136 .offset = SZ_16K,
137 },
138 [2] = {
139 .name = "user1",
140 .offset = SZ_4M,
141 .size = SZ_32M - SZ_4M,
142 },
143 [3] = {
144 .name = "user2",
145 .offset = SZ_32M,
146 .size = MTDPART_SIZ_FULL,
147 }
148};
149
150/* the Anubis has 3 selectable slots for nand-flash, the two
151 * on-board chip areas, as well as the external slot.
152 *
153 * Note, there is no current hot-plug support for the External
154 * socket.
155*/
156
157static struct s3c2410_nand_set anubis_nand_sets[] = {
158 [1] = {
159 .name = "External",
160 .nr_chips = 1,
161 .nr_map = external_map,
162 .nr_partitions = ARRAY_SIZE(anubis_default_nand_part),
163 .partitions = anubis_default_nand_part
164 },
165 [0] = {
166 .name = "chip0",
167 .nr_chips = 1,
168 .nr_map = chip0_map,
169 .nr_partitions = ARRAY_SIZE(anubis_default_nand_part),
170 .partitions = anubis_default_nand_part
171 },
172 [2] = {
173 .name = "chip1",
174 .nr_chips = 1,
175 .nr_map = chip1_map,
176 .nr_partitions = ARRAY_SIZE(anubis_default_nand_part),
177 .partitions = anubis_default_nand_part
178 },
179};
180
181static void anubis_nand_select(struct s3c2410_nand_set *set, int slot)
182{
183 unsigned int tmp;
184
185 slot = set->nr_map[slot] & 3;
186
187 pr_debug("anubis_nand: selecting slot %d (set %p,%p)\n",
188 slot, set, set->nr_map);
189
190 tmp = __raw_readb(ANUBIS_VA_CTRL1);
191 tmp &= ~ANUBIS_CTRL1_NANDSEL;
192 tmp |= slot;
193
194 pr_debug("anubis_nand: ctrl1 now %02x\n", tmp);
195
196 __raw_writeb(tmp, ANUBIS_VA_CTRL1);
197}
198
199static struct s3c2410_platform_nand anubis_nand_info = {
200 .tacls = 25,
201 .twrph0 = 80,
202 .twrph1 = 80,
203 .nr_sets = ARRAY_SIZE(anubis_nand_sets),
204 .sets = anubis_nand_sets,
205 .select_chip = anubis_nand_select,
206};
207
208
209/* Standard Anubis devices */
210
211static struct platform_device *anubis_devices[] __initdata = {
212 &s3c_device_usb,
213 &s3c_device_wdt,
214 &s3c_device_adc,
215 &s3c_device_i2c,
216 &s3c_device_rtc,
217 &s3c_device_nand,
218};
219
220static struct clk *anubis_clocks[] = {
221 &s3c24xx_dclk0,
222 &s3c24xx_dclk1,
223 &s3c24xx_clkout0,
224 &s3c24xx_clkout1,
225 &s3c24xx_uclk,
226};
227
228static struct s3c24xx_board anubis_board __initdata = {
229 .devices = anubis_devices,
230 .devices_count = ARRAY_SIZE(anubis_devices),
231 .clocks = anubis_clocks,
232 .clocks_count = ARRAY_SIZE(anubis_clocks)
233};
234
235void __init anubis_map_io(void)
236{
237 /* initialise the clocks */
238
239 s3c24xx_dclk0.parent = NULL;
240 s3c24xx_dclk0.rate = 12*1000*1000;
241
242 s3c24xx_dclk1.parent = NULL;
243 s3c24xx_dclk1.rate = 24*1000*1000;
244
245 s3c24xx_clkout0.parent = &s3c24xx_dclk0;
246 s3c24xx_clkout1.parent = &s3c24xx_dclk1;
247
248 s3c24xx_uclk.parent = &s3c24xx_clkout1;
249
250 s3c_device_nand.dev.platform_data = &anubis_nand_info;
251
252 s3c24xx_init_io(anubis_iodesc, ARRAY_SIZE(anubis_iodesc));
253 s3c24xx_init_clocks(0);
254 s3c24xx_init_uarts(anubis_uartcfgs, ARRAY_SIZE(anubis_uartcfgs));
255 s3c24xx_set_board(&anubis_board);
256
257 /* ensure that the GPIO is setup */
258 s3c2410_gpio_setpin(S3C2410_GPA0, 1);
259}
260
261MACHINE_START(ANUBIS, "Simtec-Anubis")
262 /* Maintainer: Ben Dooks <ben@simtec.co.uk> */
263 .phys_ram = S3C2410_SDRAM_PA,
264 .phys_io = S3C2410_PA_UART,
265 .io_pg_offst = (((u32)S3C24XX_VA_UART) >> 18) & 0xfffc,
266 .boot_params = S3C2410_SDRAM_PA + 0x100,
267 .map_io = anubis_map_io,
268 .init_irq = s3c24xx_init_irq,
269 .timer = &s3c24xx_timer,
270MACHINE_END
diff --git a/arch/arm/mach-s3c2410/pm-simtec.c b/arch/arm/mach-s3c2410/pm-simtec.c
index 2cb798832223..4c7ccef6c207 100644
--- a/arch/arm/mach-s3c2410/pm-simtec.c
+++ b/arch/arm/mach-s3c2410/pm-simtec.c
@@ -48,7 +48,7 @@ static __init int pm_simtec_init(void)
48 48
49 /* check which machine we are running on */ 49 /* check which machine we are running on */
50 50
51 if (!machine_is_bast() && !machine_is_vr1000()) 51 if (!machine_is_bast() && !machine_is_vr1000() && !machine_is_anubis())
52 return 0; 52 return 0;
53 53
54 printk(KERN_INFO "Simtec Board Power Manangement" COPYRIGHT "\n"); 54 printk(KERN_INFO "Simtec Board Power Manangement" COPYRIGHT "\n");
diff --git a/arch/arm/mach-s3c2410/time.c b/arch/arm/mach-s3c2410/time.c
index 765a3a9ae032..c0acfb2ad790 100644
--- a/arch/arm/mach-s3c2410/time.c
+++ b/arch/arm/mach-s3c2410/time.c
@@ -164,7 +164,7 @@ static void s3c2410_timer_setup (void)
164 164
165 /* configure the system for whichever machine is in use */ 165 /* configure the system for whichever machine is in use */
166 166
167 if (machine_is_bast() || machine_is_vr1000()) { 167 if (machine_is_bast() || machine_is_vr1000() || machine_is_anubis()) {
168 /* timer is at 12MHz, scaler is 1 */ 168 /* timer is at 12MHz, scaler is 1 */
169 timer_usec_ticks = timer_mask_usec_ticks(1, 12000000); 169 timer_usec_ticks = timer_mask_usec_ticks(1, 12000000);
170 tcnt = 12000000 / HZ; 170 tcnt = 12000000 / HZ;
diff --git a/arch/arm/plat-omap/Kconfig b/arch/arm/plat-omap/Kconfig
index 345365852f8c..9693e9b4ffd1 100644
--- a/arch/arm/plat-omap/Kconfig
+++ b/arch/arm/plat-omap/Kconfig
@@ -91,6 +91,13 @@ config OMAP_32K_TIMER_HZ
91 Kernel internal timer frequency should be a divisor of 32768, 91 Kernel internal timer frequency should be a divisor of 32768,
92 such as 64 or 128. 92 such as 64 or 128.
93 93
94config OMAP_DM_TIMER
95 bool "Use dual-mode timer"
96 default n
97 depends on ARCH_OMAP16XX
98 help
99 Select this option if you want to use OMAP Dual-Mode timers.
100
94choice 101choice
95 prompt "Low-level debug console UART" 102 prompt "Low-level debug console UART"
96 depends on ARCH_OMAP 103 depends on ARCH_OMAP
@@ -107,6 +114,15 @@ config OMAP_LL_DEBUG_UART3
107 114
108endchoice 115endchoice
109 116
117config OMAP_SERIAL_WAKE
118 bool "Enable wake-up events for serial ports"
119 depends on OMAP_MUX
120 default y
121 help
122 Select this option if you want to have your system wake up
123 to data on the serial RX line. This allows you to wake the
124 system from serial console.
125
110endmenu 126endmenu
111 127
112endif 128endif
diff --git a/arch/arm/plat-omap/Makefile b/arch/arm/plat-omap/Makefile
index 531e11af54d4..7e144f9cad1c 100644
--- a/arch/arm/plat-omap/Makefile
+++ b/arch/arm/plat-omap/Makefile
@@ -3,7 +3,7 @@
3# 3#
4 4
5# Common support 5# Common support
6obj-y := common.o dma.o clock.o mux.o gpio.o mcbsp.o usb.o 6obj-y := common.o sram.o sram-fn.o clock.o dma.o mux.o gpio.o mcbsp.o usb.o
7obj-m := 7obj-m :=
8obj-n := 8obj-n :=
9obj- := 9obj- :=
@@ -15,3 +15,5 @@ obj-$(CONFIG_ARCH_OMAP16XX) += ocpi.o
15obj-$(CONFIG_PM) += pm.o sleep.o 15obj-$(CONFIG_PM) += pm.o sleep.o
16 16
17obj-$(CONFIG_CPU_FREQ) += cpu-omap.o 17obj-$(CONFIG_CPU_FREQ) += cpu-omap.o
18obj-$(CONFIG_OMAP_DM_TIMER) += dmtimer.o
19
diff --git a/arch/arm/plat-omap/clock.c b/arch/arm/plat-omap/clock.c
index 59d91b3262ba..52a58b2da288 100644
--- a/arch/arm/plat-omap/clock.c
+++ b/arch/arm/plat-omap/clock.c
@@ -21,6 +21,7 @@
21#include <asm/arch/usb.h> 21#include <asm/arch/usb.h>
22 22
23#include "clock.h" 23#include "clock.h"
24#include "sram.h"
24 25
25static LIST_HEAD(clocks); 26static LIST_HEAD(clocks);
26static DECLARE_MUTEX(clocks_sem); 27static DECLARE_MUTEX(clocks_sem);
@@ -141,7 +142,7 @@ static struct clk arm_ck = {
141static struct clk armper_ck = { 142static struct clk armper_ck = {
142 .name = "armper_ck", 143 .name = "armper_ck",
143 .parent = &ck_dpll1, 144 .parent = &ck_dpll1,
144 .flags = CLOCK_IN_OMAP730 | CLOCK_IN_OMAP1510 | CLOCK_IN_OMAP16XX | 145 .flags = CLOCK_IN_OMAP1510 | CLOCK_IN_OMAP16XX |
145 RATE_CKCTL, 146 RATE_CKCTL,
146 .enable_reg = ARM_IDLECT2, 147 .enable_reg = ARM_IDLECT2,
147 .enable_bit = EN_PERCK, 148 .enable_bit = EN_PERCK,
@@ -385,7 +386,8 @@ static struct clk uart2_ck = {
385 .name = "uart2_ck", 386 .name = "uart2_ck",
386 /* Direct from ULPD, no parent */ 387 /* Direct from ULPD, no parent */
387 .rate = 12000000, 388 .rate = 12000000,
388 .flags = CLOCK_IN_OMAP1510 | CLOCK_IN_OMAP16XX | ENABLE_REG_32BIT, 389 .flags = CLOCK_IN_OMAP1510 | CLOCK_IN_OMAP16XX | ENABLE_REG_32BIT |
390 ALWAYS_ENABLED,
389 .enable_reg = MOD_CONF_CTRL_0, 391 .enable_reg = MOD_CONF_CTRL_0,
390 .enable_bit = 30, /* Chooses between 12MHz and 48MHz */ 392 .enable_bit = 30, /* Chooses between 12MHz and 48MHz */
391 .set_rate = &set_uart_rate, 393 .set_rate = &set_uart_rate,
@@ -443,6 +445,15 @@ static struct clk usb_hhc_ck16xx = {
443 .enable_bit = 8 /* UHOST_EN */, 445 .enable_bit = 8 /* UHOST_EN */,
444}; 446};
445 447
448static struct clk usb_dc_ck = {
449 .name = "usb_dc_ck",
450 /* Direct from ULPD, no parent */
451 .rate = 48000000,
452 .flags = CLOCK_IN_OMAP16XX | RATE_FIXED,
453 .enable_reg = SOFT_REQ_REG,
454 .enable_bit = 4,
455};
456
446static struct clk mclk_1510 = { 457static struct clk mclk_1510 = {
447 .name = "mclk", 458 .name = "mclk",
448 /* Direct from ULPD, no parent. May be enabled by ext hardware. */ 459 /* Direct from ULPD, no parent. May be enabled by ext hardware. */
@@ -552,6 +563,7 @@ static struct clk * onchip_clks[] = {
552 &uart3_16xx, 563 &uart3_16xx,
553 &usb_clko, 564 &usb_clko,
554 &usb_hhc_ck1510, &usb_hhc_ck16xx, 565 &usb_hhc_ck1510, &usb_hhc_ck16xx,
566 &usb_dc_ck,
555 &mclk_1510, &mclk_16xx, 567 &mclk_1510, &mclk_16xx,
556 &bclk_1510, &bclk_16xx, 568 &bclk_1510, &bclk_16xx,
557 &mmc1_ck, 569 &mmc1_ck,
@@ -946,14 +958,13 @@ static int select_table_rate(struct clk * clk, unsigned long rate)
946 if (!ptr->rate) 958 if (!ptr->rate)
947 return -EINVAL; 959 return -EINVAL;
948 960
949 if (!ptr->rate) 961 /*
950 return -EINVAL; 962 * In most cases we should not need to reprogram DPLL.
963 * Reprogramming the DPLL is tricky, it must be done from SRAM.
964 */
965 omap_sram_reprogram_clock(ptr->dpllctl_val, ptr->ckctl_val);
951 966
952 if (unlikely(ck_dpll1.rate == 0)) { 967 ck_dpll1.rate = ptr->pll_rate;
953 omap_writew(ptr->dpllctl_val, DPLL_CTL);
954 ck_dpll1.rate = ptr->pll_rate;
955 }
956 omap_writew(ptr->ckctl_val, ARM_CKCTL);
957 propagate_rate(&ck_dpll1); 968 propagate_rate(&ck_dpll1);
958 return 0; 969 return 0;
959} 970}
@@ -1224,9 +1235,11 @@ int __init clk_init(void)
1224#endif 1235#endif
1225 /* Cache rates for clocks connected to ck_ref (not dpll1) */ 1236 /* Cache rates for clocks connected to ck_ref (not dpll1) */
1226 propagate_rate(&ck_ref); 1237 propagate_rate(&ck_ref);
1227 printk(KERN_INFO "Clocking rate (xtal/DPLL1/MPU): %ld.%01ld/%ld/%ld MHz\n", 1238 printk(KERN_INFO "Clocking rate (xtal/DPLL1/MPU): "
1239 "%ld.%01ld/%ld.%01ld/%ld.%01ld MHz\n",
1228 ck_ref.rate / 1000000, (ck_ref.rate / 100000) % 10, 1240 ck_ref.rate / 1000000, (ck_ref.rate / 100000) % 10,
1229 ck_dpll1.rate, arm_ck.rate); 1241 ck_dpll1.rate / 1000000, (ck_dpll1.rate / 100000) % 10,
1242 arm_ck.rate / 1000000, (arm_ck.rate / 100000) % 10);
1230 1243
1231#ifdef CONFIG_MACH_OMAP_PERSEUS2 1244#ifdef CONFIG_MACH_OMAP_PERSEUS2
1232 /* Select slicer output as OMAP input clock */ 1245 /* Select slicer output as OMAP input clock */
@@ -1271,7 +1284,9 @@ static int __init omap_late_clk_reset(void)
1271 struct clk *p; 1284 struct clk *p;
1272 __u32 regval32; 1285 __u32 regval32;
1273 1286
1274 omap_writew(0, SOFT_REQ_REG); 1287 /* USB_REQ_EN will be disabled later if necessary (usb_dc_ck) */
1288 regval32 = omap_readw(SOFT_REQ_REG) & (1 << 4);
1289 omap_writew(regval32, SOFT_REQ_REG);
1275 omap_writew(0, SOFT_REQ_REG2); 1290 omap_writew(0, SOFT_REQ_REG2);
1276 1291
1277 list_for_each_entry(p, &clocks, node) { 1292 list_for_each_entry(p, &clocks, node) {
diff --git a/arch/arm/plat-omap/common.c b/arch/arm/plat-omap/common.c
index ea967a8f6ce5..6cb20aea7f51 100644
--- a/arch/arm/plat-omap/common.c
+++ b/arch/arm/plat-omap/common.c
@@ -26,6 +26,7 @@
26#include <asm/hardware/clock.h> 26#include <asm/hardware/clock.h>
27#include <asm/io.h> 27#include <asm/io.h>
28#include <asm/mach-types.h> 28#include <asm/mach-types.h>
29#include <asm/setup.h>
29 30
30#include <asm/arch/board.h> 31#include <asm/arch/board.h>
31#include <asm/arch/mux.h> 32#include <asm/arch/mux.h>
@@ -35,11 +36,11 @@
35 36
36#define NO_LENGTH_CHECK 0xffffffff 37#define NO_LENGTH_CHECK 0xffffffff
37 38
38extern int omap_bootloader_tag_len; 39unsigned char omap_bootloader_tag[512];
39extern u8 omap_bootloader_tag[]; 40int omap_bootloader_tag_len;
40 41
41struct omap_board_config_kernel *omap_board_config; 42struct omap_board_config_kernel *omap_board_config;
42int omap_board_config_size = 0; 43int omap_board_config_size;
43 44
44static const void *get_config(u16 tag, size_t len, int skip, size_t *len_out) 45static const void *get_config(u16 tag, size_t len, int skip, size_t *len_out)
45{ 46{
diff --git a/arch/arm/plat-omap/dma.c b/arch/arm/plat-omap/dma.c
index c0a5c2fa42bd..da7b65145658 100644
--- a/arch/arm/plat-omap/dma.c
+++ b/arch/arm/plat-omap/dma.c
@@ -425,7 +425,7 @@ static int dma_handle_ch(int ch)
425 dma_chan[ch + 6].saved_csr = csr >> 7; 425 dma_chan[ch + 6].saved_csr = csr >> 7;
426 csr &= 0x7f; 426 csr &= 0x7f;
427 } 427 }
428 if (!csr) 428 if ((csr & 0x3f) == 0)
429 return 0; 429 return 0;
430 if (unlikely(dma_chan[ch].dev_id == -1)) { 430 if (unlikely(dma_chan[ch].dev_id == -1)) {
431 printk(KERN_WARNING "Spurious interrupt from DMA channel %d (CSR %04x)\n", 431 printk(KERN_WARNING "Spurious interrupt from DMA channel %d (CSR %04x)\n",
@@ -890,11 +890,11 @@ void omap_enable_lcd_dma(void)
890 w |= 1 << 8; 890 w |= 1 << 8;
891 omap_writew(w, OMAP1610_DMA_LCD_CTRL); 891 omap_writew(w, OMAP1610_DMA_LCD_CTRL);
892 892
893 lcd_dma.active = 1;
894
893 w = omap_readw(OMAP1610_DMA_LCD_CCR); 895 w = omap_readw(OMAP1610_DMA_LCD_CCR);
894 w |= 1 << 7; 896 w |= 1 << 7;
895 omap_writew(w, OMAP1610_DMA_LCD_CCR); 897 omap_writew(w, OMAP1610_DMA_LCD_CCR);
896
897 lcd_dma.active = 1;
898} 898}
899 899
900void omap_setup_lcd_dma(void) 900void omap_setup_lcd_dma(void)
@@ -965,8 +965,8 @@ void omap_clear_dma(int lch)
965 */ 965 */
966dma_addr_t omap_get_dma_src_pos(int lch) 966dma_addr_t omap_get_dma_src_pos(int lch)
967{ 967{
968 return (dma_addr_t) (OMAP_DMA_CSSA_L(lch) | 968 return (dma_addr_t) (omap_readw(OMAP_DMA_CSSA_L(lch)) |
969 (OMAP_DMA_CSSA_U(lch) << 16)); 969 (omap_readw(OMAP_DMA_CSSA_U(lch)) << 16));
970} 970}
971 971
972/* 972/*
@@ -979,8 +979,18 @@ dma_addr_t omap_get_dma_src_pos(int lch)
979 */ 979 */
980dma_addr_t omap_get_dma_dst_pos(int lch) 980dma_addr_t omap_get_dma_dst_pos(int lch)
981{ 981{
982 return (dma_addr_t) (OMAP_DMA_CDSA_L(lch) | 982 return (dma_addr_t) (omap_readw(OMAP_DMA_CDSA_L(lch)) |
983 (OMAP_DMA_CDSA_U(lch) << 16)); 983 (omap_readw(OMAP_DMA_CDSA_U(lch)) << 16));
984}
985
986/*
987 * Returns current source transfer counting for the given DMA channel.
988 * Can be used to monitor the progress of a transfer inside a block.
989 * It must be called with disabled interrupts.
990 */
991int omap_get_dma_src_addr_counter(int lch)
992{
993 return (dma_addr_t) omap_readw(OMAP_DMA_CSAC(lch));
984} 994}
985 995
986int omap_dma_running(void) 996int omap_dma_running(void)
@@ -1076,6 +1086,7 @@ arch_initcall(omap_init_dma);
1076 1086
1077EXPORT_SYMBOL(omap_get_dma_src_pos); 1087EXPORT_SYMBOL(omap_get_dma_src_pos);
1078EXPORT_SYMBOL(omap_get_dma_dst_pos); 1088EXPORT_SYMBOL(omap_get_dma_dst_pos);
1089EXPORT_SYMBOL(omap_get_dma_src_addr_counter);
1079EXPORT_SYMBOL(omap_clear_dma); 1090EXPORT_SYMBOL(omap_clear_dma);
1080EXPORT_SYMBOL(omap_set_dma_priority); 1091EXPORT_SYMBOL(omap_set_dma_priority);
1081EXPORT_SYMBOL(omap_request_dma); 1092EXPORT_SYMBOL(omap_request_dma);
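The new omap_get_dma_src_addr_counter() helper is documented above as requiring interrupts to be disabled. A minimal usage sketch, with the caller name invented for illustration and <asm/arch/dma.h> assumed to declare the helper:

#include <asm/system.h>
#include <asm/arch/dma.h>

static int sample_dma_progress(int lch)
{
	unsigned long flags;
	int count;

	local_irq_save(flags);		/* helper must run with IRQs disabled */
	count = omap_get_dma_src_addr_counter(lch);
	local_irq_restore(flags);

	return count;
}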
diff --git a/arch/arm/plat-omap/dmtimer.c b/arch/arm/plat-omap/dmtimer.c
new file mode 100644
index 000000000000..a1468d7326eb
--- /dev/null
+++ b/arch/arm/plat-omap/dmtimer.c
@@ -0,0 +1,260 @@
1/*
2 * linux/arch/arm/plat-omap/dmtimer.c
3 *
4 * OMAP Dual-Mode Timers
5 *
6 * Copyright (C) 2005 Nokia Corporation
7 * Author: Lauri Leukkunen <lauri.leukkunen@nokia.com>
8 *
9 * This program is free software; you can redistribute it and/or modify it
10 * under the terms of the GNU General Public License as published by the
11 * Free Software Foundation; either version 2 of the License, or (at your
12 * option) any later version.
13 *
14 * THIS SOFTWARE IS PROVIDED ``AS IS'' AND ANY EXPRESS OR IMPLIED
15 * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
16 * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN
17 * NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
18 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
19 * ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
20 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
21 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
22 *
23 * You should have received a copy of the GNU General Public License along
24 * with this program; if not, write to the Free Software Foundation, Inc.,
25 * 675 Mass Ave, Cambridge, MA 02139, USA.
26 */
27
28#include <linux/init.h>
29#include <asm/arch/hardware.h>
30#include <asm/arch/dmtimer.h>
31#include <asm/io.h>
32#include <asm/arch/irqs.h>
33#include <linux/spinlock.h>
34#include <linux/list.h>
35
36#define OMAP_TIMER_COUNT 8
37
38#define OMAP_TIMER_ID_REG 0x00
39#define OMAP_TIMER_OCP_CFG_REG 0x10
40#define OMAP_TIMER_SYS_STAT_REG 0x14
41#define OMAP_TIMER_STAT_REG 0x18
42#define OMAP_TIMER_INT_EN_REG 0x1c
43#define OMAP_TIMER_WAKEUP_EN_REG 0x20
44#define OMAP_TIMER_CTRL_REG 0x24
45#define OMAP_TIMER_COUNTER_REG 0x28
46#define OMAP_TIMER_LOAD_REG 0x2c
47#define OMAP_TIMER_TRIGGER_REG 0x30
48#define OMAP_TIMER_WRITE_PEND_REG 0x34
49#define OMAP_TIMER_MATCH_REG 0x38
50#define OMAP_TIMER_CAPTURE_REG 0x3c
51#define OMAP_TIMER_IF_CTRL_REG 0x40
52
53
54static struct dmtimer_info_struct {
55 struct list_head unused_timers;
56 struct list_head reserved_timers;
57} dm_timer_info;
58
59static struct omap_dm_timer dm_timers[] = {
60 { .base=0xfffb1400, .irq=INT_1610_GPTIMER1 },
61 { .base=0xfffb1c00, .irq=INT_1610_GPTIMER2 },
62 { .base=0xfffb2400, .irq=INT_1610_GPTIMER3 },
63 { .base=0xfffb2c00, .irq=INT_1610_GPTIMER4 },
64 { .base=0xfffb3400, .irq=INT_1610_GPTIMER5 },
65 { .base=0xfffb3c00, .irq=INT_1610_GPTIMER6 },
66 { .base=0xfffb4400, .irq=INT_1610_GPTIMER7 },
67 { .base=0xfffb4c00, .irq=INT_1610_GPTIMER8 },
68 { .base=0x0 },
69};
70
71
72static spinlock_t dm_timer_lock;
73
74
75inline void omap_dm_timer_write_reg(struct omap_dm_timer *timer, int reg, u32 value)
76{
77 omap_writel(value, timer->base + reg);
78 while (omap_dm_timer_read_reg(timer, OMAP_TIMER_WRITE_PEND_REG))
79 ;
80}
81
82u32 omap_dm_timer_read_reg(struct omap_dm_timer *timer, int reg)
83{
84 return omap_readl(timer->base + reg);
85}
86
87int omap_dm_timers_active(void)
88{
89 struct omap_dm_timer *timer;
90
91 for (timer = &dm_timers[0]; timer->base; ++timer)
92 if (omap_dm_timer_read_reg(timer, OMAP_TIMER_CTRL_REG) &
93 OMAP_TIMER_CTRL_ST)
94 return 1;
95
96 return 0;
97}
98
99
100void omap_dm_timer_set_source(struct omap_dm_timer *timer, int source)
101{
102 int n = (timer - dm_timers) << 1;
103 u32 l;
104
105 l = omap_readl(MOD_CONF_CTRL_1) & ~(0x03 << n);
106 l |= source << n;
107 omap_writel(l, MOD_CONF_CTRL_1);
108}
109
110
111static void omap_dm_timer_reset(struct omap_dm_timer *timer)
112{
113 /* Reset and set posted mode */
114 omap_dm_timer_write_reg(timer, OMAP_TIMER_IF_CTRL_REG, 0x06);
115 omap_dm_timer_write_reg(timer, OMAP_TIMER_OCP_CFG_REG, 0x02);
116
117 omap_dm_timer_set_source(timer, OMAP_TIMER_SRC_ARMXOR);
118}
119
120
121
122struct omap_dm_timer * omap_dm_timer_request(void)
123{
124 struct omap_dm_timer *timer = NULL;
125 unsigned long flags;
126
127 spin_lock_irqsave(&dm_timer_lock, flags);
128 if (!list_empty(&dm_timer_info.unused_timers)) {
129 timer = (struct omap_dm_timer *)
130 dm_timer_info.unused_timers.next;
131 list_move_tail((struct list_head *)timer,
132 &dm_timer_info.reserved_timers);
133 }
134 spin_unlock_irqrestore(&dm_timer_lock, flags);
135
136 return timer;
137}
138
139
140void omap_dm_timer_free(struct omap_dm_timer *timer)
141{
142 unsigned long flags;
143
144 omap_dm_timer_reset(timer);
145
146 spin_lock_irqsave(&dm_timer_lock, flags);
147 list_move_tail((struct list_head *)timer, &dm_timer_info.unused_timers);
148 spin_unlock_irqrestore(&dm_timer_lock, flags);
149}
150
151void omap_dm_timer_set_int_enable(struct omap_dm_timer *timer,
152 unsigned int value)
153{
154 omap_dm_timer_write_reg(timer, OMAP_TIMER_INT_EN_REG, value);
155}
156
157unsigned int omap_dm_timer_read_status(struct omap_dm_timer *timer)
158{
159 return omap_dm_timer_read_reg(timer, OMAP_TIMER_STAT_REG);
160}
161
162void omap_dm_timer_write_status(struct omap_dm_timer *timer, unsigned int value)
163{
164 omap_dm_timer_write_reg(timer, OMAP_TIMER_STAT_REG, value);
165}
166
167void omap_dm_timer_enable_autoreload(struct omap_dm_timer *timer)
168{
169 u32 l;
170 l = omap_dm_timer_read_reg(timer, OMAP_TIMER_CTRL_REG);
171 l |= OMAP_TIMER_CTRL_AR;
172 omap_dm_timer_write_reg(timer, OMAP_TIMER_CTRL_REG, l);
173}
174
175void omap_dm_timer_trigger(struct omap_dm_timer *timer)
176{
177 omap_dm_timer_write_reg(timer, OMAP_TIMER_TRIGGER_REG, 1);
178}
179
180void omap_dm_timer_set_trigger(struct omap_dm_timer *timer, unsigned int value)
181{
182 u32 l;
183
184 l = omap_dm_timer_read_reg(timer, OMAP_TIMER_CTRL_REG);
185 l |= value & 0x3;
186 omap_dm_timer_write_reg(timer, OMAP_TIMER_CTRL_REG, l);
187}
188
189void omap_dm_timer_start(struct omap_dm_timer *timer)
190{
191 u32 l;
192
193 l = omap_dm_timer_read_reg(timer, OMAP_TIMER_CTRL_REG);
194 l |= OMAP_TIMER_CTRL_ST;
195 omap_dm_timer_write_reg(timer, OMAP_TIMER_CTRL_REG, l);
196}
197
198void omap_dm_timer_stop(struct omap_dm_timer *timer)
199{
200 u32 l;
201
202 l = omap_dm_timer_read_reg(timer, OMAP_TIMER_CTRL_REG);
203 l &= ~0x1;
204 omap_dm_timer_write_reg(timer, OMAP_TIMER_CTRL_REG, l);
205}
206
207unsigned int omap_dm_timer_read_counter(struct omap_dm_timer *timer)
208{
209 return omap_dm_timer_read_reg(timer, OMAP_TIMER_COUNTER_REG);
210}
211
212void omap_dm_timer_reset_counter(struct omap_dm_timer *timer)
213{
214 omap_dm_timer_write_reg(timer, OMAP_TIMER_COUNTER_REG, 0);
215}
216
217void omap_dm_timer_set_load(struct omap_dm_timer *timer, unsigned int load)
218{
219 omap_dm_timer_write_reg(timer, OMAP_TIMER_LOAD_REG, load);
220}
221
222void omap_dm_timer_set_match(struct omap_dm_timer *timer, unsigned int match)
223{
224 omap_dm_timer_write_reg(timer, OMAP_TIMER_MATCH_REG, match);
225}
226
227void omap_dm_timer_enable_compare(struct omap_dm_timer *timer)
228{
229 u32 l;
230
231 l = omap_dm_timer_read_reg(timer, OMAP_TIMER_CTRL_REG);
232 l |= OMAP_TIMER_CTRL_CE;
233 omap_dm_timer_write_reg(timer, OMAP_TIMER_CTRL_REG, l);
234}
235
236
237static inline void __dm_timer_init(void)
238{
239 struct omap_dm_timer *timer;
240
241 spin_lock_init(&dm_timer_lock);
242 INIT_LIST_HEAD(&dm_timer_info.unused_timers);
243 INIT_LIST_HEAD(&dm_timer_info.reserved_timers);
244
245 timer = &dm_timers[0];
246 while (timer->base) {
247 list_add_tail((struct list_head *)timer, &dm_timer_info.unused_timers);
248 omap_dm_timer_reset(timer);
249 timer++;
250 }
251}
252
253static int __init omap_dm_timer_init(void)
254{
255 if (cpu_is_omap16xx())
256 __dm_timer_init();
257 return 0;
258}
259
260arch_initcall(omap_dm_timer_init);
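The API introduced above is essentially request, configure, start. A hypothetical caller might look like the sketch below; the reload value is purely illustrative and <asm/arch/dmtimer.h> is assumed to declare the new functions.

#include <linux/errno.h>
#include <asm/arch/dmtimer.h>

static struct omap_dm_timer *demo_timer;

static int demo_timer_setup(void)
{
	demo_timer = omap_dm_timer_request();
	if (!demo_timer)
		return -EBUSY;		/* all eight GP timers already reserved */

	omap_dm_timer_set_source(demo_timer, OMAP_TIMER_SRC_ARMXOR);
	omap_dm_timer_set_load(demo_timer, 0xffff0000);	/* illustrative reload value */
	omap_dm_timer_enable_autoreload(demo_timer);
	omap_dm_timer_start(demo_timer);
	return 0;
}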
diff --git a/arch/arm/plat-omap/gpio.c b/arch/arm/plat-omap/gpio.c
index aa481ea3d702..55059a24ad41 100644
--- a/arch/arm/plat-omap/gpio.c
+++ b/arch/arm/plat-omap/gpio.c
@@ -3,7 +3,7 @@
3 * 3 *
4 * Support functions for OMAP GPIO 4 * Support functions for OMAP GPIO
5 * 5 *
6 * Copyright (C) 2003 Nokia Corporation 6 * Copyright (C) 2003-2005 Nokia Corporation
7 * Written by Juha Yrjölä <juha.yrjola@nokia.com> 7 * Written by Juha Yrjölä <juha.yrjola@nokia.com>
8 * 8 *
9 * This program is free software; you can redistribute it and/or modify 9 * This program is free software; you can redistribute it and/or modify
@@ -17,8 +17,11 @@
17#include <linux/sched.h> 17#include <linux/sched.h>
18#include <linux/interrupt.h> 18#include <linux/interrupt.h>
19#include <linux/ptrace.h> 19#include <linux/ptrace.h>
20#include <linux/sysdev.h>
21#include <linux/err.h>
20 22
21#include <asm/hardware.h> 23#include <asm/hardware.h>
24#include <asm/hardware/clock.h>
22#include <asm/irq.h> 25#include <asm/irq.h>
23#include <asm/arch/irqs.h> 26#include <asm/arch/irqs.h>
24#include <asm/arch/gpio.h> 27#include <asm/arch/gpio.h>
@@ -29,7 +32,7 @@
29/* 32/*
30 * OMAP1510 GPIO registers 33 * OMAP1510 GPIO registers
31 */ 34 */
32#define OMAP1510_GPIO_BASE 0xfffce000 35#define OMAP1510_GPIO_BASE (void __iomem *)0xfffce000
33#define OMAP1510_GPIO_DATA_INPUT 0x00 36#define OMAP1510_GPIO_DATA_INPUT 0x00
34#define OMAP1510_GPIO_DATA_OUTPUT 0x04 37#define OMAP1510_GPIO_DATA_OUTPUT 0x04
35#define OMAP1510_GPIO_DIR_CONTROL 0x08 38#define OMAP1510_GPIO_DIR_CONTROL 0x08
@@ -43,34 +46,37 @@
43/* 46/*
44 * OMAP1610 specific GPIO registers 47 * OMAP1610 specific GPIO registers
45 */ 48 */
46#define OMAP1610_GPIO1_BASE 0xfffbe400 49#define OMAP1610_GPIO1_BASE (void __iomem *)0xfffbe400
47#define OMAP1610_GPIO2_BASE 0xfffbec00 50#define OMAP1610_GPIO2_BASE (void __iomem *)0xfffbec00
48#define OMAP1610_GPIO3_BASE 0xfffbb400 51#define OMAP1610_GPIO3_BASE (void __iomem *)0xfffbb400
49#define OMAP1610_GPIO4_BASE 0xfffbbc00 52#define OMAP1610_GPIO4_BASE (void __iomem *)0xfffbbc00
50#define OMAP1610_GPIO_REVISION 0x0000 53#define OMAP1610_GPIO_REVISION 0x0000
51#define OMAP1610_GPIO_SYSCONFIG 0x0010 54#define OMAP1610_GPIO_SYSCONFIG 0x0010
52#define OMAP1610_GPIO_SYSSTATUS 0x0014 55#define OMAP1610_GPIO_SYSSTATUS 0x0014
53#define OMAP1610_GPIO_IRQSTATUS1 0x0018 56#define OMAP1610_GPIO_IRQSTATUS1 0x0018
54#define OMAP1610_GPIO_IRQENABLE1 0x001c 57#define OMAP1610_GPIO_IRQENABLE1 0x001c
58#define OMAP1610_GPIO_WAKEUPENABLE 0x0028
55#define OMAP1610_GPIO_DATAIN 0x002c 59#define OMAP1610_GPIO_DATAIN 0x002c
56#define OMAP1610_GPIO_DATAOUT 0x0030 60#define OMAP1610_GPIO_DATAOUT 0x0030
57#define OMAP1610_GPIO_DIRECTION 0x0034 61#define OMAP1610_GPIO_DIRECTION 0x0034
58#define OMAP1610_GPIO_EDGE_CTRL1 0x0038 62#define OMAP1610_GPIO_EDGE_CTRL1 0x0038
59#define OMAP1610_GPIO_EDGE_CTRL2 0x003c 63#define OMAP1610_GPIO_EDGE_CTRL2 0x003c
60#define OMAP1610_GPIO_CLEAR_IRQENABLE1 0x009c 64#define OMAP1610_GPIO_CLEAR_IRQENABLE1 0x009c
65#define OMAP1610_GPIO_CLEAR_WAKEUPENA 0x00a8
61#define OMAP1610_GPIO_CLEAR_DATAOUT 0x00b0 66#define OMAP1610_GPIO_CLEAR_DATAOUT 0x00b0
62#define OMAP1610_GPIO_SET_IRQENABLE1 0x00dc 67#define OMAP1610_GPIO_SET_IRQENABLE1 0x00dc
68#define OMAP1610_GPIO_SET_WAKEUPENA 0x00e8
63#define OMAP1610_GPIO_SET_DATAOUT 0x00f0 69#define OMAP1610_GPIO_SET_DATAOUT 0x00f0
64 70
65/* 71/*
66 * OMAP730 specific GPIO registers 72 * OMAP730 specific GPIO registers
67 */ 73 */
68#define OMAP730_GPIO1_BASE 0xfffbc000 74#define OMAP730_GPIO1_BASE (void __iomem *)0xfffbc000
69#define OMAP730_GPIO2_BASE 0xfffbc800 75#define OMAP730_GPIO2_BASE (void __iomem *)0xfffbc800
70#define OMAP730_GPIO3_BASE 0xfffbd000 76#define OMAP730_GPIO3_BASE (void __iomem *)0xfffbd000
71#define OMAP730_GPIO4_BASE 0xfffbd800 77#define OMAP730_GPIO4_BASE (void __iomem *)0xfffbd800
72#define OMAP730_GPIO5_BASE 0xfffbe000 78#define OMAP730_GPIO5_BASE (void __iomem *)0xfffbe000
73#define OMAP730_GPIO6_BASE 0xfffbe800 79#define OMAP730_GPIO6_BASE (void __iomem *)0xfffbe800
74#define OMAP730_GPIO_DATA_INPUT 0x00 80#define OMAP730_GPIO_DATA_INPUT 0x00
75#define OMAP730_GPIO_DATA_OUTPUT 0x04 81#define OMAP730_GPIO_DATA_OUTPUT 0x04
76#define OMAP730_GPIO_DIR_CONTROL 0x08 82#define OMAP730_GPIO_DIR_CONTROL 0x08
@@ -78,14 +84,43 @@
78#define OMAP730_GPIO_INT_MASK 0x10 84#define OMAP730_GPIO_INT_MASK 0x10
79#define OMAP730_GPIO_INT_STATUS 0x14 85#define OMAP730_GPIO_INT_STATUS 0x14
80 86
87/*
88 * omap24xx specific GPIO registers
89 */
90#define OMAP24XX_GPIO1_BASE (void __iomem *)0x48018000
91#define OMAP24XX_GPIO2_BASE (void __iomem *)0x4801a000
92#define OMAP24XX_GPIO3_BASE (void __iomem *)0x4801c000
93#define OMAP24XX_GPIO4_BASE (void __iomem *)0x4801e000
94#define OMAP24XX_GPIO_REVISION 0x0000
95#define OMAP24XX_GPIO_SYSCONFIG 0x0010
96#define OMAP24XX_GPIO_SYSSTATUS 0x0014
97#define OMAP24XX_GPIO_IRQSTATUS1 0x0018
98#define OMAP24XX_GPIO_IRQENABLE1 0x001c
99#define OMAP24XX_GPIO_CTRL 0x0030
100#define OMAP24XX_GPIO_OE 0x0034
101#define OMAP24XX_GPIO_DATAIN 0x0038
102#define OMAP24XX_GPIO_DATAOUT 0x003c
103#define OMAP24XX_GPIO_LEVELDETECT0 0x0040
104#define OMAP24XX_GPIO_LEVELDETECT1 0x0044
105#define OMAP24XX_GPIO_RISINGDETECT 0x0048
106#define OMAP24XX_GPIO_FALLINGDETECT 0x004c
107#define OMAP24XX_GPIO_CLEARIRQENABLE1 0x0060
108#define OMAP24XX_GPIO_SETIRQENABLE1 0x0064
109#define OMAP24XX_GPIO_CLEARWKUENA 0x0080
110#define OMAP24XX_GPIO_SETWKUENA 0x0084
111#define OMAP24XX_GPIO_CLEARDATAOUT 0x0090
112#define OMAP24XX_GPIO_SETDATAOUT 0x0094
113
81#define OMAP_MPUIO_MASK (~OMAP_MAX_GPIO_LINES & 0xff) 114#define OMAP_MPUIO_MASK (~OMAP_MAX_GPIO_LINES & 0xff)
82 115
83struct gpio_bank { 116struct gpio_bank {
84 u32 base; 117 void __iomem *base;
85 u16 irq; 118 u16 irq;
86 u16 virtual_irq_start; 119 u16 virtual_irq_start;
87 u8 method; 120 int method;
88 u32 reserved_map; 121 u32 reserved_map;
122 u32 suspend_wakeup;
123 u32 saved_wakeup;
89 spinlock_t lock; 124 spinlock_t lock;
90}; 125};
91 126
@@ -93,8 +128,9 @@ struct gpio_bank {
93#define METHOD_GPIO_1510 1 128#define METHOD_GPIO_1510 1
94#define METHOD_GPIO_1610 2 129#define METHOD_GPIO_1610 2
95#define METHOD_GPIO_730 3 130#define METHOD_GPIO_730 3
131#define METHOD_GPIO_24XX 4
96 132
97#if defined(CONFIG_ARCH_OMAP16XX) 133#ifdef CONFIG_ARCH_OMAP16XX
98static struct gpio_bank gpio_bank_1610[5] = { 134static struct gpio_bank gpio_bank_1610[5] = {
99 { OMAP_MPUIO_BASE, INT_MPUIO, IH_MPUIO_BASE, METHOD_MPUIO}, 135 { OMAP_MPUIO_BASE, INT_MPUIO, IH_MPUIO_BASE, METHOD_MPUIO},
100 { OMAP1610_GPIO1_BASE, INT_GPIO_BANK1, IH_GPIO_BASE, METHOD_GPIO_1610 }, 136 { OMAP1610_GPIO1_BASE, INT_GPIO_BANK1, IH_GPIO_BASE, METHOD_GPIO_1610 },
@@ -123,6 +159,15 @@ static struct gpio_bank gpio_bank_730[7] = {
123}; 159};
124#endif 160#endif
125 161
162#ifdef CONFIG_ARCH_OMAP24XX
163static struct gpio_bank gpio_bank_24xx[4] = {
164 { OMAP24XX_GPIO1_BASE, INT_24XX_GPIO_BANK1, IH_GPIO_BASE, METHOD_GPIO_24XX },
165 { OMAP24XX_GPIO2_BASE, INT_24XX_GPIO_BANK2, IH_GPIO_BASE + 32, METHOD_GPIO_24XX },
166 { OMAP24XX_GPIO3_BASE, INT_24XX_GPIO_BANK3, IH_GPIO_BASE + 64, METHOD_GPIO_24XX },
167 { OMAP24XX_GPIO4_BASE, INT_24XX_GPIO_BANK4, IH_GPIO_BASE + 96, METHOD_GPIO_24XX },
168};
169#endif
170
126static struct gpio_bank *gpio_bank; 171static struct gpio_bank *gpio_bank;
127static int gpio_bank_count; 172static int gpio_bank_count;
128 173
@@ -149,14 +194,23 @@ static inline struct gpio_bank *get_gpio_bank(int gpio)
149 return &gpio_bank[1 + (gpio >> 5)]; 194 return &gpio_bank[1 + (gpio >> 5)];
150 } 195 }
151#endif 196#endif
197#ifdef CONFIG_ARCH_OMAP24XX
198 if (cpu_is_omap24xx())
199 return &gpio_bank[gpio >> 5];
200#endif
152} 201}
153 202
154static inline int get_gpio_index(int gpio) 203static inline int get_gpio_index(int gpio)
155{ 204{
205#ifdef CONFIG_ARCH_OMAP730
156 if (cpu_is_omap730()) 206 if (cpu_is_omap730())
157 return gpio & 0x1f; 207 return gpio & 0x1f;
158 else 208#endif
159 return gpio & 0x0f; 209#ifdef CONFIG_ARCH_OMAP24XX
210 if (cpu_is_omap24xx())
211 return gpio & 0x1f;
212#endif
213 return gpio & 0x0f;
160} 214}
161 215
162static inline int gpio_valid(int gpio) 216static inline int gpio_valid(int gpio)
@@ -180,6 +234,10 @@ static inline int gpio_valid(int gpio)
180 if (cpu_is_omap730() && gpio < 192) 234 if (cpu_is_omap730() && gpio < 192)
181 return 0; 235 return 0;
182#endif 236#endif
237#ifdef CONFIG_ARCH_OMAP24XX
238 if (cpu_is_omap24xx() && gpio < 128)
239 return 0;
240#endif
183 return -1; 241 return -1;
184} 242}
185 243
@@ -195,7 +253,7 @@ static int check_gpio(int gpio)
195 253
196static void _set_gpio_direction(struct gpio_bank *bank, int gpio, int is_input) 254static void _set_gpio_direction(struct gpio_bank *bank, int gpio, int is_input)
197{ 255{
198 u32 reg = bank->base; 256 void __iomem *reg = bank->base;
199 u32 l; 257 u32 l;
200 258
201 switch (bank->method) { 259 switch (bank->method) {
@@ -211,6 +269,9 @@ static void _set_gpio_direction(struct gpio_bank *bank, int gpio, int is_input)
211 case METHOD_GPIO_730: 269 case METHOD_GPIO_730:
212 reg += OMAP730_GPIO_DIR_CONTROL; 270 reg += OMAP730_GPIO_DIR_CONTROL;
213 break; 271 break;
272 case METHOD_GPIO_24XX:
273 reg += OMAP24XX_GPIO_OE;
274 break;
214 } 275 }
215 l = __raw_readl(reg); 276 l = __raw_readl(reg);
216 if (is_input) 277 if (is_input)
@@ -234,7 +295,7 @@ void omap_set_gpio_direction(int gpio, int is_input)
234 295
235static void _set_gpio_dataout(struct gpio_bank *bank, int gpio, int enable) 296static void _set_gpio_dataout(struct gpio_bank *bank, int gpio, int enable)
236{ 297{
237 u32 reg = bank->base; 298 void __iomem *reg = bank->base;
238 u32 l = 0; 299 u32 l = 0;
239 300
240 switch (bank->method) { 301 switch (bank->method) {
@@ -269,6 +330,13 @@ static void _set_gpio_dataout(struct gpio_bank *bank, int gpio, int enable)
269 else 330 else
270 l &= ~(1 << gpio); 331 l &= ~(1 << gpio);
271 break; 332 break;
333 case METHOD_GPIO_24XX:
334 if (enable)
335 reg += OMAP24XX_GPIO_SETDATAOUT;
336 else
337 reg += OMAP24XX_GPIO_CLEARDATAOUT;
338 l = 1 << gpio;
339 break;
272 default: 340 default:
273 BUG(); 341 BUG();
274 return; 342 return;
@@ -291,7 +359,7 @@ void omap_set_gpio_dataout(int gpio, int enable)
291int omap_get_gpio_datain(int gpio) 359int omap_get_gpio_datain(int gpio)
292{ 360{
293 struct gpio_bank *bank; 361 struct gpio_bank *bank;
294 u32 reg; 362 void __iomem *reg;
295 363
296 if (check_gpio(gpio) < 0) 364 if (check_gpio(gpio) < 0)
297 return -1; 365 return -1;
@@ -310,109 +378,132 @@ int omap_get_gpio_datain(int gpio)
310 case METHOD_GPIO_730: 378 case METHOD_GPIO_730:
311 reg += OMAP730_GPIO_DATA_INPUT; 379 reg += OMAP730_GPIO_DATA_INPUT;
312 break; 380 break;
381 case METHOD_GPIO_24XX:
382 reg += OMAP24XX_GPIO_DATAIN;
383 break;
313 default: 384 default:
314 BUG(); 385 BUG();
315 return -1; 386 return -1;
316 } 387 }
317 return (__raw_readl(reg) & (1 << get_gpio_index(gpio))) != 0; 388 return (__raw_readl(reg)
389 & (1 << get_gpio_index(gpio))) != 0;
318} 390}
319 391
320static void _set_gpio_edge_ctrl(struct gpio_bank *bank, int gpio, int edge) 392#define MOD_REG_BIT(reg, bit_mask, set) \
393do { \
394 int l = __raw_readl(base + reg); \
395 if (set) l |= bit_mask; \
396 else l &= ~bit_mask; \
397 __raw_writel(l, base + reg); \
398} while(0)
399
400static inline void set_24xx_gpio_triggering(void __iomem *base, int gpio, int trigger)
321{ 401{
322 u32 reg = bank->base; 402 u32 gpio_bit = 1 << gpio;
323 u32 l; 403
404 MOD_REG_BIT(OMAP24XX_GPIO_LEVELDETECT0, gpio_bit,
405 trigger & IRQT_LOW);
406 MOD_REG_BIT(OMAP24XX_GPIO_LEVELDETECT1, gpio_bit,
407 trigger & IRQT_HIGH);
408 MOD_REG_BIT(OMAP24XX_GPIO_RISINGDETECT, gpio_bit,
409 trigger & IRQT_RISING);
410 MOD_REG_BIT(OMAP24XX_GPIO_FALLINGDETECT, gpio_bit,
411 trigger & IRQT_FALLING);
412 /* FIXME: Possibly do 'set_irq_handler(j, do_level_IRQ)' if only level
413 * triggering requested. */
414}
415
416static int _set_gpio_triggering(struct gpio_bank *bank, int gpio, int trigger)
417{
418 void __iomem *reg = bank->base;
419 u32 l = 0;
324 420
325 switch (bank->method) { 421 switch (bank->method) {
326 case METHOD_MPUIO: 422 case METHOD_MPUIO:
327 reg += OMAP_MPUIO_GPIO_INT_EDGE; 423 reg += OMAP_MPUIO_GPIO_INT_EDGE;
328 l = __raw_readl(reg); 424 l = __raw_readl(reg);
329 if (edge == OMAP_GPIO_RISING_EDGE) 425 if (trigger == IRQT_RISING)
330 l |= 1 << gpio; 426 l |= 1 << gpio;
331 else 427 else if (trigger == IRQT_FALLING)
332 l &= ~(1 << gpio); 428 l &= ~(1 << gpio);
333 __raw_writel(l, reg); 429 else
430 goto bad;
334 break; 431 break;
335 case METHOD_GPIO_1510: 432 case METHOD_GPIO_1510:
336 reg += OMAP1510_GPIO_INT_CONTROL; 433 reg += OMAP1510_GPIO_INT_CONTROL;
337 l = __raw_readl(reg); 434 l = __raw_readl(reg);
338 if (edge == OMAP_GPIO_RISING_EDGE) 435 if (trigger == IRQT_RISING)
339 l |= 1 << gpio; 436 l |= 1 << gpio;
340 else 437 else if (trigger == IRQT_FALLING)
341 l &= ~(1 << gpio); 438 l &= ~(1 << gpio);
342 __raw_writel(l, reg); 439 else
440 goto bad;
343 break; 441 break;
344 case METHOD_GPIO_1610: 442 case METHOD_GPIO_1610:
345 edge &= 0x03;
346 if (gpio & 0x08) 443 if (gpio & 0x08)
347 reg += OMAP1610_GPIO_EDGE_CTRL2; 444 reg += OMAP1610_GPIO_EDGE_CTRL2;
348 else 445 else
349 reg += OMAP1610_GPIO_EDGE_CTRL1; 446 reg += OMAP1610_GPIO_EDGE_CTRL1;
350 gpio &= 0x07; 447 gpio &= 0x07;
448 /* We allow only edge triggering, i.e. two lowest bits */
449 if (trigger & ~IRQT_BOTHEDGE)
450 BUG();
451 /* NOTE: knows __IRQT_{FAL,RIS}EDGE match OMAP hardware */
452 trigger &= 0x03;
351 l = __raw_readl(reg); 453 l = __raw_readl(reg);
352 l &= ~(3 << (gpio << 1)); 454 l &= ~(3 << (gpio << 1));
353 l |= edge << (gpio << 1); 455 l |= trigger << (gpio << 1);
354 __raw_writel(l, reg);
355 break; 456 break;
356 case METHOD_GPIO_730: 457 case METHOD_GPIO_730:
357 reg += OMAP730_GPIO_INT_CONTROL; 458 reg += OMAP730_GPIO_INT_CONTROL;
358 l = __raw_readl(reg); 459 l = __raw_readl(reg);
359 if (edge == OMAP_GPIO_RISING_EDGE) 460 if (trigger == IRQT_RISING)
360 l |= 1 << gpio; 461 l |= 1 << gpio;
361 else 462 else if (trigger == IRQT_FALLING)
362 l &= ~(1 << gpio); 463 l &= ~(1 << gpio);
363 __raw_writel(l, reg); 464 else
465 goto bad;
466 break;
467 case METHOD_GPIO_24XX:
468 set_24xx_gpio_triggering(reg, gpio, trigger);
364 break; 469 break;
365 default: 470 default:
366 BUG(); 471 BUG();
367 return; 472 goto bad;
368 } 473 }
474 __raw_writel(l, reg);
475 return 0;
476bad:
477 return -EINVAL;
369} 478}
370 479
371void omap_set_gpio_edge_ctrl(int gpio, int edge) 480static int gpio_irq_type(unsigned irq, unsigned type)
372{ 481{
373 struct gpio_bank *bank; 482 struct gpio_bank *bank;
483 unsigned gpio;
484 int retval;
485
486 if (irq > IH_MPUIO_BASE)
487 gpio = OMAP_MPUIO(irq - IH_MPUIO_BASE);
488 else
489 gpio = irq - IH_GPIO_BASE;
374 490
375 if (check_gpio(gpio) < 0) 491 if (check_gpio(gpio) < 0)
376 return; 492 return -EINVAL;
493
494 if (type & (__IRQT_LOWLVL|__IRQT_HIGHLVL|IRQT_PROBE))
495 return -EINVAL;
496
377 bank = get_gpio_bank(gpio); 497 bank = get_gpio_bank(gpio);
378 spin_lock(&bank->lock); 498 spin_lock(&bank->lock);
379 _set_gpio_edge_ctrl(bank, get_gpio_index(gpio), edge); 499 retval = _set_gpio_triggering(bank, get_gpio_index(gpio), type);
380 spin_unlock(&bank->lock); 500 spin_unlock(&bank->lock);
381} 501 return retval;
382
383
384static int _get_gpio_edge_ctrl(struct gpio_bank *bank, int gpio)
385{
386 u32 reg = bank->base, l;
387
388 switch (bank->method) {
389 case METHOD_MPUIO:
390 l = __raw_readl(reg + OMAP_MPUIO_GPIO_INT_EDGE);
391 return (l & (1 << gpio)) ?
392 OMAP_GPIO_RISING_EDGE : OMAP_GPIO_FALLING_EDGE;
393 case METHOD_GPIO_1510:
394 l = __raw_readl(reg + OMAP1510_GPIO_INT_CONTROL);
395 return (l & (1 << gpio)) ?
396 OMAP_GPIO_RISING_EDGE : OMAP_GPIO_FALLING_EDGE;
397 case METHOD_GPIO_1610:
398 if (gpio & 0x08)
399 reg += OMAP1610_GPIO_EDGE_CTRL2;
400 else
401 reg += OMAP1610_GPIO_EDGE_CTRL1;
402 return (__raw_readl(reg) >> ((gpio & 0x07) << 1)) & 0x03;
403 case METHOD_GPIO_730:
404 l = __raw_readl(reg + OMAP730_GPIO_INT_CONTROL);
405 return (l & (1 << gpio)) ?
406 OMAP_GPIO_RISING_EDGE : OMAP_GPIO_FALLING_EDGE;
407 default:
408 BUG();
409 return -1;
410 }
411} 502}
412 503
413static void _clear_gpio_irqbank(struct gpio_bank *bank, int gpio_mask) 504static void _clear_gpio_irqbank(struct gpio_bank *bank, int gpio_mask)
414{ 505{
415 u32 reg = bank->base; 506 void __iomem *reg = bank->base;
416 507
417 switch (bank->method) { 508 switch (bank->method) {
418 case METHOD_MPUIO: 509 case METHOD_MPUIO:
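With the trigger plumbing above (and the .set_type hook added to gpio_irq_chip later in this patch), drivers select edges through the generic IRQ API rather than the removed omap_set_gpio_edge_ctrl(). A rough usage sketch; the GPIO number, my_handler and the OMAP_GPIO_IRQ() gpio-to-IRQ mapping are illustrative assumptions, not part of this patch:

    /* error handling trimmed; gpio is whatever pin the board uses */
    if (omap_request_gpio(gpio) < 0)
            return -EBUSY;
    omap_set_gpio_direction(gpio, 1);               /* input */
    set_irq_type(OMAP_GPIO_IRQ(gpio), IRQT_FALLING);
    ret = request_irq(OMAP_GPIO_IRQ(gpio), my_handler, SA_INTERRUPT,
                      "my-device", NULL);

Note that gpio_irq_type() above rejects __IRQT_LOWLVL, __IRQT_HIGHLVL and IRQT_PROBE, so only edge types are meaningful through this path.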
@@ -428,6 +519,9 @@ static void _clear_gpio_irqbank(struct gpio_bank *bank, int gpio_mask)
428 case METHOD_GPIO_730: 519 case METHOD_GPIO_730:
429 reg += OMAP730_GPIO_INT_STATUS; 520 reg += OMAP730_GPIO_INT_STATUS;
430 break; 521 break;
522 case METHOD_GPIO_24XX:
523 reg += OMAP24XX_GPIO_IRQSTATUS1;
524 break;
431 default: 525 default:
432 BUG(); 526 BUG();
433 return; 527 return;
@@ -442,7 +536,7 @@ static inline void _clear_gpio_irqstatus(struct gpio_bank *bank, int gpio)
442 536
443static void _enable_gpio_irqbank(struct gpio_bank *bank, int gpio_mask, int enable) 537static void _enable_gpio_irqbank(struct gpio_bank *bank, int gpio_mask, int enable)
444{ 538{
445 u32 reg = bank->base; 539 void __iomem *reg = bank->base;
446 u32 l; 540 u32 l;
447 541
448 switch (bank->method) { 542 switch (bank->method) {
@@ -477,6 +571,13 @@ static void _enable_gpio_irqbank(struct gpio_bank *bank, int gpio_mask, int enab
477 else 571 else
478 l |= gpio_mask; 572 l |= gpio_mask;
479 break; 573 break;
574 case METHOD_GPIO_24XX:
575 if (enable)
576 reg += OMAP24XX_GPIO_SETIRQENABLE1;
577 else
578 reg += OMAP24XX_GPIO_CLEARIRQENABLE1;
579 l = gpio_mask;
580 break;
480 default: 581 default:
481 BUG(); 582 BUG();
482 return; 583 return;
@@ -489,6 +590,50 @@ static inline void _set_gpio_irqenable(struct gpio_bank *bank, int gpio, int ena
489 _enable_gpio_irqbank(bank, 1 << get_gpio_index(gpio), enable); 590 _enable_gpio_irqbank(bank, 1 << get_gpio_index(gpio), enable);
490} 591}
491 592
593/*
594 * Note that ENAWAKEUP needs to be enabled in GPIO_SYSCONFIG register.
 595 * 1510 does not seem to have a wake-up register. If JTAG is connected
 596 * to the target, the system will always wake up on GPIO events. While
 597 * the system is running, all registered GPIO interrupts need to have wake-up
 598 * enabled. When the system is suspended, only selected GPIO interrupts need
599 * to have wake-up enabled.
600 */
601static int _set_gpio_wakeup(struct gpio_bank *bank, int gpio, int enable)
602{
603 switch (bank->method) {
604 case METHOD_GPIO_1610:
605 case METHOD_GPIO_24XX:
606 spin_lock(&bank->lock);
607 if (enable)
608 bank->suspend_wakeup |= (1 << gpio);
609 else
610 bank->suspend_wakeup &= ~(1 << gpio);
611 spin_unlock(&bank->lock);
612 return 0;
613 default:
614 printk(KERN_ERR "Can't enable GPIO wakeup for method %i\n",
615 bank->method);
616 return -EINVAL;
617 }
618}
619
620/* Use disable_irq_wake() and enable_irq_wake() functions from drivers */
621static int gpio_wake_enable(unsigned int irq, unsigned int enable)
622{
623 unsigned int gpio = irq - IH_GPIO_BASE;
624 struct gpio_bank *bank;
625 int retval;
626
627 if (check_gpio(gpio) < 0)
628 return -ENODEV;
629 bank = get_gpio_bank(gpio);
630 spin_lock(&bank->lock);
631 retval = _set_gpio_wakeup(bank, get_gpio_index(gpio), enable);
632 spin_unlock(&bank->lock);
633
634 return retval;
635}
636
492int omap_request_gpio(int gpio) 637int omap_request_gpio(int gpio)
493{ 638{
494 struct gpio_bank *bank; 639 struct gpio_bank *bank;
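Tying the two hunks above together: _set_gpio_wakeup() only records the per-bank suspend_wakeup mask, and gpio_wake_enable() is what the generic enable_irq_wake()/disable_irq_wake() calls reach through the .set_wake hook added later in this patch. A minimal sketch of the driver side, with the IRQ number purely illustrative:

    /* after request_irq() on the GPIO's virtual IRQ */
    enable_irq_wake(OMAP_GPIO_IRQ(gpio));   /* sets the suspend_wakeup bit */

    /* when the wake source is no longer needed */
    disable_irq_wake(OMAP_GPIO_IRQ(gpio));

The recorded mask is only written to the hardware wake-enable registers in omap_gpio_suspend() further down.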
@@ -505,15 +650,33 @@ int omap_request_gpio(int gpio)
505 return -1; 650 return -1;
506 } 651 }
507 bank->reserved_map |= (1 << get_gpio_index(gpio)); 652 bank->reserved_map |= (1 << get_gpio_index(gpio));
653
654 /* Set trigger to none. You need to enable the trigger after request_irq */
655 _set_gpio_triggering(bank, get_gpio_index(gpio), IRQT_NOEDGE);
656
508#ifdef CONFIG_ARCH_OMAP1510 657#ifdef CONFIG_ARCH_OMAP1510
509 if (bank->method == METHOD_GPIO_1510) { 658 if (bank->method == METHOD_GPIO_1510) {
510 u32 reg; 659 void __iomem *reg;
511 660
512 /* Claim the pin for the ARM */ 661 /* Claim the pin for MPU */
513 reg = bank->base + OMAP1510_GPIO_PIN_CONTROL; 662 reg = bank->base + OMAP1510_GPIO_PIN_CONTROL;
514 __raw_writel(__raw_readl(reg) | (1 << get_gpio_index(gpio)), reg); 663 __raw_writel(__raw_readl(reg) | (1 << get_gpio_index(gpio)), reg);
515 } 664 }
516#endif 665#endif
666#ifdef CONFIG_ARCH_OMAP16XX
667 if (bank->method == METHOD_GPIO_1610) {
668 /* Enable wake-up during idle for dynamic tick */
669 void __iomem *reg = bank->base + OMAP1610_GPIO_SET_WAKEUPENA;
670 __raw_writel(1 << get_gpio_index(gpio), reg);
671 }
672#endif
673#ifdef CONFIG_ARCH_OMAP24XX
674 if (bank->method == METHOD_GPIO_24XX) {
675 /* Enable wake-up during idle for dynamic tick */
676 void __iomem *reg = bank->base + OMAP24XX_GPIO_SETWKUENA;
677 __raw_writel(1 << get_gpio_index(gpio), reg);
678 }
679#endif
517 spin_unlock(&bank->lock); 680 spin_unlock(&bank->lock);
518 681
519 return 0; 682 return 0;
@@ -533,6 +696,20 @@ void omap_free_gpio(int gpio)
533 spin_unlock(&bank->lock); 696 spin_unlock(&bank->lock);
534 return; 697 return;
535 } 698 }
699#ifdef CONFIG_ARCH_OMAP16XX
700 if (bank->method == METHOD_GPIO_1610) {
701 /* Disable wake-up during idle for dynamic tick */
702 void __iomem *reg = bank->base + OMAP1610_GPIO_CLEAR_WAKEUPENA;
703 __raw_writel(1 << get_gpio_index(gpio), reg);
704 }
705#endif
706#ifdef CONFIG_ARCH_OMAP24XX
707 if (bank->method == METHOD_GPIO_24XX) {
708 /* Disable wake-up during idle for dynamic tick */
709 void __iomem *reg = bank->base + OMAP24XX_GPIO_CLEARWKUENA;
710 __raw_writel(1 << get_gpio_index(gpio), reg);
711 }
712#endif
536 bank->reserved_map &= ~(1 << get_gpio_index(gpio)); 713 bank->reserved_map &= ~(1 << get_gpio_index(gpio));
537 _set_gpio_direction(bank, get_gpio_index(gpio), 1); 714 _set_gpio_direction(bank, get_gpio_index(gpio), 1);
538 _set_gpio_irqenable(bank, gpio, 0); 715 _set_gpio_irqenable(bank, gpio, 0);
@@ -552,7 +729,7 @@ void omap_free_gpio(int gpio)
552static void gpio_irq_handler(unsigned int irq, struct irqdesc *desc, 729static void gpio_irq_handler(unsigned int irq, struct irqdesc *desc,
553 struct pt_regs *regs) 730 struct pt_regs *regs)
554{ 731{
555 u32 isr_reg = 0; 732 void __iomem *isr_reg = NULL;
556 u32 isr; 733 u32 isr;
557 unsigned int gpio_irq; 734 unsigned int gpio_irq;
558 struct gpio_bank *bank; 735 struct gpio_bank *bank;
@@ -574,24 +751,30 @@ static void gpio_irq_handler(unsigned int irq, struct irqdesc *desc,
574 if (bank->method == METHOD_GPIO_730) 751 if (bank->method == METHOD_GPIO_730)
575 isr_reg = bank->base + OMAP730_GPIO_INT_STATUS; 752 isr_reg = bank->base + OMAP730_GPIO_INT_STATUS;
576#endif 753#endif
754#ifdef CONFIG_ARCH_OMAP24XX
755 if (bank->method == METHOD_GPIO_24XX)
756 isr_reg = bank->base + OMAP24XX_GPIO_IRQSTATUS1;
757#endif
577 758
578 isr = __raw_readl(isr_reg); 759 while(1) {
579 _enable_gpio_irqbank(bank, isr, 0); 760 isr = __raw_readl(isr_reg);
580 _clear_gpio_irqbank(bank, isr); 761 _enable_gpio_irqbank(bank, isr, 0);
581 _enable_gpio_irqbank(bank, isr, 1); 762 _clear_gpio_irqbank(bank, isr);
582 desc->chip->unmask(irq); 763 _enable_gpio_irqbank(bank, isr, 1);
583 764 desc->chip->unmask(irq);
584 if (unlikely(!isr)) 765
585 return; 766 if (!isr)
586 767 break;
587 gpio_irq = bank->virtual_irq_start; 768
588 for (; isr != 0; isr >>= 1, gpio_irq++) { 769 gpio_irq = bank->virtual_irq_start;
589 struct irqdesc *d; 770 for (; isr != 0; isr >>= 1, gpio_irq++) {
590 if (!(isr & 1)) 771 struct irqdesc *d;
591 continue; 772 if (!(isr & 1))
592 d = irq_desc + gpio_irq; 773 continue;
593 desc_handle_irq(gpio_irq, d, regs); 774 d = irq_desc + gpio_irq;
594 } 775 desc_handle_irq(gpio_irq, d, regs);
776 }
777 }
595} 778}
596 779
597static void gpio_ack_irq(unsigned int irq) 780static void gpio_ack_irq(unsigned int irq)
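The handler rework above replaces a single read-and-dispatch pass with a loop that keeps re-reading the status register until it comes back empty, so edges that arrive while earlier bits are being serviced are handled before the parent interrupt is considered done. Reduced to its essential shape (the mask/ack/unmask ordering is as in the code above and not repeated here):

    for (;;) {
            u32 isr = __raw_readl(isr_reg);

            /* mask, ack and re-enable the bits just read, as above */

            if (!isr)
                    break;          /* nothing left pending */

            /* dispatch one desc_handle_irq() per set bit, as above */
    }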
@@ -613,14 +796,10 @@ static void gpio_mask_irq(unsigned int irq)
613static void gpio_unmask_irq(unsigned int irq) 796static void gpio_unmask_irq(unsigned int irq)
614{ 797{
615 unsigned int gpio = irq - IH_GPIO_BASE; 798 unsigned int gpio = irq - IH_GPIO_BASE;
799 unsigned int gpio_idx = get_gpio_index(gpio);
616 struct gpio_bank *bank = get_gpio_bank(gpio); 800 struct gpio_bank *bank = get_gpio_bank(gpio);
617 801
618 if (_get_gpio_edge_ctrl(bank, get_gpio_index(gpio)) == OMAP_GPIO_NO_EDGE) { 802 _set_gpio_irqenable(bank, gpio_idx, 1);
619 printk(KERN_ERR "OMAP GPIO %d: trying to enable GPIO IRQ while no edge is set\n",
620 gpio);
621 _set_gpio_edge_ctrl(bank, get_gpio_index(gpio), OMAP_GPIO_RISING_EDGE);
622 }
623 _set_gpio_irqenable(bank, gpio, 1);
624} 803}
625 804
626static void mpuio_ack_irq(unsigned int irq) 805static void mpuio_ack_irq(unsigned int irq)
@@ -645,9 +824,11 @@ static void mpuio_unmask_irq(unsigned int irq)
645} 824}
646 825
647static struct irqchip gpio_irq_chip = { 826static struct irqchip gpio_irq_chip = {
648 .ack = gpio_ack_irq, 827 .ack = gpio_ack_irq,
649 .mask = gpio_mask_irq, 828 .mask = gpio_mask_irq,
650 .unmask = gpio_unmask_irq, 829 .unmask = gpio_unmask_irq,
830 .set_type = gpio_irq_type,
831 .set_wake = gpio_wake_enable,
651}; 832};
652 833
653static struct irqchip mpuio_irq_chip = { 834static struct irqchip mpuio_irq_chip = {
@@ -657,6 +838,7 @@ static struct irqchip mpuio_irq_chip = {
657}; 838};
658 839
659static int initialized = 0; 840static int initialized = 0;
841static struct clk * gpio_ck = NULL;
660 842
661static int __init _omap_gpio_init(void) 843static int __init _omap_gpio_init(void)
662{ 844{
@@ -665,6 +847,14 @@ static int __init _omap_gpio_init(void)
665 847
666 initialized = 1; 848 initialized = 1;
667 849
850 if (cpu_is_omap1510()) {
851 gpio_ck = clk_get(NULL, "arm_gpio_ck");
852 if (IS_ERR(gpio_ck))
853 printk("Could not get arm_gpio_ck\n");
854 else
855 clk_use(gpio_ck);
856 }
857
668#ifdef CONFIG_ARCH_OMAP1510 858#ifdef CONFIG_ARCH_OMAP1510
669 if (cpu_is_omap1510()) { 859 if (cpu_is_omap1510()) {
670 printk(KERN_INFO "OMAP1510 GPIO hardware\n"); 860 printk(KERN_INFO "OMAP1510 GPIO hardware\n");
@@ -674,7 +864,7 @@ static int __init _omap_gpio_init(void)
674#endif 864#endif
675#if defined(CONFIG_ARCH_OMAP16XX) 865#if defined(CONFIG_ARCH_OMAP16XX)
676 if (cpu_is_omap16xx()) { 866 if (cpu_is_omap16xx()) {
677 int rev; 867 u32 rev;
678 868
679 gpio_bank_count = 5; 869 gpio_bank_count = 5;
680 gpio_bank = gpio_bank_1610; 870 gpio_bank = gpio_bank_1610;
@@ -690,6 +880,17 @@ static int __init _omap_gpio_init(void)
690 gpio_bank = gpio_bank_730; 880 gpio_bank = gpio_bank_730;
691 } 881 }
692#endif 882#endif
883#ifdef CONFIG_ARCH_OMAP24XX
884 if (cpu_is_omap24xx()) {
885 int rev;
886
887 gpio_bank_count = 4;
888 gpio_bank = gpio_bank_24xx;
889 rev = omap_readl(gpio_bank[0].base + OMAP24XX_GPIO_REVISION);
890 printk(KERN_INFO "OMAP24xx GPIO hardware version %d.%d\n",
891 (rev >> 4) & 0x0f, rev & 0x0f);
892 }
893#endif
693 for (i = 0; i < gpio_bank_count; i++) { 894 for (i = 0; i < gpio_bank_count; i++) {
694 int j, gpio_count = 16; 895 int j, gpio_count = 16;
695 896
@@ -710,6 +911,7 @@ static int __init _omap_gpio_init(void)
710 if (bank->method == METHOD_GPIO_1610) { 911 if (bank->method == METHOD_GPIO_1610) {
711 __raw_writew(0x0000, bank->base + OMAP1610_GPIO_IRQENABLE1); 912 __raw_writew(0x0000, bank->base + OMAP1610_GPIO_IRQENABLE1);
712 __raw_writew(0xffff, bank->base + OMAP1610_GPIO_IRQSTATUS1); 913 __raw_writew(0xffff, bank->base + OMAP1610_GPIO_IRQSTATUS1);
914 __raw_writew(0x0014, bank->base + OMAP1610_GPIO_SYSCONFIG);
713 } 915 }
714#endif 916#endif
715#ifdef CONFIG_ARCH_OMAP730 917#ifdef CONFIG_ARCH_OMAP730
@@ -720,6 +922,14 @@ static int __init _omap_gpio_init(void)
720 gpio_count = 32; /* 730 has 32-bit GPIOs */ 922 gpio_count = 32; /* 730 has 32-bit GPIOs */
721 } 923 }
722#endif 924#endif
925#ifdef CONFIG_ARCH_OMAP24XX
926 if (bank->method == METHOD_GPIO_24XX) {
927 __raw_writel(0x00000000, bank->base + OMAP24XX_GPIO_IRQENABLE1);
928 __raw_writel(0xffffffff, bank->base + OMAP24XX_GPIO_IRQSTATUS1);
929
930 gpio_count = 32;
931 }
932#endif
723 for (j = bank->virtual_irq_start; 933 for (j = bank->virtual_irq_start;
724 j < bank->virtual_irq_start + gpio_count; j++) { 934 j < bank->virtual_irq_start + gpio_count; j++) {
725 if (bank->method == METHOD_MPUIO) 935 if (bank->method == METHOD_MPUIO)
@@ -735,12 +945,97 @@ static int __init _omap_gpio_init(void)
735 945
736 /* Enable system clock for GPIO module. 946 /* Enable system clock for GPIO module.
737 * The CAM_CLK_CTRL *is* really the right place. */ 947 * The CAM_CLK_CTRL *is* really the right place. */
738 if (cpu_is_omap1610() || cpu_is_omap1710()) 948 if (cpu_is_omap16xx())
739 omap_writel(omap_readl(ULPD_CAM_CLK_CTRL) | 0x04, ULPD_CAM_CLK_CTRL); 949 omap_writel(omap_readl(ULPD_CAM_CLK_CTRL) | 0x04, ULPD_CAM_CLK_CTRL);
740 950
741 return 0; 951 return 0;
742} 952}
743 953
954#if defined (CONFIG_ARCH_OMAP16XX) || defined (CONFIG_ARCH_OMAP24XX)
955static int omap_gpio_suspend(struct sys_device *dev, pm_message_t mesg)
956{
957 int i;
958
959 if (!cpu_is_omap24xx() && !cpu_is_omap16xx())
960 return 0;
961
962 for (i = 0; i < gpio_bank_count; i++) {
963 struct gpio_bank *bank = &gpio_bank[i];
964 void __iomem *wake_status;
965 void __iomem *wake_clear;
966 void __iomem *wake_set;
967
968 switch (bank->method) {
969 case METHOD_GPIO_1610:
970 wake_status = bank->base + OMAP1610_GPIO_WAKEUPENABLE;
971 wake_clear = bank->base + OMAP1610_GPIO_CLEAR_WAKEUPENA;
972 wake_set = bank->base + OMAP1610_GPIO_SET_WAKEUPENA;
973 break;
974 case METHOD_GPIO_24XX:
975 wake_status = bank->base + OMAP24XX_GPIO_SETWKUENA;
976 wake_clear = bank->base + OMAP24XX_GPIO_CLEARWKUENA;
977 wake_set = bank->base + OMAP24XX_GPIO_SETWKUENA;
978 break;
979 default:
980 continue;
981 }
982
983 spin_lock(&bank->lock);
984 bank->saved_wakeup = __raw_readl(wake_status);
985 __raw_writel(0xffffffff, wake_clear);
986 __raw_writel(bank->suspend_wakeup, wake_set);
987 spin_unlock(&bank->lock);
988 }
989
990 return 0;
991}
992
993static int omap_gpio_resume(struct sys_device *dev)
994{
995 int i;
996
997 if (!cpu_is_omap24xx() && !cpu_is_omap16xx())
998 return 0;
999
1000 for (i = 0; i < gpio_bank_count; i++) {
1001 struct gpio_bank *bank = &gpio_bank[i];
1002 void __iomem *wake_clear;
1003 void __iomem *wake_set;
1004
1005 switch (bank->method) {
1006 case METHOD_GPIO_1610:
1007 wake_clear = bank->base + OMAP1610_GPIO_CLEAR_WAKEUPENA;
1008 wake_set = bank->base + OMAP1610_GPIO_SET_WAKEUPENA;
1009 break;
1010 case METHOD_GPIO_24XX:
 1011 wake_clear = bank->base + OMAP24XX_GPIO_CLEARWKUENA;
 1012 wake_set = bank->base + OMAP24XX_GPIO_SETWKUENA;
1013 break;
1014 default:
1015 continue;
1016 }
1017
1018 spin_lock(&bank->lock);
1019 __raw_writel(0xffffffff, wake_clear);
1020 __raw_writel(bank->saved_wakeup, wake_set);
1021 spin_unlock(&bank->lock);
1022 }
1023
1024 return 0;
1025}
1026
1027static struct sysdev_class omap_gpio_sysclass = {
1028 set_kset_name("gpio"),
1029 .suspend = omap_gpio_suspend,
1030 .resume = omap_gpio_resume,
1031};
1032
1033static struct sys_device omap_gpio_device = {
1034 .id = 0,
1035 .cls = &omap_gpio_sysclass,
1036};
1037#endif
1038
744/* 1039/*
745 * This may get called early from board specific init 1040 * This may get called early from board specific init
746 */ 1041 */
@@ -752,11 +1047,30 @@ int omap_gpio_init(void)
752 return 0; 1047 return 0;
753} 1048}
754 1049
1050static int __init omap_gpio_sysinit(void)
1051{
1052 int ret = 0;
1053
1054 if (!initialized)
1055 ret = _omap_gpio_init();
1056
1057#if defined(CONFIG_ARCH_OMAP16XX) || defined(CONFIG_ARCH_OMAP24XX)
1058 if (cpu_is_omap16xx() || cpu_is_omap24xx()) {
1059 if (ret == 0) {
1060 ret = sysdev_class_register(&omap_gpio_sysclass);
1061 if (ret == 0)
1062 ret = sysdev_register(&omap_gpio_device);
1063 }
1064 }
1065#endif
1066
1067 return ret;
1068}
1069
755EXPORT_SYMBOL(omap_request_gpio); 1070EXPORT_SYMBOL(omap_request_gpio);
756EXPORT_SYMBOL(omap_free_gpio); 1071EXPORT_SYMBOL(omap_free_gpio);
757EXPORT_SYMBOL(omap_set_gpio_direction); 1072EXPORT_SYMBOL(omap_set_gpio_direction);
758EXPORT_SYMBOL(omap_set_gpio_dataout); 1073EXPORT_SYMBOL(omap_set_gpio_dataout);
759EXPORT_SYMBOL(omap_get_gpio_datain); 1074EXPORT_SYMBOL(omap_get_gpio_datain);
760EXPORT_SYMBOL(omap_set_gpio_edge_ctrl);
761 1075
762arch_initcall(omap_gpio_init); 1076arch_initcall(omap_gpio_sysinit);
diff --git a/arch/arm/plat-omap/mcbsp.c b/arch/arm/plat-omap/mcbsp.c
index 43567d5edddb..9c9b7df3faf6 100644
--- a/arch/arm/plat-omap/mcbsp.c
+++ b/arch/arm/plat-omap/mcbsp.c
@@ -27,6 +27,7 @@
27#include <asm/arch/dma.h> 27#include <asm/arch/dma.h>
28#include <asm/arch/mux.h> 28#include <asm/arch/mux.h>
29#include <asm/arch/irqs.h> 29#include <asm/arch/irqs.h>
30#include <asm/arch/dsp_common.h>
30#include <asm/arch/mcbsp.h> 31#include <asm/arch/mcbsp.h>
31 32
32#include <asm/hardware/clock.h> 33#include <asm/hardware/clock.h>
@@ -187,9 +188,6 @@ static int omap_mcbsp_check(unsigned int id)
187 return -1; 188 return -1;
188} 189}
189 190
190#define EN_XORPCK 1
191#define DSP_RSTCT2 0xe1008014
192
193static void omap_mcbsp_dsp_request(void) 191static void omap_mcbsp_dsp_request(void)
194{ 192{
195 if (cpu_is_omap1510() || cpu_is_omap16xx()) { 193 if (cpu_is_omap1510() || cpu_is_omap16xx()) {
@@ -198,6 +196,11 @@ static void omap_mcbsp_dsp_request(void)
198 196
199 /* enable 12MHz clock to mcbsp 1 & 3 */ 197 /* enable 12MHz clock to mcbsp 1 & 3 */
200 clk_use(mcbsp_dspxor_ck); 198 clk_use(mcbsp_dspxor_ck);
199
200 /*
201 * DSP external peripheral reset
202 * FIXME: This should be moved to dsp code
203 */
201 __raw_writew(__raw_readw(DSP_RSTCT2) | 1 | 1 << 1, 204 __raw_writew(__raw_readw(DSP_RSTCT2) | 1 | 1 << 1,
202 DSP_RSTCT2); 205 DSP_RSTCT2);
203 } 206 }
diff --git a/arch/arm/plat-omap/mux.c b/arch/arm/plat-omap/mux.c
index ea7b955b9c81..64482040f89e 100644
--- a/arch/arm/plat-omap/mux.c
+++ b/arch/arm/plat-omap/mux.c
@@ -48,6 +48,9 @@ omap_cfg_reg(const reg_cfg_t reg_cfg)
48 pull_orig = 0, pull = 0; 48 pull_orig = 0, pull = 0;
49 unsigned int mask, warn = 0; 49 unsigned int mask, warn = 0;
50 50
51 if (cpu_is_omap7xx())
52 return 0;
53
51 if (reg_cfg > ARRAY_SIZE(reg_cfg_table)) { 54 if (reg_cfg > ARRAY_SIZE(reg_cfg_table)) {
52 printk(KERN_ERR "MUX: reg_cfg %d\n", reg_cfg); 55 printk(KERN_ERR "MUX: reg_cfg %d\n", reg_cfg);
53 return -EINVAL; 56 return -EINVAL;
diff --git a/arch/arm/plat-omap/ocpi.c b/arch/arm/plat-omap/ocpi.c
index 2ede2ee8cae4..1fb16f9edfd5 100644
--- a/arch/arm/plat-omap/ocpi.c
+++ b/arch/arm/plat-omap/ocpi.c
@@ -25,6 +25,7 @@
25 25
26#include <linux/config.h> 26#include <linux/config.h>
27#include <linux/module.h> 27#include <linux/module.h>
28#include <linux/version.h>
28#include <linux/types.h> 29#include <linux/types.h>
29#include <linux/errno.h> 30#include <linux/errno.h>
30#include <linux/kernel.h> 31#include <linux/kernel.h>
diff --git a/arch/arm/plat-omap/pm.c b/arch/arm/plat-omap/pm.c
index e6536b16c385..e15c6c1ddec9 100644
--- a/arch/arm/plat-omap/pm.c
+++ b/arch/arm/plat-omap/pm.c
@@ -39,24 +39,32 @@
39#include <linux/sched.h> 39#include <linux/sched.h>
40#include <linux/proc_fs.h> 40#include <linux/proc_fs.h>
41#include <linux/pm.h> 41#include <linux/pm.h>
42#include <linux/interrupt.h>
42 43
43#include <asm/io.h> 44#include <asm/io.h>
45#include <asm/irq.h>
44#include <asm/mach/time.h> 46#include <asm/mach/time.h>
45#include <asm/mach-types.h> 47#include <asm/mach/irq.h>
46 48
47#include <asm/arch/omap16xx.h> 49#include <asm/mach-types.h>
50#include <asm/arch/irqs.h>
51#include <asm/arch/tc.h>
48#include <asm/arch/pm.h> 52#include <asm/arch/pm.h>
49#include <asm/arch/mux.h> 53#include <asm/arch/mux.h>
50#include <asm/arch/tc.h>
51#include <asm/arch/tps65010.h> 54#include <asm/arch/tps65010.h>
55#include <asm/arch/dsp_common.h>
52 56
53#include "clock.h" 57#include "clock.h"
58#include "sram.h"
54 59
55static unsigned int arm_sleep_save[ARM_SLEEP_SAVE_SIZE]; 60static unsigned int arm_sleep_save[ARM_SLEEP_SAVE_SIZE];
56static unsigned short ulpd_sleep_save[ULPD_SLEEP_SAVE_SIZE]; 61static unsigned short ulpd_sleep_save[ULPD_SLEEP_SAVE_SIZE];
57static unsigned int mpui1510_sleep_save[MPUI1510_SLEEP_SAVE_SIZE]; 62static unsigned int mpui1510_sleep_save[MPUI1510_SLEEP_SAVE_SIZE];
58static unsigned int mpui1610_sleep_save[MPUI1610_SLEEP_SAVE_SIZE]; 63static unsigned int mpui1610_sleep_save[MPUI1610_SLEEP_SAVE_SIZE];
59 64
65static void (*omap_sram_idle)(void) = NULL;
66static void (*omap_sram_suspend)(unsigned long r0, unsigned long r1) = NULL;
67
60/* 68/*
61 * Let's power down on idle, but only if we are really 69 * Let's power down on idle, but only if we are really
62 * idle, because once we start down the path of 70 * idle, because once we start down the path of
@@ -65,7 +73,6 @@ static unsigned int mpui1610_sleep_save[MPUI1610_SLEEP_SAVE_SIZE];
65 */ 73 */
66void omap_pm_idle(void) 74void omap_pm_idle(void)
67{ 75{
68 int (*func_ptr)(void) = 0;
69 unsigned int mask32 = 0; 76 unsigned int mask32 = 0;
70 77
71 /* 78 /*
@@ -84,6 +91,13 @@ void omap_pm_idle(void)
84 mask32 = omap_readl(ARM_SYSST); 91 mask32 = omap_readl(ARM_SYSST);
85 92
86 /* 93 /*
94 * Prevent the ULPD from entering low power state by setting
95 * POWER_CTRL_REG:4 = 0
96 */
97 omap_writew(omap_readw(ULPD_POWER_CTRL) &
98 ~ULPD_DEEP_SLEEP_TRANSITION_EN, ULPD_POWER_CTRL);
99
100 /*
87 * Since an interrupt may set up a timer, we don't want to 101 * Since an interrupt may set up a timer, we don't want to
88 * reprogram the hardware timer with interrupts enabled. 102 * reprogram the hardware timer with interrupts enabled.
89 * Re-enable interrupts only after returning from idle. 103 * Re-enable interrupts only after returning from idle.
@@ -92,18 +106,9 @@ void omap_pm_idle(void)
92 106
93 if ((mask32 & DSP_IDLE) == 0) { 107 if ((mask32 & DSP_IDLE) == 0) {
94 __asm__ volatile ("mcr p15, 0, r0, c7, c0, 4"); 108 __asm__ volatile ("mcr p15, 0, r0, c7, c0, 4");
95 } else { 109 } else
96 110 omap_sram_idle();
97 if (cpu_is_omap1510()) {
98 func_ptr = (void *)(OMAP1510_SRAM_IDLE_SUSPEND);
99 } else if (cpu_is_omap1610() || cpu_is_omap1710()) {
100 func_ptr = (void *)(OMAP1610_SRAM_IDLE_SUSPEND);
101 } else if (cpu_is_omap5912()) {
102 func_ptr = (void *)(OMAP5912_SRAM_IDLE_SUSPEND);
103 }
104 111
105 func_ptr();
106 }
107 local_fiq_enable(); 112 local_fiq_enable();
108 local_irq_enable(); 113 local_irq_enable();
109} 114}
@@ -115,58 +120,55 @@ void omap_pm_idle(void)
115 */ 120 */
116static void omap_pm_wakeup_setup(void) 121static void omap_pm_wakeup_setup(void)
117{ 122{
118 /* 123 u32 level1_wake = OMAP_IRQ_BIT(INT_IH2_IRQ);
119 * Enable ARM XOR clock and release peripheral from reset by 124 u32 level2_wake = OMAP_IRQ_BIT(INT_UART2) | OMAP_IRQ_BIT(INT_KEYBOARD);
120 * writing 1 to PER_EN bit in ARM_RSTCT2, this is required
121 * for UART configuration to use UART2 to wake up.
122 */
123
124 omap_writel(omap_readl(ARM_IDLECT2) | ENABLE_XORCLK, ARM_IDLECT2);
125 omap_writel(omap_readl(ARM_RSTCT2) | PER_EN, ARM_RSTCT2);
126 omap_writew(MODEM_32K_EN, ULPD_CLOCK_CTRL);
127 125
128 /* 126 /*
129 * Turn off all interrupts except L1-2nd level cascade, 127 * Turn off all interrupts except GPIO bank 1, L1-2nd level cascade,
130 * and the L2 wakeup interrupts: keypad and UART2. 128 * and the L2 wakeup interrupts: keypad and UART2. Note that the
129 * drivers must still separately call omap_set_gpio_wakeup() to
130 * wake up to a GPIO interrupt.
131 */ 131 */
132 if (cpu_is_omap1510() || cpu_is_omap16xx())
133 level1_wake |= OMAP_IRQ_BIT(INT_GPIO_BANK1);
134 else if (cpu_is_omap730())
135 level1_wake |= OMAP_IRQ_BIT(INT_730_GPIO_BANK1);
132 136
133 omap_writel(~IRQ_LEVEL2, OMAP_IH1_MIR); 137 omap_writel(~level1_wake, OMAP_IH1_MIR);
134 138
135 if (cpu_is_omap1510()) { 139 if (cpu_is_omap1510())
136 omap_writel(~(IRQ_UART2 | IRQ_KEYBOARD), OMAP_IH2_MIR); 140 omap_writel(~level2_wake, OMAP_IH2_MIR);
137 }
138 141
142 /* INT_1610_WAKE_UP_REQ is needed for GPIO wakeup... */
139 if (cpu_is_omap16xx()) { 143 if (cpu_is_omap16xx()) {
140 omap_writel(~(IRQ_UART2 | IRQ_KEYBOARD), OMAP_IH2_0_MIR); 144 omap_writel(~level2_wake, OMAP_IH2_0_MIR);
141 145 omap_writel(~OMAP_IRQ_BIT(INT_1610_WAKE_UP_REQ), OMAP_IH2_1_MIR);
142 omap_writel(~0x0, OMAP_IH2_1_MIR);
143 omap_writel(~0x0, OMAP_IH2_2_MIR); 146 omap_writel(~0x0, OMAP_IH2_2_MIR);
144 omap_writel(~0x0, OMAP_IH2_3_MIR); 147 omap_writel(~0x0, OMAP_IH2_3_MIR);
145 } 148 }
146 149
147 /* New IRQ agreement */ 150 /* New IRQ agreement, recalculate in cascade order */
151 omap_writel(1, OMAP_IH2_CONTROL);
148 omap_writel(1, OMAP_IH1_CONTROL); 152 omap_writel(1, OMAP_IH1_CONTROL);
149
150 /* external PULL to down, bit 22 = 0 */
151 omap_writel(omap_readl(PULL_DWN_CTRL_2) & ~(1<<22), PULL_DWN_CTRL_2);
152} 153}
153 154
154void omap_pm_suspend(void) 155void omap_pm_suspend(void)
155{ 156{
156 unsigned int mask32 = 0;
157 unsigned long arg0 = 0, arg1 = 0; 157 unsigned long arg0 = 0, arg1 = 0;
158 int (*func_ptr)(unsigned short, unsigned short) = 0;
159 unsigned short save_dsp_idlect2;
160 158
161 printk("PM: OMAP%x is entering deep sleep now ...\n", system_rev); 159 printk("PM: OMAP%x is trying to enter deep sleep...\n", system_rev);
160
161 omap_serial_wake_trigger(1);
162 162
163 if (machine_is_omap_osk()) { 163 if (machine_is_omap_osk()) {
164 /* Stop LED1 (D9) blink */ 164 /* Stop LED1 (D9) blink */
165 tps65010_set_led(LED1, OFF); 165 tps65010_set_led(LED1, OFF);
166 } 166 }
167 167
168 omap_writew(0xffff, ULPD_SOFT_DISABLE_REQ_REG);
169
168 /* 170 /*
169 * Step 1: turn off interrupts 171 * Step 1: turn off interrupts (FIXME: NOTE: already disabled)
170 */ 172 */
171 173
172 local_irq_disable(); 174 local_irq_disable();
@@ -207,6 +209,8 @@ void omap_pm_suspend(void)
207 ARM_SAVE(ARM_CKCTL); 209 ARM_SAVE(ARM_CKCTL);
208 ARM_SAVE(ARM_IDLECT1); 210 ARM_SAVE(ARM_IDLECT1);
209 ARM_SAVE(ARM_IDLECT2); 211 ARM_SAVE(ARM_IDLECT2);
212 if (!(cpu_is_omap1510()))
213 ARM_SAVE(ARM_IDLECT3);
210 ARM_SAVE(ARM_EWUPCT); 214 ARM_SAVE(ARM_EWUPCT);
211 ARM_SAVE(ARM_RSTCT1); 215 ARM_SAVE(ARM_RSTCT1);
212 ARM_SAVE(ARM_RSTCT2); 216 ARM_SAVE(ARM_RSTCT2);
@@ -214,42 +218,12 @@ void omap_pm_suspend(void)
214 ULPD_SAVE(ULPD_CLOCK_CTRL); 218 ULPD_SAVE(ULPD_CLOCK_CTRL);
215 ULPD_SAVE(ULPD_STATUS_REQ); 219 ULPD_SAVE(ULPD_STATUS_REQ);
216 220
217 /* 221 /* (Step 3 removed - we now allow deep sleep by default) */
218 * Step 3: LOW_PWR signal enabling
219 *
220 * Allow the LOW_PWR signal to be visible on MPUIO5 ball.
221 */
222 if (cpu_is_omap1510()) {
223 /* POWER_CTRL_REG = 0x1 (LOW_POWER is available) */
224 omap_writew(omap_readw(ULPD_POWER_CTRL) |
225 OMAP1510_ULPD_LOW_POWER_REQ, ULPD_POWER_CTRL);
226 } else if (cpu_is_omap16xx()) {
227 /* POWER_CTRL_REG = 0x1 (LOW_POWER is available) */
228 omap_writew(omap_readw(ULPD_POWER_CTRL) |
229 OMAP1610_ULPD_LOW_POWER_REQ, ULPD_POWER_CTRL);
230 }
231
232 /* configure LOW_PWR pin */
233 omap_cfg_reg(T20_1610_LOW_PWR);
234 222
235 /* 223 /*
236 * Step 4: OMAP DSP Shutdown 224 * Step 4: OMAP DSP Shutdown
237 */ 225 */
238 226
239 /* Set DSP_RST = 1 and DSP_EN = 0, put DSP block into reset */
240 omap_writel((omap_readl(ARM_RSTCT1) | DSP_RST) & ~DSP_ENABLE,
241 ARM_RSTCT1);
242
243 /* Set DSP boot mode to DSP-IDLE, DSP_BOOT_MODE = 0x2 */
244 omap_writel(DSP_IDLE_MODE, MPUI_DSP_BOOT_CONFIG);
245
246 /* Set EN_DSPCK = 0, stop DSP block clock */
247 omap_writel(omap_readl(ARM_CKCTL) & ~DSP_CLOCK_ENABLE, ARM_CKCTL);
248
249 /* Stop any DSP domain clocks */
250 omap_writel(omap_readl(ARM_IDLECT2) | (1<<EN_APICK), ARM_IDLECT2);
251 save_dsp_idlect2 = __raw_readw(DSP_IDLECT2);
252 __raw_writew(0, DSP_IDLECT2);
253 227
254 /* 228 /*
255 * Step 5: Wakeup Event Setup 229 * Step 5: Wakeup Event Setup
@@ -258,24 +232,9 @@ void omap_pm_suspend(void)
258 omap_pm_wakeup_setup(); 232 omap_pm_wakeup_setup();
259 233
260 /* 234 /*
261 * Step 6a: ARM and Traffic controller shutdown 235 * Step 6: ARM and Traffic controller shutdown
262 *
263 * Step 6 starts here with clock and watchdog disable
264 */ 236 */
265 237
266 /* stop clocks */
267 mask32 = omap_readl(ARM_IDLECT2);
268 mask32 &= ~(1<<EN_WDTCK); /* bit 0 -> 0 (WDT clock) */
269 mask32 |= (1<<EN_XORPCK); /* bit 1 -> 1 (XORPCK clock) */
270 mask32 &= ~(1<<EN_PERCK); /* bit 2 -> 0 (MPUPER_CK clock) */
271 mask32 &= ~(1<<EN_LCDCK); /* bit 3 -> 0 (LCDC clock) */
272 mask32 &= ~(1<<EN_LBCK); /* bit 4 -> 0 (local bus clock) */
273 mask32 |= (1<<EN_APICK); /* bit 6 -> 1 (MPUI clock) */
274 mask32 &= ~(1<<EN_TIMCK); /* bit 7 -> 0 (MPU timer clock) */
275 mask32 &= ~(1<<DMACK_REQ); /* bit 8 -> 0 (DMAC clock) */
276 mask32 &= ~(1<<EN_GPIOCK); /* bit 9 -> 0 (GPIO clock) */
277 omap_writel(mask32, ARM_IDLECT2);
278
279 /* disable ARM watchdog */ 238 /* disable ARM watchdog */
280 omap_writel(0x00F5, OMAP_WDT_TIMER_MODE); 239 omap_writel(0x00F5, OMAP_WDT_TIMER_MODE);
281 omap_writel(0x00A0, OMAP_WDT_TIMER_MODE); 240 omap_writel(0x00A0, OMAP_WDT_TIMER_MODE);
@@ -295,47 +254,24 @@ void omap_pm_suspend(void)
295 arg0 = arm_sleep_save[ARM_SLEEP_SAVE_ARM_IDLECT1]; 254 arg0 = arm_sleep_save[ARM_SLEEP_SAVE_ARM_IDLECT1];
296 arg1 = arm_sleep_save[ARM_SLEEP_SAVE_ARM_IDLECT2]; 255 arg1 = arm_sleep_save[ARM_SLEEP_SAVE_ARM_IDLECT2];
297 256
298 if (cpu_is_omap1510()) {
299 func_ptr = (void *)(OMAP1510_SRAM_API_SUSPEND);
300 } else if (cpu_is_omap1610() || cpu_is_omap1710()) {
301 func_ptr = (void *)(OMAP1610_SRAM_API_SUSPEND);
302 } else if (cpu_is_omap5912()) {
303 func_ptr = (void *)(OMAP5912_SRAM_API_SUSPEND);
304 }
305
306 /* 257 /*
307 * Step 6c: ARM and Traffic controller shutdown 258 * Step 6c: ARM and Traffic controller shutdown
308 * 259 *
309 * Jump to assembly code. The processor will stay there 260 * Jump to assembly code. The processor will stay there
310 * until wake up. 261 * until wake up.
311 */ 262 */
312 263 omap_sram_suspend(arg0, arg1);
313 func_ptr(arg0, arg1);
314 264
315 /* 265 /*
316 * If we are here, processor is woken up! 266 * If we are here, processor is woken up!
317 */ 267 */
318 268
319 if (cpu_is_omap1510()) {
320 /* POWER_CTRL_REG = 0x0 (LOW_POWER is disabled) */
321 omap_writew(omap_readw(ULPD_POWER_CTRL) &
322 ~OMAP1510_ULPD_LOW_POWER_REQ, ULPD_POWER_CTRL);
323 } else if (cpu_is_omap16xx()) {
324 /* POWER_CTRL_REG = 0x0 (LOW_POWER is disabled) */
325 omap_writew(omap_readw(ULPD_POWER_CTRL) &
326 ~OMAP1610_ULPD_LOW_POWER_REQ, ULPD_POWER_CTRL);
327 }
328
329
330 /* Restore DSP clocks */
331 omap_writel(omap_readl(ARM_IDLECT2) | (1<<EN_APICK), ARM_IDLECT2);
332 __raw_writew(save_dsp_idlect2, DSP_IDLECT2);
333 ARM_RESTORE(ARM_IDLECT2);
334
335 /* 269 /*
336 * Restore ARM state, except ARM_IDLECT1/2 which omap_cpu_suspend did 270 * Restore ARM state, except ARM_IDLECT1/2 which omap_cpu_suspend did
337 */ 271 */
338 272
273 if (!(cpu_is_omap1510()))
274 ARM_RESTORE(ARM_IDLECT3);
339 ARM_RESTORE(ARM_CKCTL); 275 ARM_RESTORE(ARM_CKCTL);
340 ARM_RESTORE(ARM_EWUPCT); 276 ARM_RESTORE(ARM_EWUPCT);
341 ARM_RESTORE(ARM_RSTCT1); 277 ARM_RESTORE(ARM_RSTCT1);
@@ -366,6 +302,8 @@ void omap_pm_suspend(void)
366 MPUI1610_RESTORE(OMAP_IH2_3_MIR); 302 MPUI1610_RESTORE(OMAP_IH2_3_MIR);
367 } 303 }
368 304
305 omap_writew(0, ULPD_SOFT_DISABLE_REQ_REG);
306
369 /* 307 /*
370 * Reenable interrupts 308 * Reenable interrupts
371 */ 309 */
@@ -373,6 +311,8 @@ void omap_pm_suspend(void)
373 local_irq_enable(); 311 local_irq_enable();
374 local_fiq_enable(); 312 local_fiq_enable();
375 313
314 omap_serial_wake_trigger(0);
315
376 printk("PM: OMAP%x is re-starting from deep sleep...\n", system_rev); 316 printk("PM: OMAP%x is re-starting from deep sleep...\n", system_rev);
377 317
378 if (machine_is_omap_osk()) { 318 if (machine_is_omap_osk()) {
@@ -401,6 +341,8 @@ static int omap_pm_read_proc(
401 ARM_SAVE(ARM_CKCTL); 341 ARM_SAVE(ARM_CKCTL);
402 ARM_SAVE(ARM_IDLECT1); 342 ARM_SAVE(ARM_IDLECT1);
403 ARM_SAVE(ARM_IDLECT2); 343 ARM_SAVE(ARM_IDLECT2);
344 if (!(cpu_is_omap1510()))
345 ARM_SAVE(ARM_IDLECT3);
404 ARM_SAVE(ARM_EWUPCT); 346 ARM_SAVE(ARM_EWUPCT);
405 ARM_SAVE(ARM_RSTCT1); 347 ARM_SAVE(ARM_RSTCT1);
406 ARM_SAVE(ARM_RSTCT2); 348 ARM_SAVE(ARM_RSTCT2);
@@ -436,6 +378,7 @@ static int omap_pm_read_proc(
436 "ARM_CKCTL_REG: 0x%-8x \n" 378 "ARM_CKCTL_REG: 0x%-8x \n"
437 "ARM_IDLECT1_REG: 0x%-8x \n" 379 "ARM_IDLECT1_REG: 0x%-8x \n"
438 "ARM_IDLECT2_REG: 0x%-8x \n" 380 "ARM_IDLECT2_REG: 0x%-8x \n"
381 "ARM_IDLECT3_REG: 0x%-8x \n"
439 "ARM_EWUPCT_REG: 0x%-8x \n" 382 "ARM_EWUPCT_REG: 0x%-8x \n"
440 "ARM_RSTCT1_REG: 0x%-8x \n" 383 "ARM_RSTCT1_REG: 0x%-8x \n"
441 "ARM_RSTCT2_REG: 0x%-8x \n" 384 "ARM_RSTCT2_REG: 0x%-8x \n"
@@ -449,6 +392,7 @@ static int omap_pm_read_proc(
449 ARM_SHOW(ARM_CKCTL), 392 ARM_SHOW(ARM_CKCTL),
450 ARM_SHOW(ARM_IDLECT1), 393 ARM_SHOW(ARM_IDLECT1),
451 ARM_SHOW(ARM_IDLECT2), 394 ARM_SHOW(ARM_IDLECT2),
395 ARM_SHOW(ARM_IDLECT3),
452 ARM_SHOW(ARM_EWUPCT), 396 ARM_SHOW(ARM_EWUPCT),
453 ARM_SHOW(ARM_RSTCT1), 397 ARM_SHOW(ARM_RSTCT1),
454 ARM_SHOW(ARM_RSTCT2), 398 ARM_SHOW(ARM_RSTCT2),
@@ -507,7 +451,7 @@ static void omap_pm_init_proc(void)
507 451
508 entry = create_proc_read_entry("driver/omap_pm", 452 entry = create_proc_read_entry("driver/omap_pm",
509 S_IWUSR | S_IRUGO, NULL, 453 S_IWUSR | S_IRUGO, NULL,
510 omap_pm_read_proc, 0); 454 omap_pm_read_proc, NULL);
511} 455}
512 456
513#endif /* DEBUG && CONFIG_PROC_FS */ 457#endif /* DEBUG && CONFIG_PROC_FS */
@@ -580,7 +524,21 @@ static int omap_pm_finish(suspend_state_t state)
580} 524}
581 525
582 526
583struct pm_ops omap_pm_ops ={ 527static irqreturn_t omap_wakeup_interrupt(int irq, void * dev,
528 struct pt_regs * regs)
529{
530 return IRQ_HANDLED;
531}
532
533static struct irqaction omap_wakeup_irq = {
534 .name = "peripheral wakeup",
535 .flags = SA_INTERRUPT,
536 .handler = omap_wakeup_interrupt
537};
538
539
540
541static struct pm_ops omap_pm_ops ={
584 .pm_disk_mode = 0, 542 .pm_disk_mode = 0,
585 .prepare = omap_pm_prepare, 543 .prepare = omap_pm_prepare,
586 .enter = omap_pm_enter, 544 .enter = omap_pm_enter,
@@ -590,42 +548,61 @@ struct pm_ops omap_pm_ops ={
590static int __init omap_pm_init(void) 548static int __init omap_pm_init(void)
591{ 549{
592 printk("Power Management for TI OMAP.\n"); 550 printk("Power Management for TI OMAP.\n");
593 pm_idle = omap_pm_idle;
594 /* 551 /*
595 * We copy the assembler sleep/wakeup routines to SRAM. 552 * We copy the assembler sleep/wakeup routines to SRAM.
596 * These routines need to be in SRAM as that's the only 553 * These routines need to be in SRAM as that's the only
597 * memory the MPU can see when it wakes up. 554 * memory the MPU can see when it wakes up.
598 */ 555 */
599
600#ifdef CONFIG_ARCH_OMAP1510
601 if (cpu_is_omap1510()) { 556 if (cpu_is_omap1510()) {
602 memcpy((void *)OMAP1510_SRAM_IDLE_SUSPEND, 557 omap_sram_idle = omap_sram_push(omap1510_idle_loop_suspend,
603 omap1510_idle_loop_suspend, 558 omap1510_idle_loop_suspend_sz);
604 omap1510_idle_loop_suspend_sz); 559 omap_sram_suspend = omap_sram_push(omap1510_cpu_suspend,
605 memcpy((void *)OMAP1510_SRAM_API_SUSPEND, omap1510_cpu_suspend, 560 omap1510_cpu_suspend_sz);
606 omap1510_cpu_suspend_sz); 561 } else if (cpu_is_omap16xx()) {
607 } else 562 omap_sram_idle = omap_sram_push(omap1610_idle_loop_suspend,
608#endif 563 omap1610_idle_loop_suspend_sz);
609 if (cpu_is_omap1610() || cpu_is_omap1710()) { 564 omap_sram_suspend = omap_sram_push(omap1610_cpu_suspend,
610 memcpy((void *)OMAP1610_SRAM_IDLE_SUSPEND, 565 omap1610_cpu_suspend_sz);
611 omap1610_idle_loop_suspend,
612 omap1610_idle_loop_suspend_sz);
613 memcpy((void *)OMAP1610_SRAM_API_SUSPEND, omap1610_cpu_suspend,
614 omap1610_cpu_suspend_sz);
615 } else if (cpu_is_omap5912()) {
616 memcpy((void *)OMAP5912_SRAM_IDLE_SUSPEND,
617 omap1610_idle_loop_suspend,
618 omap1610_idle_loop_suspend_sz);
619 memcpy((void *)OMAP5912_SRAM_API_SUSPEND, omap1610_cpu_suspend,
620 omap1610_cpu_suspend_sz);
621 } 566 }
622 567
568 if (omap_sram_idle == NULL || omap_sram_suspend == NULL) {
569 printk(KERN_ERR "PM not initialized: Missing SRAM support\n");
570 return -ENODEV;
571 }
572
573 pm_idle = omap_pm_idle;
574
575 setup_irq(INT_1610_WAKE_UP_REQ, &omap_wakeup_irq);
576#if 0
577 /* --- BEGIN BOARD-DEPENDENT CODE --- */
578 /* Sleepx mask direction */
579 omap_writew((omap_readw(0xfffb5008) & ~2), 0xfffb5008);
580 /* Unmask sleepx signal */
581 omap_writew((omap_readw(0xfffb5004) & ~2), 0xfffb5004);
582 /* --- END BOARD-DEPENDENT CODE --- */
583#endif
584
585 /* Program new power ramp-up time
586 * (0 for most boards since we don't lower voltage when in deep sleep)
587 */
588 omap_writew(ULPD_SETUP_ANALOG_CELL_3_VAL, ULPD_SETUP_ANALOG_CELL_3);
589
590 /* Setup ULPD POWER_CTRL_REG - enter deep sleep whenever possible */
591 omap_writew(ULPD_POWER_CTRL_REG_VAL, ULPD_POWER_CTRL);
592
593 /* Configure IDLECT3 */
594 if (cpu_is_omap16xx())
595 omap_writel(OMAP1610_IDLECT3_VAL, OMAP1610_IDLECT3);
596
623 pm_set_ops(&omap_pm_ops); 597 pm_set_ops(&omap_pm_ops);
624 598
625#if defined(DEBUG) && defined(CONFIG_PROC_FS) 599#if defined(DEBUG) && defined(CONFIG_PROC_FS)
626 omap_pm_init_proc(); 600 omap_pm_init_proc();
627#endif 601#endif
628 602
603 /* configure LOW_PWR pin */
604 omap_cfg_reg(T20_1610_LOW_PWR);
605
629 return 0; 606 return 0;
630} 607}
631__initcall(omap_pm_init); 608__initcall(omap_pm_init);
diff --git a/arch/arm/plat-omap/sleep.S b/arch/arm/plat-omap/sleep.S
index 279490ce772b..9f745836f6aa 100644
--- a/arch/arm/plat-omap/sleep.S
+++ b/arch/arm/plat-omap/sleep.S
@@ -66,7 +66,7 @@ ENTRY(omap1510_idle_loop_suspend)
66 @ get ARM_IDLECT2 into r2 66 @ get ARM_IDLECT2 into r2
67 ldrh r2, [r4, #ARM_IDLECT2_ASM_OFFSET & 0xff] 67 ldrh r2, [r4, #ARM_IDLECT2_ASM_OFFSET & 0xff]
68 mov r5, #OMAP1510_IDLE_CLOCK_DOMAINS & 0xff 68 mov r5, #OMAP1510_IDLE_CLOCK_DOMAINS & 0xff
69 orr r5,r5, #OMAP1510_IDLE_CLOCK_DOMAINS & 0xff00 69 orr r5, r5, #OMAP1510_IDLE_CLOCK_DOMAINS & 0xff00
70 strh r5, [r4, #ARM_IDLECT2_ASM_OFFSET & 0xff] 70 strh r5, [r4, #ARM_IDLECT2_ASM_OFFSET & 0xff]
71 71
72 @ request ARM idle 72 @ request ARM idle
@@ -76,7 +76,7 @@ ENTRY(omap1510_idle_loop_suspend)
76 strh r3, [r4, #ARM_IDLECT1_ASM_OFFSET & 0xff] 76 strh r3, [r4, #ARM_IDLECT1_ASM_OFFSET & 0xff]
77 77
78 mov r5, #IDLE_WAIT_CYCLES & 0xff 78 mov r5, #IDLE_WAIT_CYCLES & 0xff
79 orr r5, r5, #IDLE_WAIT_CYCLES & 0xff00 79 orr r5, r5, #IDLE_WAIT_CYCLES & 0xff00
80l_1510: subs r5, r5, #1 80l_1510: subs r5, r5, #1
81 bne l_1510 81 bne l_1510
82/* 82/*
@@ -96,7 +96,7 @@ l_1510: subs r5, r5, #1
96 strh r2, [r4, #ARM_IDLECT2_ASM_OFFSET & 0xff] 96 strh r2, [r4, #ARM_IDLECT2_ASM_OFFSET & 0xff]
97 strh r1, [r4, #ARM_IDLECT1_ASM_OFFSET & 0xff] 97 strh r1, [r4, #ARM_IDLECT1_ASM_OFFSET & 0xff]
98 98
99 ldmfd sp!, {r0 - r12, pc} @ restore regs and return 99 ldmfd sp!, {r0 - r12, pc} @ restore regs and return
100 100
101ENTRY(omap1510_idle_loop_suspend_sz) 101ENTRY(omap1510_idle_loop_suspend_sz)
102 .word . - omap1510_idle_loop_suspend 102 .word . - omap1510_idle_loop_suspend
@@ -115,8 +115,8 @@ ENTRY(omap1610_idle_loop_suspend)
115 @ turn off clock domains 115 @ turn off clock domains
116 @ get ARM_IDLECT2 into r2 116 @ get ARM_IDLECT2 into r2
117 ldrh r2, [r4, #ARM_IDLECT2_ASM_OFFSET & 0xff] 117 ldrh r2, [r4, #ARM_IDLECT2_ASM_OFFSET & 0xff]
118 mov r5, #OMAP1610_IDLE_CLOCK_DOMAINS & 0xff 118 mov r5, #OMAP1610_IDLECT2_SLEEP_VAL & 0xff
119 orr r5,r5, #OMAP1610_IDLE_CLOCK_DOMAINS & 0xff00 119 orr r5, r5, #OMAP1610_IDLECT2_SLEEP_VAL & 0xff00
120 strh r5, [r4, #ARM_IDLECT2_ASM_OFFSET & 0xff] 120 strh r5, [r4, #ARM_IDLECT2_ASM_OFFSET & 0xff]
121 121
122 @ request ARM idle 122 @ request ARM idle
@@ -126,7 +126,7 @@ ENTRY(omap1610_idle_loop_suspend)
126 strh r3, [r4, #ARM_IDLECT1_ASM_OFFSET & 0xff] 126 strh r3, [r4, #ARM_IDLECT1_ASM_OFFSET & 0xff]
127 127
128 mov r5, #IDLE_WAIT_CYCLES & 0xff 128 mov r5, #IDLE_WAIT_CYCLES & 0xff
129 orr r5, r5, #IDLE_WAIT_CYCLES & 0xff00 129 orr r5, r5, #IDLE_WAIT_CYCLES & 0xff00
130l_1610: subs r5, r5, #1 130l_1610: subs r5, r5, #1
131 bne l_1610 131 bne l_1610
132/* 132/*
@@ -146,7 +146,7 @@ l_1610: subs r5, r5, #1
146 strh r2, [r4, #ARM_IDLECT2_ASM_OFFSET & 0xff] 146 strh r2, [r4, #ARM_IDLECT2_ASM_OFFSET & 0xff]
147 strh r1, [r4, #ARM_IDLECT1_ASM_OFFSET & 0xff] 147 strh r1, [r4, #ARM_IDLECT1_ASM_OFFSET & 0xff]
148 148
149 ldmfd sp!, {r0 - r12, pc} @ restore regs and return 149 ldmfd sp!, {r0 - r12, pc} @ restore regs and return
150 150
151ENTRY(omap1610_idle_loop_suspend_sz) 151ENTRY(omap1610_idle_loop_suspend_sz)
152 .word . - omap1610_idle_loop_suspend 152 .word . - omap1610_idle_loop_suspend
@@ -208,7 +208,7 @@ ENTRY(omap1510_cpu_suspend)
208 208
209 @ turn off clock domains 209 @ turn off clock domains
210 mov r5, #OMAP1510_IDLE_CLOCK_DOMAINS & 0xff 210 mov r5, #OMAP1510_IDLE_CLOCK_DOMAINS & 0xff
211 orr r5,r5, #OMAP1510_IDLE_CLOCK_DOMAINS & 0xff00 211 orr r5, r5, #OMAP1510_IDLE_CLOCK_DOMAINS & 0xff00
212 strh r5, [r4, #ARM_IDLECT2_ASM_OFFSET & 0xff] 212 strh r5, [r4, #ARM_IDLECT2_ASM_OFFSET & 0xff]
213 213
214 @ request ARM idle 214 @ request ARM idle
@@ -217,7 +217,7 @@ ENTRY(omap1510_cpu_suspend)
217 strh r3, [r4, #ARM_IDLECT1_ASM_OFFSET & 0xff] 217 strh r3, [r4, #ARM_IDLECT1_ASM_OFFSET & 0xff]
218 218
219 mov r5, #IDLE_WAIT_CYCLES & 0xff 219 mov r5, #IDLE_WAIT_CYCLES & 0xff
220 orr r5, r5, #IDLE_WAIT_CYCLES & 0xff00 220 orr r5, r5, #IDLE_WAIT_CYCLES & 0xff00
221l_1510_2: 221l_1510_2:
222 subs r5, r5, #1 222 subs r5, r5, #1
223 bne l_1510_2 223 bne l_1510_2
@@ -237,7 +237,7 @@ l_1510_2:
237 strh r0, [r4, #ARM_IDLECT1_ASM_OFFSET & 0xff] 237 strh r0, [r4, #ARM_IDLECT1_ASM_OFFSET & 0xff]
238 238
239 @ restore regs and return 239 @ restore regs and return
240 ldmfd sp!, {r0 - r12, pc} 240 ldmfd sp!, {r0 - r12, pc}
241 241
242ENTRY(omap1510_cpu_suspend_sz) 242ENTRY(omap1510_cpu_suspend_sz)
243 .word . - omap1510_cpu_suspend 243 .word . - omap1510_cpu_suspend
@@ -249,21 +249,26 @@ ENTRY(omap1610_cpu_suspend)
249 @ save registers on stack 249 @ save registers on stack
250 stmfd sp!, {r0 - r12, lr} 250 stmfd sp!, {r0 - r12, lr}
251 251
252 @ Drain write cache
253 mov r4, #0
254 mcr p15, 0, r0, c7, c10, 4
255 nop
256
252 @ load base address of Traffic Controller 257 @ load base address of Traffic Controller
253 mov r4, #TCMIF_ASM_BASE & 0xff000000 258 mov r6, #TCMIF_ASM_BASE & 0xff000000
254 orr r4, r4, #TCMIF_ASM_BASE & 0x00ff0000 259 orr r6, r6, #TCMIF_ASM_BASE & 0x00ff0000
255 orr r4, r4, #TCMIF_ASM_BASE & 0x0000ff00 260 orr r6, r6, #TCMIF_ASM_BASE & 0x0000ff00
256 261
257 @ prepare to put SDRAM into self-refresh manually 262 @ prepare to put SDRAM into self-refresh manually
258 ldr r5, [r4, #EMIFF_SDRAM_CONFIG_ASM_OFFSET & 0xff] 263 ldr r7, [r6, #EMIFF_SDRAM_CONFIG_ASM_OFFSET & 0xff]
259 orr r5, r5, #SELF_REFRESH_MODE & 0xff000000 264 orr r9, r7, #SELF_REFRESH_MODE & 0xff000000
260 orr r5, r5, #SELF_REFRESH_MODE & 0x000000ff 265 orr r9, r9, #SELF_REFRESH_MODE & 0x000000ff
261 str r5, [r4, #EMIFF_SDRAM_CONFIG_ASM_OFFSET & 0xff] 266 str r9, [r6, #EMIFF_SDRAM_CONFIG_ASM_OFFSET & 0xff]
262 267
263 @ prepare to put EMIFS to Sleep 268 @ prepare to put EMIFS to Sleep
264 ldr r5, [r4, #EMIFS_CONFIG_ASM_OFFSET & 0xff] 269 ldr r8, [r6, #EMIFS_CONFIG_ASM_OFFSET & 0xff]
265 orr r5, r5, #IDLE_EMIFS_REQUEST & 0xff 270 orr r9, r8, #IDLE_EMIFS_REQUEST & 0xff
266 str r5, [r4, #EMIFS_CONFIG_ASM_OFFSET & 0xff] 271 str r9, [r6, #EMIFS_CONFIG_ASM_OFFSET & 0xff]
267 272
268 @ load base address of ARM_IDLECT1 and ARM_IDLECT2 273 @ load base address of ARM_IDLECT1 and ARM_IDLECT2
269 mov r4, #CLKGEN_REG_ASM_BASE & 0xff000000 274 mov r4, #CLKGEN_REG_ASM_BASE & 0xff000000
@@ -271,26 +276,22 @@ ENTRY(omap1610_cpu_suspend)
271 orr r4, r4, #CLKGEN_REG_ASM_BASE & 0x0000ff00 276 orr r4, r4, #CLKGEN_REG_ASM_BASE & 0x0000ff00
272 277
273 @ turn off clock domains 278 @ turn off clock domains
274 mov r5, #OMAP1610_IDLE_CLOCK_DOMAINS & 0xff 279 @ do not disable PERCK (0x04)
275 orr r5,r5, #OMAP1610_IDLE_CLOCK_DOMAINS & 0xff00 280 mov r5, #OMAP1610_IDLECT2_SLEEP_VAL & 0xff
276 strh r5, [r4, #ARM_IDLECT2_ASM_OFFSET & 0xff] 281 orr r5, r5, #OMAP1610_IDLECT2_SLEEP_VAL & 0xff00
277
278 @ work around errata of OMAP1610/5912. Enable (!) peripheral
279 @ clock to let the chip go into deep sleep
280 ldrh r5, [r4, #ARM_IDLECT2_ASM_OFFSET & 0xff]
281 orr r5,r5, #EN_PERCK_BIT & 0xff
282 strh r5, [r4, #ARM_IDLECT2_ASM_OFFSET & 0xff] 282 strh r5, [r4, #ARM_IDLECT2_ASM_OFFSET & 0xff]
283 283
284 @ request ARM idle 284 @ request ARM idle
285 mov r3, #OMAP1610_DEEP_SLEEP_REQUEST & 0xff 285 mov r3, #OMAP1610_IDLECT1_SLEEP_VAL & 0xff
286 orr r3, r3, #OMAP1610_DEEP_SLEEP_REQUEST & 0xff00 286 orr r3, r3, #OMAP1610_IDLECT1_SLEEP_VAL & 0xff00
287 strh r3, [r4, #ARM_IDLECT1_ASM_OFFSET & 0xff] 287 strh r3, [r4, #ARM_IDLECT1_ASM_OFFSET & 0xff]
288 288
289 mov r5, #IDLE_WAIT_CYCLES & 0xff 289 @ disable instruction cache
290 orr r5, r5, #IDLE_WAIT_CYCLES & 0xff00 290 mrc p15, 0, r9, c1, c0, 0
291l_1610_2: 291 bic r2, r9, #0x1000
292 subs r5, r5, #1 292 mcr p15, 0, r2, c1, c0, 0
293 bne l_1610_2 293 nop
294
294/* 295/*
295 * Let's wait for the next wake up event to wake us up. r0 can't be 296 * Let's wait for the next wake up event to wake us up. r0 can't be
296 * used here because r0 holds ARM_IDLECT1 297 * used here because r0 holds ARM_IDLECT1
@@ -301,13 +302,21 @@ l_1610_2:
301 * omap1610_cpu_suspend()'s resume point. 302 * omap1610_cpu_suspend()'s resume point.
302 * 303 *
303 * It will just start executing here, so we'll restore stuff from the 304 * It will just start executing here, so we'll restore stuff from the
304 * stack, reset the ARM_IDLECT1 and ARM_IDLECT2. 305 * stack.
305 */ 306 */
307 @ re-enable Icache
308 mcr p15, 0, r9, c1, c0, 0
309
310 @ reset the ARM_IDLECT1 and ARM_IDLECT2.
306 strh r1, [r4, #ARM_IDLECT2_ASM_OFFSET & 0xff] 311 strh r1, [r4, #ARM_IDLECT2_ASM_OFFSET & 0xff]
307 strh r0, [r4, #ARM_IDLECT1_ASM_OFFSET & 0xff] 312 strh r0, [r4, #ARM_IDLECT1_ASM_OFFSET & 0xff]
308 313
314 @ Restore EMIFF controls
315 str r7, [r6, #EMIFF_SDRAM_CONFIG_ASM_OFFSET & 0xff]
316 str r8, [r6, #EMIFS_CONFIG_ASM_OFFSET & 0xff]
317
309 @ restore regs and return 318 @ restore regs and return
310 ldmfd sp!, {r0 - r12, pc} 319 ldmfd sp!, {r0 - r12, pc}
311 320
312ENTRY(omap1610_cpu_suspend_sz) 321ENTRY(omap1610_cpu_suspend_sz)
313 .word . - omap1610_cpu_suspend 322 .word . - omap1610_cpu_suspend
diff --git a/arch/arm/plat-omap/sram-fn.S b/arch/arm/plat-omap/sram-fn.S
new file mode 100644
index 000000000000..4bea36964a00
--- /dev/null
+++ b/arch/arm/plat-omap/sram-fn.S
@@ -0,0 +1,58 @@
1/*
2 * linux/arch/arm/plat-omap/sram.S
3 *
4 * Functions that need to be run in internal SRAM
5 *
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License version 2 as
8 * published by the Free Software Foundation.
9 */
10
11#include <linux/config.h>
12#include <linux/linkage.h>
13#include <asm/assembler.h>
14#include <asm/arch/io.h>
15#include <asm/arch/hardware.h>
16
17 .text
18
19/*
20 * Reprograms ULPD and CKCTL.
21 */
22ENTRY(sram_reprogram_clock)
23 stmfd sp!, {r0 - r12, lr} @ save registers on stack
24
25 mov r2, #IO_ADDRESS(DPLL_CTL) & 0xff000000
26 orr r2, r2, #IO_ADDRESS(DPLL_CTL) & 0x00ff0000
27 orr r2, r2, #IO_ADDRESS(DPLL_CTL) & 0x0000ff00
28
29 mov r3, #IO_ADDRESS(ARM_CKCTL) & 0xff000000
30 orr r3, r3, #IO_ADDRESS(ARM_CKCTL) & 0x00ff0000
31 orr r3, r3, #IO_ADDRESS(ARM_CKCTL) & 0x0000ff00
32
33 tst r0, #1 << 4 @ want lock mode?
34 beq newck @ nope
35 bic r0, r0, #1 << 4 @ else clear lock bit
36 strh r0, [r2] @ set dpll into bypass mode
37 orr r0, r0, #1 << 4 @ set lock bit again
38
39newck:
40 strh r1, [r3] @ write new ckctl value
41 strh r0, [r2] @ write new dpll value
42
43 mov r4, #0x0700 @ let the clocks settle
44 orr r4, r4, #0x00ff
45delay: sub r4, r4, #1
46 cmp r4, #0
47 bne delay
48
49lock: ldrh r4, [r2], #0 @ read back dpll value
50 tst r0, #1 << 4 @ want lock mode?
51 beq out @ nope
52 tst r4, #1 << 0 @ dpll rate locked?
53 beq lock @ try again
54
55out:
56 ldmfd sp!, {r0 - r12, pc} @ restore regs and return
57ENTRY(sram_reprogram_clock_sz)
58 .word . - sram_reprogram_clock
diff --git a/arch/arm/plat-omap/sram.c b/arch/arm/plat-omap/sram.c
new file mode 100644
index 000000000000..7719a4062e3a
--- /dev/null
+++ b/arch/arm/plat-omap/sram.c
@@ -0,0 +1,116 @@
1/*
2 * linux/arch/arm/plat-omap/sram.c
3 *
4 * OMAP SRAM detection and management
5 *
6 * Copyright (C) 2005 Nokia Corporation
7 * Written by Tony Lindgren <tony@atomide.com>
8 *
9 * This program is free software; you can redistribute it and/or modify
10 * it under the terms of the GNU General Public License version 2 as
11 * published by the Free Software Foundation.
12 */
13
14#include <linux/config.h>
15#include <linux/module.h>
16#include <linux/kernel.h>
17#include <linux/init.h>
18
19#include <asm/mach/map.h>
20#include <asm/io.h>
21#include <asm/cacheflush.h>
22
23#include "sram.h"
24
25#define OMAP1_SRAM_BASE 0xd0000000
26#define OMAP1_SRAM_START 0x20000000
27#define SRAM_BOOTLOADER_SZ 0x80
28
29static unsigned long omap_sram_base;
30static unsigned long omap_sram_size;
31static unsigned long omap_sram_ceil;
32
33/*
34 * The amount of SRAM depends on the core type:
35 * 730 = 200K, 1510 = 512K, 5912 = 256K, 1610 = 16K, 1710 = 16K
36 * Note that we cannot try to test for SRAM here because writes
37 * to secure SRAM will hang the system. Also the SRAM is not
38 * yet mapped at this point.
39 */
40void __init omap_detect_sram(void)
41{
42 omap_sram_base = OMAP1_SRAM_BASE;
43
44 if (cpu_is_omap730())
45 omap_sram_size = 0x32000;
46 else if (cpu_is_omap1510())
47 omap_sram_size = 0x80000;
48 else if (cpu_is_omap1610() || cpu_is_omap1621() || cpu_is_omap1710())
49 omap_sram_size = 0x4000;
50 else if (cpu_is_omap1611())
51 omap_sram_size = 0x3e800;
52 else {
53 printk(KERN_ERR "Could not detect SRAM size\n");
54 omap_sram_size = 0x4000;
55 }
56
57 printk(KERN_INFO "SRAM size: 0x%lx\n", omap_sram_size);
58 omap_sram_ceil = omap_sram_base + omap_sram_size;
59}
60
61static struct map_desc omap_sram_io_desc[] __initdata = {
62 { OMAP1_SRAM_BASE, OMAP1_SRAM_START, 0, MT_DEVICE }
63};
64
65/*
 66 * In order to use the last 2kB of SRAM on 1611b, we must round the size
 67 * up to a multiple of PAGE_SIZE. We cannot use ioremap for SRAM, as
68 * clock init needs SRAM early.
69 */
70void __init omap_map_sram(void)
71{
72 if (omap_sram_size == 0)
73 return;
74
75 omap_sram_io_desc[0].length = (omap_sram_size + PAGE_SIZE-1)/PAGE_SIZE;
76 omap_sram_io_desc[0].length *= PAGE_SIZE;
77 iotable_init(omap_sram_io_desc, ARRAY_SIZE(omap_sram_io_desc));
78
79 /*
80 * Looks like we need to preserve some bootloader code at the
81 * beginning of SRAM for jumping to flash for reboot to work...
82 */
83 memset((void *)omap_sram_base + SRAM_BOOTLOADER_SZ, 0,
84 omap_sram_size - SRAM_BOOTLOADER_SZ);
85}
86
87static void (*_omap_sram_reprogram_clock)(u32 dpllctl, u32 ckctl) = NULL;
88
89void omap_sram_reprogram_clock(u32 dpllctl, u32 ckctl)
90{
91 if (_omap_sram_reprogram_clock == NULL)
92 panic("Cannot use SRAM");
93
94 return _omap_sram_reprogram_clock(dpllctl, ckctl);
95}
96
97void * omap_sram_push(void * start, unsigned long size)
98{
99 if (size > (omap_sram_ceil - (omap_sram_base + SRAM_BOOTLOADER_SZ))) {
100 printk(KERN_ERR "Not enough space in SRAM\n");
101 return NULL;
102 }
103 omap_sram_ceil -= size;
104 omap_sram_ceil &= ~0x3;
105 memcpy((void *)omap_sram_ceil, start, size);
106
107 return (void *)omap_sram_ceil;
108}
109
110void __init omap_sram_init(void)
111{
112 omap_detect_sram();
113 omap_map_sram();
114 _omap_sram_reprogram_clock = omap_sram_push(sram_reprogram_clock,
115 sram_reprogram_clock_sz);
116}
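The allocator above is consumed the same way pm.c does earlier in this patch: a position-independent routine is copied below the SRAM ceiling once, then called through a pointer. A condensed sketch; my_sram_fn and my_sram_fn_sz are hypothetical and would follow the ENTRY()/.word size convention used in sleep.S and sram-fn.S:

    extern void my_sram_fn(unsigned long arg);      /* hypothetical .S routine */
    extern unsigned long my_sram_fn_sz;             /* its size word */

    static void (*run_in_sram)(unsigned long arg);

    static int __init my_sram_setup(void)
    {
            run_in_sram = omap_sram_push(my_sram_fn, my_sram_fn_sz);
            if (run_in_sram == NULL)
                    return -ENOMEM;         /* did not fit under omap_sram_ceil */

            run_in_sram(0);                 /* now executes out of SRAM */
            return 0;
    }

Since omap_sram_ceil only ever moves down, a push is permanent for the life of the kernel, which matches how pm.c installs its idle and suspend routines once at init.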
diff --git a/arch/arm/plat-omap/sram.h b/arch/arm/plat-omap/sram.h
new file mode 100644
index 000000000000..71984efa6ae8
--- /dev/null
+++ b/arch/arm/plat-omap/sram.h
@@ -0,0 +1,21 @@
1/*
2 * linux/arch/arm/plat-omap/sram.h
3 *
4 * Interface for functions that need to be run in internal SRAM
5 *
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License version 2 as
8 * published by the Free Software Foundation.
9 */
10
11#ifndef __ARCH_ARM_OMAP_SRAM_H
12#define __ARCH_ARM_OMAP_SRAM_H
13
14extern void * omap_sram_push(void * start, unsigned long size);
15extern void omap_sram_reprogram_clock(u32 dpllctl, u32 ckctl);
16
17/* Do not use these */
18extern void sram_reprogram_clock(u32 ckctl, u32 dpllctl);
19extern unsigned long sram_reprogram_clock_sz;
20
21#endif
diff --git a/arch/arm/plat-omap/usb.c b/arch/arm/plat-omap/usb.c
index 25bc4a8dd763..98f1c76f8660 100644
--- a/arch/arm/plat-omap/usb.c
+++ b/arch/arm/plat-omap/usb.c
@@ -41,6 +41,7 @@
41 41
42/* These routines should handle the standard chip-specific modes 42/* These routines should handle the standard chip-specific modes
43 * for usb0/1/2 ports, covering basic mux and transceiver setup. 43 * for usb0/1/2 ports, covering basic mux and transceiver setup.
44 * Call omap_usb_init() once, from INIT_MACHINE().
44 * 45 *
45 * Some board-*.c files will need to set up additional mux options, 46 * Some board-*.c files will need to set up additional mux options,
46 * like for suspend handling, vbus sensing, GPIOs, and the D+ pullup. 47 * like for suspend handling, vbus sensing, GPIOs, and the D+ pullup.
diff --git a/arch/arm26/Kconfig b/arch/arm26/Kconfig
index 1f0373267306..1f00b3d03a07 100644
--- a/arch/arm26/Kconfig
+++ b/arch/arm26/Kconfig
@@ -55,6 +55,10 @@ config GENERIC_BUST_SPINLOCK
55config GENERIC_ISA_DMA 55config GENERIC_ISA_DMA
56 bool 56 bool
57 57
58config ARCH_MAY_HAVE_PC_FDC
59 bool
60 default y
61
58source "init/Kconfig" 62source "init/Kconfig"
59 63
60 64
diff --git a/arch/arm26/Makefile b/arch/arm26/Makefile
index ada8985530a5..e9cb8ef4f3fb 100644
--- a/arch/arm26/Makefile
+++ b/arch/arm26/Makefile
@@ -17,10 +17,6 @@ ifeq ($(CONFIG_FRAME_POINTER),y)
17CFLAGS +=-fno-omit-frame-pointer -mno-sched-prolog 17CFLAGS +=-fno-omit-frame-pointer -mno-sched-prolog
18endif 18endif
19 19
20ifeq ($(CONFIG_DEBUG_INFO),y)
21CFLAGS +=-g
22endif
23
24CFLAGS_BOOT :=-mapcs-26 -mcpu=arm3 -msoft-float -Uarm 20CFLAGS_BOOT :=-mapcs-26 -mcpu=arm3 -msoft-float -Uarm
25CFLAGS +=-mapcs-26 -mcpu=arm3 -msoft-float -Uarm 21CFLAGS +=-mapcs-26 -mcpu=arm3 -msoft-float -Uarm
26AFLAGS +=-mapcs-26 -mcpu=arm3 -msoft-float 22AFLAGS +=-mapcs-26 -mcpu=arm3 -msoft-float
diff --git a/arch/arm26/kernel/time.c b/arch/arm26/kernel/time.c
index 549a6b2e177e..e66aedd02fad 100644
--- a/arch/arm26/kernel/time.c
+++ b/arch/arm26/kernel/time.c
@@ -114,7 +114,7 @@ static unsigned long next_rtc_update;
114 */ 114 */
115static inline void do_set_rtc(void) 115static inline void do_set_rtc(void)
116{ 116{
117 if (time_status & STA_UNSYNC || set_rtc == NULL) 117 if (!ntp_synced() || set_rtc == NULL)
118 return; 118 return;
119 119
120//FIXME - timespec.tv_sec is a time_t not unsigned long 120//FIXME - timespec.tv_sec is a time_t not unsigned long
@@ -189,10 +189,7 @@ int do_settimeofday(struct timespec *tv)
189 189
190 xtime.tv_sec = tv->tv_sec; 190 xtime.tv_sec = tv->tv_sec;
191 xtime.tv_nsec = tv->tv_nsec; 191 xtime.tv_nsec = tv->tv_nsec;
192 time_adjust = 0; /* stop active adjtime() */ 192 ntp_clear();
193 time_status |= STA_UNSYNC;
194 time_maxerror = NTP_PHASE_LIMIT;
195 time_esterror = NTP_PHASE_LIMIT;
196 write_sequnlock_irq(&xtime_lock); 193 write_sequnlock_irq(&xtime_lock);
197 clock_was_set(); 194 clock_was_set();
198 return 0; 195 return 0;
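
The same substitution repeats in the cris, frv, h8300 and i386 hunks below: the open-coded four-line NTP reset becomes ntp_clear(), and the STA_UNSYNC test becomes ntp_synced(). A sketch of what the new helpers presumably wrap; the exact definitions live in the generic timekeeping headers, outside this section, so treat this as an assumption drawn from the lines being removed:

	static inline void ntp_clear(void)
	{
		time_adjust = 0;		/* stop active adjtime() */
		time_status |= STA_UNSYNC;
		time_maxerror = NTP_PHASE_LIMIT;
		time_esterror = NTP_PHASE_LIMIT;
	}

	static inline int ntp_synced(void)
	{
		return !(time_status & STA_UNSYNC);
	}
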
diff --git a/arch/cris/arch-v10/kernel/time.c b/arch/cris/arch-v10/kernel/time.c
index 6b7b4e0802e3..dc3dfe9b4a1a 100644
--- a/arch/cris/arch-v10/kernel/time.c
+++ b/arch/cris/arch-v10/kernel/time.c
@@ -240,7 +240,7 @@ timer_interrupt(int irq, void *dev_id, struct pt_regs *regs)
240 * The division here is not time critical since it will run once in 240 * The division here is not time critical since it will run once in
241 * 11 minutes 241 * 11 minutes
242 */ 242 */
243 if ((time_status & STA_UNSYNC) == 0 && 243 if (ntp_synced() &&
244 xtime.tv_sec > last_rtc_update + 660 && 244 xtime.tv_sec > last_rtc_update + 660 &&
245 (xtime.tv_nsec / 1000) >= 500000 - (tick_nsec / 1000) / 2 && 245 (xtime.tv_nsec / 1000) >= 500000 - (tick_nsec / 1000) / 2 &&
246 (xtime.tv_nsec / 1000) <= 500000 + (tick_nsec / 1000) / 2) { 246 (xtime.tv_nsec / 1000) <= 500000 + (tick_nsec / 1000) / 2) {
diff --git a/arch/cris/kernel/time.c b/arch/cris/kernel/time.c
index fa2d4323da25..a2d99b4aedcd 100644
--- a/arch/cris/kernel/time.c
+++ b/arch/cris/kernel/time.c
@@ -114,10 +114,7 @@ int do_settimeofday(struct timespec *tv)
114 set_normalized_timespec(&xtime, sec, nsec); 114 set_normalized_timespec(&xtime, sec, nsec);
115 set_normalized_timespec(&wall_to_monotonic, wtm_sec, wtm_nsec); 115 set_normalized_timespec(&wall_to_monotonic, wtm_sec, wtm_nsec);
116 116
117 time_adjust = 0; /* stop active adjtime() */ 117 ntp_clear();
118 time_status |= STA_UNSYNC;
119 time_maxerror = NTP_PHASE_LIMIT;
120 time_esterror = NTP_PHASE_LIMIT;
121 write_sequnlock_irq(&xtime_lock); 118 write_sequnlock_irq(&xtime_lock);
122 clock_was_set(); 119 clock_was_set();
123 return 0; 120 return 0;
diff --git a/arch/frv/kernel/time.c b/arch/frv/kernel/time.c
index 075db6644694..8d6558b00e44 100644
--- a/arch/frv/kernel/time.c
+++ b/arch/frv/kernel/time.c
@@ -85,7 +85,7 @@ static irqreturn_t timer_interrupt(int irq, void *dummy, struct pt_regs * regs)
85 * CMOS clock accordingly every ~11 minutes. Set_rtc_mmss() has to be 85 * CMOS clock accordingly every ~11 minutes. Set_rtc_mmss() has to be
86 * called as close as possible to 500 ms before the new second starts. 86 * called as close as possible to 500 ms before the new second starts.
87 */ 87 */
88 if ((time_status & STA_UNSYNC) == 0 && 88 if (ntp_synced() &&
89 xtime.tv_sec > last_rtc_update + 660 && 89 xtime.tv_sec > last_rtc_update + 660 &&
90 (xtime.tv_nsec / 1000) >= 500000 - ((unsigned) TICK_SIZE) / 2 && 90 (xtime.tv_nsec / 1000) >= 500000 - ((unsigned) TICK_SIZE) / 2 &&
91 (xtime.tv_nsec / 1000) <= 500000 + ((unsigned) TICK_SIZE) / 2 91 (xtime.tv_nsec / 1000) <= 500000 + ((unsigned) TICK_SIZE) / 2
@@ -216,10 +216,7 @@ int do_settimeofday(struct timespec *tv)
216 set_normalized_timespec(&xtime, sec, nsec); 216 set_normalized_timespec(&xtime, sec, nsec);
217 set_normalized_timespec(&wall_to_monotonic, wtm_sec, wtm_nsec); 217 set_normalized_timespec(&wall_to_monotonic, wtm_sec, wtm_nsec);
218 218
219 time_adjust = 0; /* stop active adjtime() */ 219 ntp_clear();
220 time_status |= STA_UNSYNC;
221 time_maxerror = NTP_PHASE_LIMIT;
222 time_esterror = NTP_PHASE_LIMIT;
223 write_sequnlock_irq(&xtime_lock); 220 write_sequnlock_irq(&xtime_lock);
224 clock_was_set(); 221 clock_was_set();
225 return 0; 222 return 0;
diff --git a/arch/h8300/kernel/time.c b/arch/h8300/kernel/time.c
index 8a600218334d..af8c5d2057dd 100644
--- a/arch/h8300/kernel/time.c
+++ b/arch/h8300/kernel/time.c
@@ -116,10 +116,7 @@ int do_settimeofday(struct timespec *tv)
116 116
117 xtime.tv_sec = tv->tv_sec; 117 xtime.tv_sec = tv->tv_sec;
118 xtime.tv_nsec = tv->tv_nsec; 118 xtime.tv_nsec = tv->tv_nsec;
119 time_adjust = 0; /* stop active adjtime() */ 119 ntp_clear();
120 time_status |= STA_UNSYNC;
121 time_maxerror = NTP_PHASE_LIMIT;
122 time_esterror = NTP_PHASE_LIMIT;
123 write_sequnlock_irq(&xtime_lock); 120 write_sequnlock_irq(&xtime_lock);
124 clock_was_set(); 121 clock_was_set();
125 return 0; 122 return 0;
diff --git a/arch/i386/Kconfig b/arch/i386/Kconfig
index 3b3b017e1c15..5d51b38bd70d 100644
--- a/arch/i386/Kconfig
+++ b/arch/i386/Kconfig
@@ -37,6 +37,10 @@ config GENERIC_IOMAP
37 bool 37 bool
38 default y 38 default y
39 39
40config ARCH_MAY_HAVE_PC_FDC
41 bool
42 default y
43
40source "init/Kconfig" 44source "init/Kconfig"
41 45
42menu "Processor type and features" 46menu "Processor type and features"
@@ -1318,6 +1322,11 @@ config GENERIC_IRQ_PROBE
1318 bool 1322 bool
1319 default y 1323 default y
1320 1324
1325config GENERIC_PENDING_IRQ
1326 bool
1327 depends on GENERIC_HARDIRQS && SMP
1328 default y
1329
1321config X86_SMP 1330config X86_SMP
1322 bool 1331 bool
1323 depends on SMP && !X86_VOYAGER 1332 depends on SMP && !X86_VOYAGER
diff --git a/arch/i386/boot/setup.S b/arch/i386/boot/setup.S
index 8cb420f40c58..ca668d9df164 100644
--- a/arch/i386/boot/setup.S
+++ b/arch/i386/boot/setup.S
@@ -82,7 +82,7 @@ start:
82# This is the setup header, and it must start at %cs:2 (old 0x9020:2) 82# This is the setup header, and it must start at %cs:2 (old 0x9020:2)
83 83
84 .ascii "HdrS" # header signature 84 .ascii "HdrS" # header signature
85 .word 0x0203 # header version number (>= 0x0105) 85 .word 0x0204 # header version number (>= 0x0105)
86 # or else old loadlin-1.5 will fail) 86 # or else old loadlin-1.5 will fail)
87realmode_swtch: .word 0, 0 # default_switch, SETUPSEG 87realmode_swtch: .word 0, 0 # default_switch, SETUPSEG
88start_sys_seg: .word SYSSEG 88start_sys_seg: .word SYSSEG
diff --git a/arch/i386/boot/tools/build.c b/arch/i386/boot/tools/build.c
index 6835f6d47c31..05798419a6a9 100644
--- a/arch/i386/boot/tools/build.c
+++ b/arch/i386/boot/tools/build.c
@@ -177,7 +177,9 @@ int main(int argc, char ** argv)
177 die("Output: seek failed"); 177 die("Output: seek failed");
178 buf[0] = (sys_size & 0xff); 178 buf[0] = (sys_size & 0xff);
179 buf[1] = ((sys_size >> 8) & 0xff); 179 buf[1] = ((sys_size >> 8) & 0xff);
180 if (write(1, buf, 2) != 2) 180 buf[2] = ((sys_size >> 16) & 0xff);
181 buf[3] = ((sys_size >> 24) & 0xff);
182 if (write(1, buf, 4) != 4)
181 die("Write of image length failed"); 183 die("Write of image length failed");
182 184
183 return 0; /* Everything is OK */ 185 return 0; /* Everything is OK */
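
Together with the header-version bump to 0x0204 in setup.S, build.c now records the compressed-kernel size as a full 32-bit little-endian value instead of 16 bits. A hedged sketch of how a boot loader could read the widened field back; the 0x1f4 offset for syssize is an assumption taken from the i386 boot documentation rather than from this patch:

	#include <stdint.h>
	#include <stdio.h>

	static uint32_t read_syssize(FILE *img)
	{
		unsigned char b[4];

		fseek(img, 0x1f4, SEEK_SET);		/* syssize field in the setup header (assumption) */
		if (fread(b, 1, 4, img) != 4)
			return 0;
		return b[0] | (b[1] << 8) | ((uint32_t)b[2] << 16) | ((uint32_t)b[3] << 24);
	}
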
diff --git a/arch/i386/kernel/dmi_scan.c b/arch/i386/kernel/dmi_scan.c
index a3cdf894302b..58516e2ac172 100644
--- a/arch/i386/kernel/dmi_scan.c
+++ b/arch/i386/kernel/dmi_scan.c
@@ -6,32 +6,28 @@
6#include <linux/bootmem.h> 6#include <linux/bootmem.h>
7 7
8 8
9struct dmi_header {
10 u8 type;
11 u8 length;
12 u16 handle;
13};
14
15#undef DMI_DEBUG
16
17#ifdef DMI_DEBUG
18#define dmi_printk(x) printk x
19#else
20#define dmi_printk(x)
21#endif
22
23static char * __init dmi_string(struct dmi_header *dm, u8 s) 9static char * __init dmi_string(struct dmi_header *dm, u8 s)
24{ 10{
25 u8 *bp = ((u8 *) dm) + dm->length; 11 u8 *bp = ((u8 *) dm) + dm->length;
12 char *str = "";
26 13
27 if (!s) 14 if (s) {
28 return "";
29 s--;
30 while (s > 0 && *bp) {
31 bp += strlen(bp) + 1;
32 s--; 15 s--;
33 } 16 while (s > 0 && *bp) {
34 return bp; 17 bp += strlen(bp) + 1;
18 s--;
19 }
20
21 if (*bp != 0) {
22 str = alloc_bootmem(strlen(bp) + 1);
23 if (str != NULL)
24 strcpy(str, bp);
25 else
26 printk(KERN_ERR "dmi_string: out of memory.\n");
27 }
28 }
29
30 return str;
35} 31}
36 32
37/* 33/*
@@ -84,69 +80,76 @@ static int __init dmi_checksum(u8 *buf)
84 return sum == 0; 80 return sum == 0;
85} 81}
86 82
87static int __init dmi_iterate(void (*decode)(struct dmi_header *)) 83static char *dmi_ident[DMI_STRING_MAX];
84static LIST_HEAD(dmi_devices);
85
86/*
87 * Save a DMI string
88 */
89static void __init dmi_save_ident(struct dmi_header *dm, int slot, int string)
88{ 90{
89 u8 buf[15]; 91 char *p, *d = (char*) dm;
90 char __iomem *p, *q;
91 92
92 /* 93 if (dmi_ident[slot])
93 * no iounmap() for that ioremap(); it would be a no-op, but it's 94 return;
94 * so early in setup that sucker gets confused into doing what 95
95 * it shouldn't if we actually call it. 96 p = dmi_string(dm, d[string]);
96 */
97 p = ioremap(0xF0000, 0x10000);
98 if (p == NULL) 97 if (p == NULL)
99 return -1; 98 return;
100 99
101 for (q = p; q < p + 0x10000; q += 16) { 100 dmi_ident[slot] = p;
102 memcpy_fromio(buf, q, 15); 101}
103 if ((memcmp(buf, "_DMI_", 5) == 0) && dmi_checksum(buf)) {
104 u16 num = (buf[13] << 8) | buf[12];
105 u16 len = (buf[7] << 8) | buf[6];
106 u32 base = (buf[11] << 24) | (buf[10] << 16) |
107 (buf[9] << 8) | buf[8];
108 102
109 /* 103static void __init dmi_save_devices(struct dmi_header *dm)
110 * DMI version 0.0 means that the real version is taken from 104{
111 * the SMBIOS version, which we don't know at this point. 105 int i, count = (dm->length - sizeof(struct dmi_header)) / 2;
112 */ 106 struct dmi_device *dev;
113 if (buf[14] != 0) 107
114 printk(KERN_INFO "DMI %d.%d present.\n", 108 for (i = 0; i < count; i++) {
115 buf[14] >> 4, buf[14] & 0xF); 109 char *d = ((char *) dm) + (i * 2);
116 else
117 printk(KERN_INFO "DMI present.\n");
118 110
119 dmi_printk((KERN_INFO "%d structures occupying %d bytes.\n", 111 /* Skip disabled device */
120 num, len)); 112 if ((*d & 0x80) == 0)
121 dmi_printk((KERN_INFO "DMI table at 0x%08X.\n", base)); 113 continue;
122 114
123 if (dmi_table(base,len, num, decode) == 0) 115 dev = alloc_bootmem(sizeof(*dev));
124 return 0; 116 if (!dev) {
117 printk(KERN_ERR "dmi_save_devices: out of memory.\n");
118 break;
125 } 119 }
120
121 dev->type = *d++ & 0x7f;
122 dev->name = dmi_string(dm, *d);
123 dev->device_data = NULL;
124
125 list_add(&dev->list, &dmi_devices);
126 } 126 }
127 return -1;
128} 127}
129 128
130static char *dmi_ident[DMI_STRING_MAX]; 129static void __init dmi_save_ipmi_device(struct dmi_header *dm)
131
132/*
133 * Save a DMI string
134 */
135static void __init dmi_save_ident(struct dmi_header *dm, int slot, int string)
136{ 130{
137 char *d = (char*)dm; 131 struct dmi_device *dev;
138 char *p = dmi_string(dm, d[string]); 132 void * data;
139 133
140 if (p == NULL || *p == 0) 134 data = alloc_bootmem(dm->length);
135 if (data == NULL) {
136 printk(KERN_ERR "dmi_save_ipmi_device: out of memory.\n");
141 return; 137 return;
142 if (dmi_ident[slot]) 138 }
139
140 memcpy(data, dm, dm->length);
141
142 dev = alloc_bootmem(sizeof(*dev));
143 if (!dev) {
144 printk(KERN_ERR "dmi_save_ipmi_device: out of memory.\n");
143 return; 145 return;
146 }
144 147
145 dmi_ident[slot] = alloc_bootmem(strlen(p) + 1); 148 dev->type = DMI_DEV_TYPE_IPMI;
146 if(dmi_ident[slot]) 149 dev->name = "IPMI controller";
147 strcpy(dmi_ident[slot], p); 150 dev->device_data = data;
148 else 151
149 printk(KERN_ERR "dmi_save_ident: out of memory.\n"); 152 list_add(&dev->list, &dmi_devices);
150} 153}
151 154
152/* 155/*
@@ -156,42 +159,69 @@ static void __init dmi_save_ident(struct dmi_header *dm, int slot, int string)
156 */ 159 */
157static void __init dmi_decode(struct dmi_header *dm) 160static void __init dmi_decode(struct dmi_header *dm)
158{ 161{
159 u8 *data __attribute__((__unused__)) = (u8 *)dm;
160
161 switch(dm->type) { 162 switch(dm->type) {
162 case 0: 163 case 0: /* BIOS Information */
163 dmi_printk(("BIOS Vendor: %s\n", dmi_string(dm, data[4])));
164 dmi_save_ident(dm, DMI_BIOS_VENDOR, 4); 164 dmi_save_ident(dm, DMI_BIOS_VENDOR, 4);
165 dmi_printk(("BIOS Version: %s\n", dmi_string(dm, data[5])));
166 dmi_save_ident(dm, DMI_BIOS_VERSION, 5); 165 dmi_save_ident(dm, DMI_BIOS_VERSION, 5);
167 dmi_printk(("BIOS Release: %s\n", dmi_string(dm, data[8])));
168 dmi_save_ident(dm, DMI_BIOS_DATE, 8); 166 dmi_save_ident(dm, DMI_BIOS_DATE, 8);
169 break; 167 break;
170 case 1: 168 case 1: /* System Information */
171 dmi_printk(("System Vendor: %s\n", dmi_string(dm, data[4])));
172 dmi_save_ident(dm, DMI_SYS_VENDOR, 4); 169 dmi_save_ident(dm, DMI_SYS_VENDOR, 4);
173 dmi_printk(("Product Name: %s\n", dmi_string(dm, data[5])));
174 dmi_save_ident(dm, DMI_PRODUCT_NAME, 5); 170 dmi_save_ident(dm, DMI_PRODUCT_NAME, 5);
175 dmi_printk(("Version: %s\n", dmi_string(dm, data[6])));
176 dmi_save_ident(dm, DMI_PRODUCT_VERSION, 6); 171 dmi_save_ident(dm, DMI_PRODUCT_VERSION, 6);
177 dmi_printk(("Serial Number: %s\n", dmi_string(dm, data[7])));
178 dmi_save_ident(dm, DMI_PRODUCT_SERIAL, 7); 172 dmi_save_ident(dm, DMI_PRODUCT_SERIAL, 7);
179 break; 173 break;
180 case 2: 174 case 2: /* Base Board Information */
181 dmi_printk(("Board Vendor: %s\n", dmi_string(dm, data[4])));
182 dmi_save_ident(dm, DMI_BOARD_VENDOR, 4); 175 dmi_save_ident(dm, DMI_BOARD_VENDOR, 4);
183 dmi_printk(("Board Name: %s\n", dmi_string(dm, data[5])));
184 dmi_save_ident(dm, DMI_BOARD_NAME, 5); 176 dmi_save_ident(dm, DMI_BOARD_NAME, 5);
185 dmi_printk(("Board Version: %s\n", dmi_string(dm, data[6])));
186 dmi_save_ident(dm, DMI_BOARD_VERSION, 6); 177 dmi_save_ident(dm, DMI_BOARD_VERSION, 6);
187 break; 178 break;
179 case 10: /* Onboard Devices Information */
180 dmi_save_devices(dm);
181 break;
182 case 38: /* IPMI Device Information */
183 dmi_save_ipmi_device(dm);
188 } 184 }
189} 185}
190 186
191void __init dmi_scan_machine(void) 187void __init dmi_scan_machine(void)
192{ 188{
193 if (dmi_iterate(dmi_decode)) 189 u8 buf[15];
194 printk(KERN_INFO "DMI not present.\n"); 190 char __iomem *p, *q;
191
192 /*
193 * no iounmap() for that ioremap(); it would be a no-op, but it's
194 * so early in setup that sucker gets confused into doing what
195 * it shouldn't if we actually call it.
196 */
197 p = ioremap(0xF0000, 0x10000);
198 if (p == NULL)
199 goto out;
200
201 for (q = p; q < p + 0x10000; q += 16) {
202 memcpy_fromio(buf, q, 15);
203 if ((memcmp(buf, "_DMI_", 5) == 0) && dmi_checksum(buf)) {
204 u16 num = (buf[13] << 8) | buf[12];
205 u16 len = (buf[7] << 8) | buf[6];
206 u32 base = (buf[11] << 24) | (buf[10] << 16) |
207 (buf[9] << 8) | buf[8];
208
209 /*
210 * DMI version 0.0 means that the real version is taken from
211 * the SMBIOS version, which we don't know at this point.
212 */
213 if (buf[14] != 0)
214 printk(KERN_INFO "DMI %d.%d present.\n",
215 buf[14] >> 4, buf[14] & 0xF);
216 else
217 printk(KERN_INFO "DMI present.\n");
218
219 if (dmi_table(base,len, num, dmi_decode) == 0)
220 return;
221 }
222 }
223
224out: printk(KERN_INFO "DMI not present.\n");
195} 225}
196 226
197 227
@@ -218,9 +248,9 @@ int dmi_check_system(struct dmi_system_id *list)
218 /* No match */ 248 /* No match */
219 goto fail; 249 goto fail;
220 } 250 }
251 count++;
221 if (d->callback && d->callback(d)) 252 if (d->callback && d->callback(d))
222 break; 253 break;
223 count++;
224fail: d++; 254fail: d++;
225 } 255 }
226 256
@@ -240,3 +270,32 @@ char *dmi_get_system_info(int field)
240 return dmi_ident[field]; 270 return dmi_ident[field];
241} 271}
242EXPORT_SYMBOL(dmi_get_system_info); 272EXPORT_SYMBOL(dmi_get_system_info);
273
274/**
275 * dmi_find_device - find onboard device by type/name
276 * @type: device type or %DMI_DEV_TYPE_ANY to match all device types
277 * @desc: device name string or %NULL to match all
278 * @from: previous device found in search, or %NULL for new search.
279 *
280 * Iterates through the list of known onboard devices. If a device is
 281 * found with a matching @type and @name, a pointer to its device
 282 * structure is returned. Otherwise, %NULL is returned.
 283 * A new search is initiated by passing %NULL as the @from argument.
 284 * If @from is not %NULL, the search continues from the next device.
285 */
286struct dmi_device * dmi_find_device(int type, const char *name,
287 struct dmi_device *from)
288{
289 struct list_head *d, *head = from ? &from->list : &dmi_devices;
290
291 for(d = head->next; d != &dmi_devices; d = d->next) {
292 struct dmi_device *dev = list_entry(d, struct dmi_device, list);
293
294 if (((type == DMI_DEV_TYPE_ANY) || (dev->type == type)) &&
295 ((name == NULL) || (strcmp(dev->name, name) == 0)))
296 return dev;
297 }
298
299 return NULL;
300}
301EXPORT_SYMBOL(dmi_find_device);
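
A sketch of how a consumer such as the IPMI driver might walk the new device list; everything used here (dmi_find_device, DMI_DEV_TYPE_IPMI, the device_data pointer holding the copied type-38 record) comes from the hunk above, while the loop and the parse_ipmi_record() helper are illustrative:

	struct dmi_device *dev = NULL;

	while ((dev = dmi_find_device(DMI_DEV_TYPE_IPMI, NULL, dev)) != NULL) {
		struct dmi_header *dm = dev->device_data;	/* saved IPMI record */

		/* dm->length bytes of IPMI Device Information follow the header */
		parse_ipmi_record(dm);				/* hypothetical helper */
	}
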
diff --git a/arch/i386/kernel/entry.S b/arch/i386/kernel/entry.S
index abb909793efc..3aad03839660 100644
--- a/arch/i386/kernel/entry.S
+++ b/arch/i386/kernel/entry.S
@@ -507,7 +507,7 @@ label: \
507 pushl $__KERNEL_CS; \ 507 pushl $__KERNEL_CS; \
508 pushl $sysenter_past_esp 508 pushl $sysenter_past_esp
509 509
510ENTRY(debug) 510KPROBE_ENTRY(debug)
511 cmpl $sysenter_entry,(%esp) 511 cmpl $sysenter_entry,(%esp)
512 jne debug_stack_correct 512 jne debug_stack_correct
513 FIX_STACK(12, debug_stack_correct, debug_esp_fix_insn) 513 FIX_STACK(12, debug_stack_correct, debug_esp_fix_insn)
@@ -518,7 +518,7 @@ debug_stack_correct:
518 movl %esp,%eax # pt_regs pointer 518 movl %esp,%eax # pt_regs pointer
519 call do_debug 519 call do_debug
520 jmp ret_from_exception 520 jmp ret_from_exception
521 521 .previous .text
522/* 522/*
523 * NMI is doubly nasty. It can happen _while_ we're handling 523 * NMI is doubly nasty. It can happen _while_ we're handling
524 * a debug fault, and the debug fault hasn't yet been able to 524 * a debug fault, and the debug fault hasn't yet been able to
@@ -591,13 +591,14 @@ nmi_16bit_stack:
591 .long 1b,iret_exc 591 .long 1b,iret_exc
592.previous 592.previous
593 593
594ENTRY(int3) 594KPROBE_ENTRY(int3)
595 pushl $-1 # mark this as an int 595 pushl $-1 # mark this as an int
596 SAVE_ALL 596 SAVE_ALL
597 xorl %edx,%edx # zero error code 597 xorl %edx,%edx # zero error code
598 movl %esp,%eax # pt_regs pointer 598 movl %esp,%eax # pt_regs pointer
599 call do_int3 599 call do_int3
600 jmp ret_from_exception 600 jmp ret_from_exception
601 .previous .text
601 602
602ENTRY(overflow) 603ENTRY(overflow)
603 pushl $0 604 pushl $0
@@ -631,17 +632,19 @@ ENTRY(stack_segment)
631 pushl $do_stack_segment 632 pushl $do_stack_segment
632 jmp error_code 633 jmp error_code
633 634
634ENTRY(general_protection) 635KPROBE_ENTRY(general_protection)
635 pushl $do_general_protection 636 pushl $do_general_protection
636 jmp error_code 637 jmp error_code
638 .previous .text
637 639
638ENTRY(alignment_check) 640ENTRY(alignment_check)
639 pushl $do_alignment_check 641 pushl $do_alignment_check
640 jmp error_code 642 jmp error_code
641 643
642ENTRY(page_fault) 644KPROBE_ENTRY(page_fault)
643 pushl $do_page_fault 645 pushl $do_page_fault
644 jmp error_code 646 jmp error_code
647 .previous .text
645 648
646#ifdef CONFIG_X86_MCE 649#ifdef CONFIG_X86_MCE
647ENTRY(machine_check) 650ENTRY(machine_check)
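
KPROBE_ENTRY is the assembly-side counterpart of the new KPROBES_TEXT section: it is expected to emit the stub into .kprobes.text so that a probe can never be planted on the very exception paths kprobes depends on, and the trailing ".previous .text" drops back to the normal text section. A rough guess at the macro, assuming it sits next to ENTRY() in linux/linkage.h:

	#ifdef CONFIG_KPROBES
	#define KPROBE_ENTRY(name) \
		.section .kprobes.text, "ax"; \
		ENTRY(name)
	#else
	#define KPROBE_ENTRY(name)  ENTRY(name)
	#endif
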
diff --git a/arch/i386/kernel/io_apic.c b/arch/i386/kernel/io_apic.c
index 6578f40bd501..0e727e6da5c9 100644
--- a/arch/i386/kernel/io_apic.c
+++ b/arch/i386/kernel/io_apic.c
@@ -33,6 +33,7 @@
33#include <linux/acpi.h> 33#include <linux/acpi.h>
34#include <linux/module.h> 34#include <linux/module.h>
35#include <linux/sysdev.h> 35#include <linux/sysdev.h>
36
36#include <asm/io.h> 37#include <asm/io.h>
37#include <asm/smp.h> 38#include <asm/smp.h>
38#include <asm/desc.h> 39#include <asm/desc.h>
@@ -77,7 +78,7 @@ static struct irq_pin_list {
77 int apic, pin, next; 78 int apic, pin, next;
78} irq_2_pin[PIN_MAP_SIZE]; 79} irq_2_pin[PIN_MAP_SIZE];
79 80
80int vector_irq[NR_VECTORS] = { [0 ... NR_VECTORS - 1] = -1}; 81int vector_irq[NR_VECTORS] __read_mostly = { [0 ... NR_VECTORS - 1] = -1};
81#ifdef CONFIG_PCI_MSI 82#ifdef CONFIG_PCI_MSI
82#define vector_to_irq(vector) \ 83#define vector_to_irq(vector) \
83 (platform_legacy_irq(vector) ? vector : vector_irq[vector]) 84 (platform_legacy_irq(vector) ? vector : vector_irq[vector])
@@ -222,13 +223,21 @@ static void clear_IO_APIC (void)
222 clear_IO_APIC_pin(apic, pin); 223 clear_IO_APIC_pin(apic, pin);
223} 224}
224 225
226#ifdef CONFIG_SMP
225static void set_ioapic_affinity_irq(unsigned int irq, cpumask_t cpumask) 227static void set_ioapic_affinity_irq(unsigned int irq, cpumask_t cpumask)
226{ 228{
227 unsigned long flags; 229 unsigned long flags;
228 int pin; 230 int pin;
229 struct irq_pin_list *entry = irq_2_pin + irq; 231 struct irq_pin_list *entry = irq_2_pin + irq;
230 unsigned int apicid_value; 232 unsigned int apicid_value;
233 cpumask_t tmp;
231 234
235 cpus_and(tmp, cpumask, cpu_online_map);
236 if (cpus_empty(tmp))
237 tmp = TARGET_CPUS;
238
239 cpus_and(cpumask, tmp, CPU_MASK_ALL);
240
232 apicid_value = cpu_mask_to_apicid(cpumask); 241 apicid_value = cpu_mask_to_apicid(cpumask);
233 /* Prepare to do the io_apic_write */ 242 /* Prepare to do the io_apic_write */
234 apicid_value = apicid_value << 24; 243 apicid_value = apicid_value << 24;
@@ -242,6 +251,7 @@ static void set_ioapic_affinity_irq(unsigned int irq, cpumask_t cpumask)
242 break; 251 break;
243 entry = irq_2_pin + entry->next; 252 entry = irq_2_pin + entry->next;
244 } 253 }
254 set_irq_info(irq, cpumask);
245 spin_unlock_irqrestore(&ioapic_lock, flags); 255 spin_unlock_irqrestore(&ioapic_lock, flags);
246} 256}
247 257
@@ -259,7 +269,6 @@ static void set_ioapic_affinity_irq(unsigned int irq, cpumask_t cpumask)
259# define Dprintk(x...) 269# define Dprintk(x...)
260# endif 270# endif
261 271
262cpumask_t __cacheline_aligned pending_irq_balance_cpumask[NR_IRQS];
263 272
264#define IRQBALANCE_CHECK_ARCH -999 273#define IRQBALANCE_CHECK_ARCH -999
265static int irqbalance_disabled = IRQBALANCE_CHECK_ARCH; 274static int irqbalance_disabled = IRQBALANCE_CHECK_ARCH;
@@ -328,12 +337,7 @@ static inline void balance_irq(int cpu, int irq)
328 cpus_and(allowed_mask, cpu_online_map, irq_affinity[irq]); 337 cpus_and(allowed_mask, cpu_online_map, irq_affinity[irq]);
329 new_cpu = move(cpu, allowed_mask, now, 1); 338 new_cpu = move(cpu, allowed_mask, now, 1);
330 if (cpu != new_cpu) { 339 if (cpu != new_cpu) {
331 irq_desc_t *desc = irq_desc + irq; 340 set_pending_irq(irq, cpumask_of_cpu(new_cpu));
332 unsigned long flags;
333
334 spin_lock_irqsave(&desc->lock, flags);
335 pending_irq_balance_cpumask[irq] = cpumask_of_cpu(new_cpu);
336 spin_unlock_irqrestore(&desc->lock, flags);
337 } 341 }
338} 342}
339 343
@@ -528,16 +532,12 @@ tryanotherirq:
528 cpus_and(tmp, target_cpu_mask, allowed_mask); 532 cpus_and(tmp, target_cpu_mask, allowed_mask);
529 533
530 if (!cpus_empty(tmp)) { 534 if (!cpus_empty(tmp)) {
531 irq_desc_t *desc = irq_desc + selected_irq;
532 unsigned long flags;
533 535
534 Dprintk("irq = %d moved to cpu = %d\n", 536 Dprintk("irq = %d moved to cpu = %d\n",
535 selected_irq, min_loaded); 537 selected_irq, min_loaded);
536 /* mark for change destination */ 538 /* mark for change destination */
537 spin_lock_irqsave(&desc->lock, flags); 539 set_pending_irq(selected_irq, cpumask_of_cpu(min_loaded));
538 pending_irq_balance_cpumask[selected_irq] = 540
539 cpumask_of_cpu(min_loaded);
540 spin_unlock_irqrestore(&desc->lock, flags);
541 /* Since we made a change, come back sooner to 541 /* Since we made a change, come back sooner to
542 * check for more variation. 542 * check for more variation.
543 */ 543 */
@@ -568,7 +568,8 @@ static int balanced_irq(void *unused)
568 568
569 /* push everything to CPU 0 to give us a starting point. */ 569 /* push everything to CPU 0 to give us a starting point. */
570 for (i = 0 ; i < NR_IRQS ; i++) { 570 for (i = 0 ; i < NR_IRQS ; i++) {
571 pending_irq_balance_cpumask[i] = cpumask_of_cpu(0); 571 pending_irq_cpumask[i] = cpumask_of_cpu(0);
572 set_pending_irq(i, cpumask_of_cpu(0));
572 } 573 }
573 574
574 for ( ; ; ) { 575 for ( ; ; ) {
@@ -647,20 +648,9 @@ int __init irqbalance_disable(char *str)
647 648
648__setup("noirqbalance", irqbalance_disable); 649__setup("noirqbalance", irqbalance_disable);
649 650
650static inline void move_irq(int irq)
651{
652 /* note - we hold the desc->lock */
653 if (unlikely(!cpus_empty(pending_irq_balance_cpumask[irq]))) {
654 set_ioapic_affinity_irq(irq, pending_irq_balance_cpumask[irq]);
655 cpus_clear(pending_irq_balance_cpumask[irq]);
656 }
657}
658
659late_initcall(balanced_irq_init); 651late_initcall(balanced_irq_init);
660
661#else /* !CONFIG_IRQBALANCE */
662static inline void move_irq(int irq) { }
663#endif /* CONFIG_IRQBALANCE */ 652#endif /* CONFIG_IRQBALANCE */
653#endif /* CONFIG_SMP */
664 654
665#ifndef CONFIG_SMP 655#ifndef CONFIG_SMP
666void fastcall send_IPI_self(int vector) 656void fastcall send_IPI_self(int vector)
@@ -820,6 +810,7 @@ EXPORT_SYMBOL(IO_APIC_get_PCI_irq_vector);
820 * we need to reprogram the ioredtbls to cater for the cpus which have come online 810 * we need to reprogram the ioredtbls to cater for the cpus which have come online
821 * so mask in all cases should simply be TARGET_CPUS 811 * so mask in all cases should simply be TARGET_CPUS
822 */ 812 */
813#ifdef CONFIG_SMP
823void __init setup_ioapic_dest(void) 814void __init setup_ioapic_dest(void)
824{ 815{
825 int pin, ioapic, irq, irq_entry; 816 int pin, ioapic, irq, irq_entry;
@@ -838,6 +829,7 @@ void __init setup_ioapic_dest(void)
838 829
839 } 830 }
840} 831}
832#endif
841 833
842/* 834/*
843 * EISA Edge/Level control register, ELCR 835 * EISA Edge/Level control register, ELCR
@@ -1127,7 +1119,7 @@ static inline int IO_APIC_irq_trigger(int irq)
1127} 1119}
1128 1120
1129/* irq_vectors is indexed by the sum of all RTEs in all I/O APICs. */ 1121/* irq_vectors is indexed by the sum of all RTEs in all I/O APICs. */
1130u8 irq_vector[NR_IRQ_VECTORS] = { FIRST_DEVICE_VECTOR , 0 }; 1122u8 irq_vector[NR_IRQ_VECTORS] __read_mostly = { FIRST_DEVICE_VECTOR , 0 };
1131 1123
1132int assign_irq_vector(int irq) 1124int assign_irq_vector(int irq)
1133{ 1125{
@@ -1249,6 +1241,7 @@ static void __init setup_IO_APIC_irqs(void)
1249 spin_lock_irqsave(&ioapic_lock, flags); 1241 spin_lock_irqsave(&ioapic_lock, flags);
1250 io_apic_write(apic, 0x11+2*pin, *(((int *)&entry)+1)); 1242 io_apic_write(apic, 0x11+2*pin, *(((int *)&entry)+1));
1251 io_apic_write(apic, 0x10+2*pin, *(((int *)&entry)+0)); 1243 io_apic_write(apic, 0x10+2*pin, *(((int *)&entry)+0));
1244 set_native_irq_info(irq, TARGET_CPUS);
1252 spin_unlock_irqrestore(&ioapic_lock, flags); 1245 spin_unlock_irqrestore(&ioapic_lock, flags);
1253 } 1246 }
1254 } 1247 }
@@ -1944,6 +1937,7 @@ static void ack_edge_ioapic_vector(unsigned int vector)
1944{ 1937{
1945 int irq = vector_to_irq(vector); 1938 int irq = vector_to_irq(vector);
1946 1939
1940 move_irq(vector);
1947 ack_edge_ioapic_irq(irq); 1941 ack_edge_ioapic_irq(irq);
1948} 1942}
1949 1943
@@ -1958,6 +1952,7 @@ static void end_level_ioapic_vector (unsigned int vector)
1958{ 1952{
1959 int irq = vector_to_irq(vector); 1953 int irq = vector_to_irq(vector);
1960 1954
1955 move_irq(vector);
1961 end_level_ioapic_irq(irq); 1956 end_level_ioapic_irq(irq);
1962} 1957}
1963 1958
@@ -1975,14 +1970,17 @@ static void unmask_IO_APIC_vector (unsigned int vector)
1975 unmask_IO_APIC_irq(irq); 1970 unmask_IO_APIC_irq(irq);
1976} 1971}
1977 1972
1973#ifdef CONFIG_SMP
1978static void set_ioapic_affinity_vector (unsigned int vector, 1974static void set_ioapic_affinity_vector (unsigned int vector,
1979 cpumask_t cpu_mask) 1975 cpumask_t cpu_mask)
1980{ 1976{
1981 int irq = vector_to_irq(vector); 1977 int irq = vector_to_irq(vector);
1982 1978
1979 set_native_irq_info(vector, cpu_mask);
1983 set_ioapic_affinity_irq(irq, cpu_mask); 1980 set_ioapic_affinity_irq(irq, cpu_mask);
1984} 1981}
1985#endif 1982#endif
1983#endif
1986 1984
1987/* 1985/*
1988 * Level and edge triggered IO-APIC interrupts need different handling, 1986 * Level and edge triggered IO-APIC interrupts need different handling,
@@ -1992,7 +1990,7 @@ static void set_ioapic_affinity_vector (unsigned int vector,
1992 * edge-triggered handler, without risking IRQ storms and other ugly 1990 * edge-triggered handler, without risking IRQ storms and other ugly
1993 * races. 1991 * races.
1994 */ 1992 */
1995static struct hw_interrupt_type ioapic_edge_type = { 1993static struct hw_interrupt_type ioapic_edge_type __read_mostly = {
1996 .typename = "IO-APIC-edge", 1994 .typename = "IO-APIC-edge",
1997 .startup = startup_edge_ioapic, 1995 .startup = startup_edge_ioapic,
1998 .shutdown = shutdown_edge_ioapic, 1996 .shutdown = shutdown_edge_ioapic,
@@ -2000,10 +1998,12 @@ static struct hw_interrupt_type ioapic_edge_type = {
2000 .disable = disable_edge_ioapic, 1998 .disable = disable_edge_ioapic,
2001 .ack = ack_edge_ioapic, 1999 .ack = ack_edge_ioapic,
2002 .end = end_edge_ioapic, 2000 .end = end_edge_ioapic,
2001#ifdef CONFIG_SMP
2003 .set_affinity = set_ioapic_affinity, 2002 .set_affinity = set_ioapic_affinity,
2003#endif
2004}; 2004};
2005 2005
2006static struct hw_interrupt_type ioapic_level_type = { 2006static struct hw_interrupt_type ioapic_level_type __read_mostly = {
2007 .typename = "IO-APIC-level", 2007 .typename = "IO-APIC-level",
2008 .startup = startup_level_ioapic, 2008 .startup = startup_level_ioapic,
2009 .shutdown = shutdown_level_ioapic, 2009 .shutdown = shutdown_level_ioapic,
@@ -2011,7 +2011,9 @@ static struct hw_interrupt_type ioapic_level_type = {
2011 .disable = disable_level_ioapic, 2011 .disable = disable_level_ioapic,
2012 .ack = mask_and_ack_level_ioapic, 2012 .ack = mask_and_ack_level_ioapic,
2013 .end = end_level_ioapic, 2013 .end = end_level_ioapic,
2014#ifdef CONFIG_SMP
2014 .set_affinity = set_ioapic_affinity, 2015 .set_affinity = set_ioapic_affinity,
2016#endif
2015}; 2017};
2016 2018
2017static inline void init_IO_APIC_traps(void) 2019static inline void init_IO_APIC_traps(void)
@@ -2074,7 +2076,7 @@ static void ack_lapic_irq (unsigned int irq)
2074 2076
2075static void end_lapic_irq (unsigned int i) { /* nothing */ } 2077static void end_lapic_irq (unsigned int i) { /* nothing */ }
2076 2078
2077static struct hw_interrupt_type lapic_irq_type = { 2079static struct hw_interrupt_type lapic_irq_type __read_mostly = {
2078 .typename = "local-APIC-edge", 2080 .typename = "local-APIC-edge",
2079 .startup = NULL, /* startup_irq() not used for IRQ0 */ 2081 .startup = NULL, /* startup_irq() not used for IRQ0 */
2080 .shutdown = NULL, /* shutdown_irq() not used for IRQ0 */ 2082 .shutdown = NULL, /* shutdown_irq() not used for IRQ0 */
@@ -2569,6 +2571,7 @@ int io_apic_set_pci_routing (int ioapic, int pin, int irq, int edge_level, int a
2569 spin_lock_irqsave(&ioapic_lock, flags); 2571 spin_lock_irqsave(&ioapic_lock, flags);
2570 io_apic_write(ioapic, 0x11+2*pin, *(((int *)&entry)+1)); 2572 io_apic_write(ioapic, 0x11+2*pin, *(((int *)&entry)+1));
2571 io_apic_write(ioapic, 0x10+2*pin, *(((int *)&entry)+0)); 2573 io_apic_write(ioapic, 0x10+2*pin, *(((int *)&entry)+0));
2574 set_native_irq_info(use_pci_vector() ? entry.vector : irq, TARGET_CPUS);
2572 spin_unlock_irqrestore(&ioapic_lock, flags); 2575 spin_unlock_irqrestore(&ioapic_lock, flags);
2573 2576
2574 return 0; 2577 return 0;
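
The private pending_irq_balance_cpumask bookkeeping is replaced by the generic pending-IRQ machinery selected by the new GENERIC_PENDING_IRQ option: irqbalance only records the desired destination, and the actual RTE rewrite happens later from move_irq() in the ack/end hooks, where the vector is quiescent. A hedged sketch of what set_pending_irq() presumably does on the generic side; the real helper (and the move_irq flag in irq_desc) is outside this section, so the field name is an assumption:

	void set_pending_irq(unsigned int irq, cpumask_t mask)
	{
		irq_desc_t *desc = irq_desc + irq;
		unsigned long flags;

		spin_lock_irqsave(&desc->lock, flags);
		desc->move_irq = 1;			/* ack/end will perform the move */
		pending_irq_cpumask[irq] = mask;
		spin_unlock_irqrestore(&desc->lock, flags);
	}
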
diff --git a/arch/i386/kernel/kprobes.c b/arch/i386/kernel/kprobes.c
index a6d8c45961d3..6345b430b105 100644
--- a/arch/i386/kernel/kprobes.c
+++ b/arch/i386/kernel/kprobes.c
@@ -62,32 +62,32 @@ static inline int is_IF_modifier(kprobe_opcode_t opcode)
62 return 0; 62 return 0;
63} 63}
64 64
65int arch_prepare_kprobe(struct kprobe *p) 65int __kprobes arch_prepare_kprobe(struct kprobe *p)
66{ 66{
67 return 0; 67 return 0;
68} 68}
69 69
70void arch_copy_kprobe(struct kprobe *p) 70void __kprobes arch_copy_kprobe(struct kprobe *p)
71{ 71{
72 memcpy(p->ainsn.insn, p->addr, MAX_INSN_SIZE * sizeof(kprobe_opcode_t)); 72 memcpy(p->ainsn.insn, p->addr, MAX_INSN_SIZE * sizeof(kprobe_opcode_t));
73 p->opcode = *p->addr; 73 p->opcode = *p->addr;
74} 74}
75 75
76void arch_arm_kprobe(struct kprobe *p) 76void __kprobes arch_arm_kprobe(struct kprobe *p)
77{ 77{
78 *p->addr = BREAKPOINT_INSTRUCTION; 78 *p->addr = BREAKPOINT_INSTRUCTION;
79 flush_icache_range((unsigned long) p->addr, 79 flush_icache_range((unsigned long) p->addr,
80 (unsigned long) p->addr + sizeof(kprobe_opcode_t)); 80 (unsigned long) p->addr + sizeof(kprobe_opcode_t));
81} 81}
82 82
83void arch_disarm_kprobe(struct kprobe *p) 83void __kprobes arch_disarm_kprobe(struct kprobe *p)
84{ 84{
85 *p->addr = p->opcode; 85 *p->addr = p->opcode;
86 flush_icache_range((unsigned long) p->addr, 86 flush_icache_range((unsigned long) p->addr,
87 (unsigned long) p->addr + sizeof(kprobe_opcode_t)); 87 (unsigned long) p->addr + sizeof(kprobe_opcode_t));
88} 88}
89 89
90void arch_remove_kprobe(struct kprobe *p) 90void __kprobes arch_remove_kprobe(struct kprobe *p)
91{ 91{
92} 92}
93 93
@@ -127,7 +127,8 @@ static inline void prepare_singlestep(struct kprobe *p, struct pt_regs *regs)
127 regs->eip = (unsigned long)&p->ainsn.insn; 127 regs->eip = (unsigned long)&p->ainsn.insn;
128} 128}
129 129
130void arch_prepare_kretprobe(struct kretprobe *rp, struct pt_regs *regs) 130void __kprobes arch_prepare_kretprobe(struct kretprobe *rp,
131 struct pt_regs *regs)
131{ 132{
132 unsigned long *sara = (unsigned long *)&regs->esp; 133 unsigned long *sara = (unsigned long *)&regs->esp;
133 struct kretprobe_instance *ri; 134 struct kretprobe_instance *ri;
@@ -150,7 +151,7 @@ void arch_prepare_kretprobe(struct kretprobe *rp, struct pt_regs *regs)
150 * Interrupts are disabled on entry as trap3 is an interrupt gate and they 151 * Interrupts are disabled on entry as trap3 is an interrupt gate and they
151 * remain disabled thorough out this function. 152 * remain disabled thorough out this function.
152 */ 153 */
153static int kprobe_handler(struct pt_regs *regs) 154static int __kprobes kprobe_handler(struct pt_regs *regs)
154{ 155{
155 struct kprobe *p; 156 struct kprobe *p;
156 int ret = 0; 157 int ret = 0;
@@ -176,7 +177,8 @@ static int kprobe_handler(struct pt_regs *regs)
176 Disarm the probe we just hit, and ignore it. */ 177 Disarm the probe we just hit, and ignore it. */
177 p = get_kprobe(addr); 178 p = get_kprobe(addr);
178 if (p) { 179 if (p) {
179 if (kprobe_status == KPROBE_HIT_SS) { 180 if (kprobe_status == KPROBE_HIT_SS &&
181 *p->ainsn.insn == BREAKPOINT_INSTRUCTION) {
180 regs->eflags &= ~TF_MASK; 182 regs->eflags &= ~TF_MASK;
181 regs->eflags |= kprobe_saved_eflags; 183 regs->eflags |= kprobe_saved_eflags;
182 unlock_kprobes(); 184 unlock_kprobes();
@@ -220,7 +222,10 @@ static int kprobe_handler(struct pt_regs *regs)
220 * either a probepoint or a debugger breakpoint 222 * either a probepoint or a debugger breakpoint
221 * at this address. In either case, no further 223 * at this address. In either case, no further
222 * handling of this interrupt is appropriate. 224 * handling of this interrupt is appropriate.
225 * Back up over the (now missing) int3 and run
226 * the original instruction.
223 */ 227 */
228 regs->eip -= sizeof(kprobe_opcode_t);
224 ret = 1; 229 ret = 1;
225 } 230 }
226 /* Not one of ours: let kernel handle it */ 231 /* Not one of ours: let kernel handle it */
@@ -259,7 +264,7 @@ no_kprobe:
259/* 264/*
260 * Called when we hit the probe point at kretprobe_trampoline 265 * Called when we hit the probe point at kretprobe_trampoline
261 */ 266 */
262int trampoline_probe_handler(struct kprobe *p, struct pt_regs *regs) 267int __kprobes trampoline_probe_handler(struct kprobe *p, struct pt_regs *regs)
263{ 268{
264 struct kretprobe_instance *ri = NULL; 269 struct kretprobe_instance *ri = NULL;
265 struct hlist_head *head; 270 struct hlist_head *head;
@@ -338,7 +343,7 @@ int trampoline_probe_handler(struct kprobe *p, struct pt_regs *regs)
338 * that is atop the stack is the address following the copied instruction. 343 * that is atop the stack is the address following the copied instruction.
339 * We need to make it the address following the original instruction. 344 * We need to make it the address following the original instruction.
340 */ 345 */
341static void resume_execution(struct kprobe *p, struct pt_regs *regs) 346static void __kprobes resume_execution(struct kprobe *p, struct pt_regs *regs)
342{ 347{
343 unsigned long *tos = (unsigned long *)&regs->esp; 348 unsigned long *tos = (unsigned long *)&regs->esp;
344 unsigned long next_eip = 0; 349 unsigned long next_eip = 0;
@@ -444,8 +449,8 @@ static inline int kprobe_fault_handler(struct pt_regs *regs, int trapnr)
444/* 449/*
445 * Wrapper routine to for handling exceptions. 450 * Wrapper routine to for handling exceptions.
446 */ 451 */
447int kprobe_exceptions_notify(struct notifier_block *self, unsigned long val, 452int __kprobes kprobe_exceptions_notify(struct notifier_block *self,
448 void *data) 453 unsigned long val, void *data)
449{ 454{
450 struct die_args *args = (struct die_args *)data; 455 struct die_args *args = (struct die_args *)data;
451 switch (val) { 456 switch (val) {
@@ -473,7 +478,7 @@ int kprobe_exceptions_notify(struct notifier_block *self, unsigned long val,
473 return NOTIFY_DONE; 478 return NOTIFY_DONE;
474} 479}
475 480
476int setjmp_pre_handler(struct kprobe *p, struct pt_regs *regs) 481int __kprobes setjmp_pre_handler(struct kprobe *p, struct pt_regs *regs)
477{ 482{
478 struct jprobe *jp = container_of(p, struct jprobe, kp); 483 struct jprobe *jp = container_of(p, struct jprobe, kp);
479 unsigned long addr; 484 unsigned long addr;
@@ -495,7 +500,7 @@ int setjmp_pre_handler(struct kprobe *p, struct pt_regs *regs)
495 return 1; 500 return 1;
496} 501}
497 502
498void jprobe_return(void) 503void __kprobes jprobe_return(void)
499{ 504{
500 preempt_enable_no_resched(); 505 preempt_enable_no_resched();
501 asm volatile (" xchgl %%ebx,%%esp \n" 506 asm volatile (" xchgl %%ebx,%%esp \n"
@@ -506,7 +511,7 @@ void jprobe_return(void)
506 (jprobe_saved_esp):"memory"); 511 (jprobe_saved_esp):"memory");
507} 512}
508 513
509int longjmp_break_handler(struct kprobe *p, struct pt_regs *regs) 514int __kprobes longjmp_break_handler(struct kprobe *p, struct pt_regs *regs)
510{ 515{
511 u8 *addr = (u8 *) (regs->eip - 1); 516 u8 *addr = (u8 *) (regs->eip - 1);
512 unsigned long stack_addr = (unsigned long)jprobe_saved_esp; 517 unsigned long stack_addr = (unsigned long)jprobe_saved_esp;
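
All of these handlers gain the __kprobes marker for the same reason the entry stubs gained KPROBE_ENTRY: the kprobe machinery must not be probeable itself. A sketch of the annotation, assuming the usual section-attribute definition in linux/kprobes.h; how the probe-address check consumes the section is not shown here and is part of the assumption:

	#define __kprobes	__attribute__((__section__(".kprobes.text")))

	/* any function carrying the marker is collected by KPROBES_TEXT in the
	 * linker script, keeping it out of the normally probeable kernel text */
	static int __kprobes example_handler(struct pt_regs *regs)
	{
		return 0;
	}
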
diff --git a/arch/i386/kernel/nmi.c b/arch/i386/kernel/nmi.c
index 8bbdbda07a2d..0178457db721 100644
--- a/arch/i386/kernel/nmi.c
+++ b/arch/i386/kernel/nmi.c
@@ -478,6 +478,11 @@ void touch_nmi_watchdog (void)
478 */ 478 */
479 for (i = 0; i < NR_CPUS; i++) 479 for (i = 0; i < NR_CPUS; i++)
480 alert_counter[i] = 0; 480 alert_counter[i] = 0;
481
482 /*
483 * Tickle the softlockup detector too:
484 */
485 touch_softlockup_watchdog();
481} 486}
482 487
483extern void die_nmi(struct pt_regs *, const char *msg); 488extern void die_nmi(struct pt_regs *, const char *msg);
diff --git a/arch/i386/kernel/setup.c b/arch/i386/kernel/setup.c
index 294bcca985ab..e29fd5aeaf8e 100644
--- a/arch/i386/kernel/setup.c
+++ b/arch/i386/kernel/setup.c
@@ -82,7 +82,7 @@ EXPORT_SYMBOL(efi_enabled);
82/* cpu data as detected by the assembly code in head.S */ 82/* cpu data as detected by the assembly code in head.S */
83struct cpuinfo_x86 new_cpu_data __initdata = { 0, 0, 0, 0, -1, 1, 0, 0, -1 }; 83struct cpuinfo_x86 new_cpu_data __initdata = { 0, 0, 0, 0, -1, 1, 0, 0, -1 };
84/* common cpu data for all cpus */ 84/* common cpu data for all cpus */
85struct cpuinfo_x86 boot_cpu_data = { 0, 0, 0, 0, -1, 1, 0, 0, -1 }; 85struct cpuinfo_x86 boot_cpu_data __read_mostly = { 0, 0, 0, 0, -1, 1, 0, 0, -1 };
86EXPORT_SYMBOL(boot_cpu_data); 86EXPORT_SYMBOL(boot_cpu_data);
87 87
88unsigned long mmu_cr4_features; 88unsigned long mmu_cr4_features;
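
boot_cpu_data joins the __read_mostly variables introduced throughout this patch (vector_irq, irq_vector, the hw_interrupt_type tables, the NUMA node arrays): data that is read constantly but written almost never is grouped into its own section so it cannot share a cache line with write-hot data. A sketch of the mechanism, assuming the conventional definition in the architecture's cache.h:

	#define __read_mostly  __attribute__((__section__(".data.read_mostly")))

	/* the linker script then packs all such objects together, e.g. */
	static int sample_tunable __read_mostly = 1;
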
diff --git a/arch/i386/kernel/time.c b/arch/i386/kernel/time.c
index 6f794a78ee1e..eefea7c55008 100644
--- a/arch/i386/kernel/time.c
+++ b/arch/i386/kernel/time.c
@@ -194,10 +194,7 @@ int do_settimeofday(struct timespec *tv)
194 set_normalized_timespec(&xtime, sec, nsec); 194 set_normalized_timespec(&xtime, sec, nsec);
195 set_normalized_timespec(&wall_to_monotonic, wtm_sec, wtm_nsec); 195 set_normalized_timespec(&wall_to_monotonic, wtm_sec, wtm_nsec);
196 196
197 time_adjust = 0; /* stop active adjtime() */ 197 ntp_clear();
198 time_status |= STA_UNSYNC;
199 time_maxerror = NTP_PHASE_LIMIT;
200 time_esterror = NTP_PHASE_LIMIT;
201 write_sequnlock_irq(&xtime_lock); 198 write_sequnlock_irq(&xtime_lock);
202 clock_was_set(); 199 clock_was_set();
203 return 0; 200 return 0;
@@ -252,8 +249,7 @@ EXPORT_SYMBOL(profile_pc);
252 * timer_interrupt() needs to keep up the real-time clock, 249 * timer_interrupt() needs to keep up the real-time clock,
253 * as well as call the "do_timer()" routine every clocktick 250 * as well as call the "do_timer()" routine every clocktick
254 */ 251 */
255static inline void do_timer_interrupt(int irq, void *dev_id, 252static inline void do_timer_interrupt(int irq, struct pt_regs *regs)
256 struct pt_regs *regs)
257{ 253{
258#ifdef CONFIG_X86_IO_APIC 254#ifdef CONFIG_X86_IO_APIC
259 if (timer_ack) { 255 if (timer_ack) {
@@ -307,7 +303,7 @@ irqreturn_t timer_interrupt(int irq, void *dev_id, struct pt_regs *regs)
307 303
308 cur_timer->mark_offset(); 304 cur_timer->mark_offset();
309 305
310 do_timer_interrupt(irq, NULL, regs); 306 do_timer_interrupt(irq, regs);
311 307
312 write_sequnlock(&xtime_lock); 308 write_sequnlock(&xtime_lock);
313 return IRQ_HANDLED; 309 return IRQ_HANDLED;
@@ -348,7 +344,7 @@ static void sync_cmos_clock(unsigned long dummy)
348 * This code is run on a timer. If the clock is set, that timer 344 * This code is run on a timer. If the clock is set, that timer
349 * may not expire at the correct time. Thus, we adjust... 345 * may not expire at the correct time. Thus, we adjust...
350 */ 346 */
351 if ((time_status & STA_UNSYNC) != 0) 347 if (!ntp_synced())
352 /* 348 /*
353 * Not synced, exit, do not restart a timer (if one is 349 * Not synced, exit, do not restart a timer (if one is
354 * running, let it run out). 350 * running, let it run out).
@@ -422,6 +418,7 @@ static int timer_resume(struct sys_device *dev)
422 last_timer->resume(); 418 last_timer->resume();
423 cur_timer = last_timer; 419 cur_timer = last_timer;
424 last_timer = NULL; 420 last_timer = NULL;
421 touch_softlockup_watchdog();
425 return 0; 422 return 0;
426} 423}
427 424
diff --git a/arch/i386/kernel/timers/timer_hpet.c b/arch/i386/kernel/timers/timer_hpet.c
index 001de97c9e4a..d973a8b681fd 100644
--- a/arch/i386/kernel/timers/timer_hpet.c
+++ b/arch/i386/kernel/timers/timer_hpet.c
@@ -18,8 +18,8 @@
18#include "mach_timer.h" 18#include "mach_timer.h"
19#include <asm/hpet.h> 19#include <asm/hpet.h>
20 20
21static unsigned long __read_mostly hpet_usec_quotient; /* convert hpet clks to usec */ 21static unsigned long hpet_usec_quotient __read_mostly; /* convert hpet clks to usec */
22static unsigned long tsc_hpet_quotient; /* convert tsc to hpet clks */ 22static unsigned long tsc_hpet_quotient __read_mostly; /* convert tsc to hpet clks */
23static unsigned long hpet_last; /* hpet counter value at last tick*/ 23static unsigned long hpet_last; /* hpet counter value at last tick*/
24static unsigned long last_tsc_low; /* lsb 32 bits of Time Stamp Counter */ 24static unsigned long last_tsc_low; /* lsb 32 bits of Time Stamp Counter */
25static unsigned long last_tsc_high; /* msb 32 bits of Time Stamp Counter */ 25static unsigned long last_tsc_high; /* msb 32 bits of Time Stamp Counter */
diff --git a/arch/i386/kernel/traps.c b/arch/i386/kernel/traps.c
index 54629bb5893a..09a58cb6daa7 100644
--- a/arch/i386/kernel/traps.c
+++ b/arch/i386/kernel/traps.c
@@ -363,8 +363,9 @@ static inline void die_if_kernel(const char * str, struct pt_regs * regs, long e
363 die(str, regs, err); 363 die(str, regs, err);
364} 364}
365 365
366static void do_trap(int trapnr, int signr, char *str, int vm86, 366static void __kprobes do_trap(int trapnr, int signr, char *str, int vm86,
367 struct pt_regs * regs, long error_code, siginfo_t *info) 367 struct pt_regs * regs, long error_code,
368 siginfo_t *info)
368{ 369{
369 struct task_struct *tsk = current; 370 struct task_struct *tsk = current;
370 tsk->thread.error_code = error_code; 371 tsk->thread.error_code = error_code;
@@ -460,7 +461,8 @@ DO_ERROR(12, SIGBUS, "stack segment", stack_segment)
460DO_ERROR_INFO(17, SIGBUS, "alignment check", alignment_check, BUS_ADRALN, 0) 461DO_ERROR_INFO(17, SIGBUS, "alignment check", alignment_check, BUS_ADRALN, 0)
461DO_ERROR_INFO(32, SIGSEGV, "iret exception", iret_error, ILL_BADSTK, 0) 462DO_ERROR_INFO(32, SIGSEGV, "iret exception", iret_error, ILL_BADSTK, 0)
462 463
463fastcall void do_general_protection(struct pt_regs * regs, long error_code) 464fastcall void __kprobes do_general_protection(struct pt_regs * regs,
465 long error_code)
464{ 466{
465 int cpu = get_cpu(); 467 int cpu = get_cpu();
466 struct tss_struct *tss = &per_cpu(init_tss, cpu); 468 struct tss_struct *tss = &per_cpu(init_tss, cpu);
@@ -657,7 +659,7 @@ fastcall void do_nmi(struct pt_regs * regs, long error_code)
657 659
658 ++nmi_count(cpu); 660 ++nmi_count(cpu);
659 661
660 if (!nmi_callback(regs, cpu)) 662 if (!rcu_dereference(nmi_callback)(regs, cpu))
661 default_do_nmi(regs); 663 default_do_nmi(regs);
662 664
663 nmi_exit(); 665 nmi_exit();
@@ -665,7 +667,7 @@ fastcall void do_nmi(struct pt_regs * regs, long error_code)
665 667
666void set_nmi_callback(nmi_callback_t callback) 668void set_nmi_callback(nmi_callback_t callback)
667{ 669{
668 nmi_callback = callback; 670 rcu_assign_pointer(nmi_callback, callback);
669} 671}
670EXPORT_SYMBOL_GPL(set_nmi_callback); 672EXPORT_SYMBOL_GPL(set_nmi_callback);
671 673
@@ -676,7 +678,7 @@ void unset_nmi_callback(void)
676EXPORT_SYMBOL_GPL(unset_nmi_callback); 678EXPORT_SYMBOL_GPL(unset_nmi_callback);
677 679
678#ifdef CONFIG_KPROBES 680#ifdef CONFIG_KPROBES
679fastcall void do_int3(struct pt_regs *regs, long error_code) 681fastcall void __kprobes do_int3(struct pt_regs *regs, long error_code)
680{ 682{
681 if (notify_die(DIE_INT3, "int3", regs, error_code, 3, SIGTRAP) 683 if (notify_die(DIE_INT3, "int3", regs, error_code, 3, SIGTRAP)
682 == NOTIFY_STOP) 684 == NOTIFY_STOP)
@@ -710,7 +712,7 @@ fastcall void do_int3(struct pt_regs *regs, long error_code)
710 * find every occurrence of the TF bit that could be saved away even 712 * find every occurrence of the TF bit that could be saved away even
711 * by user code) 713 * by user code)
712 */ 714 */
713fastcall void do_debug(struct pt_regs * regs, long error_code) 715fastcall void __kprobes do_debug(struct pt_regs * regs, long error_code)
714{ 716{
715 unsigned int condition; 717 unsigned int condition;
716 struct task_struct *tsk = current; 718 struct task_struct *tsk = current;
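
The nmi_callback update/readout pair is the RCU publish/subscribe idiom that the new Documentation/RCU/NMI-RCU.txt in this patch set describes: rcu_assign_pointer() orders the callback's initialisation before the pointer store, and rcu_dereference() orders the pointer load before any use on the NMI side, so an NMI can never run a half-initialised callback. A minimal sketch of the pairing, with dummy_nmi_callback standing in for whatever default handler the file uses:

	static nmi_callback_t nmi_callback = dummy_nmi_callback;

	void set_nmi_callback(nmi_callback_t callback)
	{
		rcu_assign_pointer(nmi_callback, callback);	/* publish */
	}

	/* NMI path */
	if (!rcu_dereference(nmi_callback)(regs, cpu))		/* subscribe */
		default_do_nmi(regs);
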
diff --git a/arch/i386/kernel/vmlinux.lds.S b/arch/i386/kernel/vmlinux.lds.S
index 761972f8cb6c..13b9c62cbbb4 100644
--- a/arch/i386/kernel/vmlinux.lds.S
+++ b/arch/i386/kernel/vmlinux.lds.S
@@ -22,6 +22,7 @@ SECTIONS
22 *(.text) 22 *(.text)
23 SCHED_TEXT 23 SCHED_TEXT
24 LOCK_TEXT 24 LOCK_TEXT
25 KPROBES_TEXT
25 *(.fixup) 26 *(.fixup)
26 *(.gnu.warning) 27 *(.gnu.warning)
27 } = 0x9090 28 } = 0x9090
diff --git a/arch/i386/mach-default/topology.c b/arch/i386/mach-default/topology.c
index 23395fff35d1..b64314069e78 100644
--- a/arch/i386/mach-default/topology.c
+++ b/arch/i386/mach-default/topology.c
@@ -76,7 +76,7 @@ static int __init topology_init(void)
76 for_each_online_node(i) 76 for_each_online_node(i)
77 arch_register_node(i); 77 arch_register_node(i);
78 78
79 for_each_cpu(i) 79 for_each_present_cpu(i)
80 arch_register_cpu(i); 80 arch_register_cpu(i);
81 return 0; 81 return 0;
82} 82}
@@ -87,7 +87,7 @@ static int __init topology_init(void)
87{ 87{
88 int i; 88 int i;
89 89
90 for_each_cpu(i) 90 for_each_present_cpu(i)
91 arch_register_cpu(i); 91 arch_register_cpu(i);
92 return 0; 92 return 0;
93} 93}
diff --git a/arch/i386/mm/discontig.c b/arch/i386/mm/discontig.c
index 6711ce3f6916..244d8ec66be2 100644
--- a/arch/i386/mm/discontig.c
+++ b/arch/i386/mm/discontig.c
@@ -37,7 +37,7 @@
37#include <asm/mmzone.h> 37#include <asm/mmzone.h>
38#include <bios_ebda.h> 38#include <bios_ebda.h>
39 39
40struct pglist_data *node_data[MAX_NUMNODES]; 40struct pglist_data *node_data[MAX_NUMNODES] __read_mostly;
41EXPORT_SYMBOL(node_data); 41EXPORT_SYMBOL(node_data);
42bootmem_data_t node0_bdata; 42bootmem_data_t node0_bdata;
43 43
@@ -49,8 +49,8 @@ bootmem_data_t node0_bdata;
49 * 2) node_start_pfn - the starting page frame number for a node 49 * 2) node_start_pfn - the starting page frame number for a node
50 * 3) node_end_pfn - the ending page fram number for a node 50 * 3) node_end_pfn - the ending page fram number for a node
51 */ 51 */
52unsigned long node_start_pfn[MAX_NUMNODES]; 52unsigned long node_start_pfn[MAX_NUMNODES] __read_mostly;
53unsigned long node_end_pfn[MAX_NUMNODES]; 53unsigned long node_end_pfn[MAX_NUMNODES] __read_mostly;
54 54
55 55
56#ifdef CONFIG_DISCONTIGMEM 56#ifdef CONFIG_DISCONTIGMEM
@@ -66,7 +66,7 @@ unsigned long node_end_pfn[MAX_NUMNODES];
66 * physnode_map[4-7] = 1; 66 * physnode_map[4-7] = 1;
67 * physnode_map[8- ] = -1; 67 * physnode_map[8- ] = -1;
68 */ 68 */
69s8 physnode_map[MAX_ELEMENTS] = { [0 ... (MAX_ELEMENTS - 1)] = -1}; 69s8 physnode_map[MAX_ELEMENTS] __read_mostly = { [0 ... (MAX_ELEMENTS - 1)] = -1};
70EXPORT_SYMBOL(physnode_map); 70EXPORT_SYMBOL(physnode_map);
71 71
72void memory_present(int nid, unsigned long start, unsigned long end) 72void memory_present(int nid, unsigned long start, unsigned long end)
diff --git a/arch/i386/mm/fault.c b/arch/i386/mm/fault.c
index 411b8500ad1b..9edd4485b91e 100644
--- a/arch/i386/mm/fault.c
+++ b/arch/i386/mm/fault.c
@@ -21,6 +21,7 @@
21#include <linux/vt_kern.h> /* For unblank_screen() */ 21#include <linux/vt_kern.h> /* For unblank_screen() */
22#include <linux/highmem.h> 22#include <linux/highmem.h>
23#include <linux/module.h> 23#include <linux/module.h>
24#include <linux/kprobes.h>
24 25
25#include <asm/system.h> 26#include <asm/system.h>
26#include <asm/uaccess.h> 27#include <asm/uaccess.h>
@@ -223,7 +224,8 @@ fastcall void do_invalid_op(struct pt_regs *, unsigned long);
223 * bit 1 == 0 means read, 1 means write 224 * bit 1 == 0 means read, 1 means write
224 * bit 2 == 0 means kernel, 1 means user-mode 225 * bit 2 == 0 means kernel, 1 means user-mode
225 */ 226 */
226fastcall void do_page_fault(struct pt_regs *regs, unsigned long error_code) 227fastcall void __kprobes do_page_fault(struct pt_regs *regs,
228 unsigned long error_code)
227{ 229{
228 struct task_struct *tsk; 230 struct task_struct *tsk;
229 struct mm_struct *mm; 231 struct mm_struct *mm;
diff --git a/arch/i386/mm/init.c b/arch/i386/mm/init.c
index 9edfc058b894..2ebaf75f732e 100644
--- a/arch/i386/mm/init.c
+++ b/arch/i386/mm/init.c
@@ -393,7 +393,7 @@ void zap_low_mappings (void)
393} 393}
394 394
395static int disable_nx __initdata = 0; 395static int disable_nx __initdata = 0;
396u64 __supported_pte_mask = ~_PAGE_NX; 396u64 __supported_pte_mask __read_mostly = ~_PAGE_NX;
397 397
398/* 398/*
399 * noexec = on|off 399 * noexec = on|off
diff --git a/arch/i386/oprofile/init.c b/arch/i386/oprofile/init.c
index c90332de582b..5341d481d92f 100644
--- a/arch/i386/oprofile/init.c
+++ b/arch/i386/oprofile/init.c
@@ -15,9 +15,9 @@
15 * with the NMI mode driver. 15 * with the NMI mode driver.
16 */ 16 */
17 17
18extern int nmi_init(struct oprofile_operations * ops); 18extern int op_nmi_init(struct oprofile_operations * ops);
19extern int nmi_timer_init(struct oprofile_operations * ops); 19extern int op_nmi_timer_init(struct oprofile_operations * ops);
20extern void nmi_exit(void); 20extern void op_nmi_exit(void);
21extern void x86_backtrace(struct pt_regs * const regs, unsigned int depth); 21extern void x86_backtrace(struct pt_regs * const regs, unsigned int depth);
22 22
23 23
@@ -28,11 +28,11 @@ int __init oprofile_arch_init(struct oprofile_operations * ops)
28 ret = -ENODEV; 28 ret = -ENODEV;
29 29
30#ifdef CONFIG_X86_LOCAL_APIC 30#ifdef CONFIG_X86_LOCAL_APIC
31 ret = nmi_init(ops); 31 ret = op_nmi_init(ops);
32#endif 32#endif
33#ifdef CONFIG_X86_IO_APIC 33#ifdef CONFIG_X86_IO_APIC
34 if (ret < 0) 34 if (ret < 0)
35 ret = nmi_timer_init(ops); 35 ret = op_nmi_timer_init(ops);
36#endif 36#endif
37 ops->backtrace = x86_backtrace; 37 ops->backtrace = x86_backtrace;
38 38
@@ -43,6 +43,6 @@ int __init oprofile_arch_init(struct oprofile_operations * ops)
43void oprofile_arch_exit(void) 43void oprofile_arch_exit(void)
44{ 44{
45#ifdef CONFIG_X86_LOCAL_APIC 45#ifdef CONFIG_X86_LOCAL_APIC
46 nmi_exit(); 46 op_nmi_exit();
47#endif 47#endif
48} 48}
diff --git a/arch/i386/oprofile/nmi_int.c b/arch/i386/oprofile/nmi_int.c
index 255e4702d185..0493e8b8ec49 100644
--- a/arch/i386/oprofile/nmi_int.c
+++ b/arch/i386/oprofile/nmi_int.c
@@ -355,7 +355,7 @@ static int __init ppro_init(char ** cpu_type)
355/* in order to get driverfs right */ 355/* in order to get driverfs right */
356static int using_nmi; 356static int using_nmi;
357 357
358int __init nmi_init(struct oprofile_operations *ops) 358int __init op_nmi_init(struct oprofile_operations *ops)
359{ 359{
360 __u8 vendor = boot_cpu_data.x86_vendor; 360 __u8 vendor = boot_cpu_data.x86_vendor;
361 __u8 family = boot_cpu_data.x86; 361 __u8 family = boot_cpu_data.x86;
@@ -420,7 +420,7 @@ int __init nmi_init(struct oprofile_operations *ops)
420} 420}
421 421
422 422
423void nmi_exit(void) 423void op_nmi_exit(void)
424{ 424{
425 if (using_nmi) 425 if (using_nmi)
426 exit_driverfs(); 426 exit_driverfs();
diff --git a/arch/i386/oprofile/nmi_timer_int.c b/arch/i386/oprofile/nmi_timer_int.c
index c58d0c14f274..ad93cdd55d63 100644
--- a/arch/i386/oprofile/nmi_timer_int.c
+++ b/arch/i386/oprofile/nmi_timer_int.c
@@ -40,7 +40,7 @@ static void timer_stop(void)
40} 40}
41 41
42 42
43int __init nmi_timer_init(struct oprofile_operations * ops) 43int __init op_nmi_timer_init(struct oprofile_operations * ops)
44{ 44{
45 extern int nmi_active; 45 extern int nmi_active;
46 46
diff --git a/arch/ia64/Kconfig b/arch/ia64/Kconfig
index 3deced637f07..17b5dbf8c311 100644
--- a/arch/ia64/Kconfig
+++ b/arch/ia64/Kconfig
@@ -434,6 +434,11 @@ config GENERIC_IRQ_PROBE
434 bool 434 bool
435 default y 435 default y
436 436
437config GENERIC_PENDING_IRQ
438 bool
439 depends on GENERIC_HARDIRQS && SMP
440 default y
441
437source "arch/ia64/hp/sim/Kconfig" 442source "arch/ia64/hp/sim/Kconfig"
438 443
439source "arch/ia64/oprofile/Kconfig" 444source "arch/ia64/oprofile/Kconfig"
diff --git a/arch/ia64/hp/sim/simserial.c b/arch/ia64/hp/sim/simserial.c
index 7dcb8582ae0d..b42ec37be51c 100644
--- a/arch/ia64/hp/sim/simserial.c
+++ b/arch/ia64/hp/sim/simserial.c
@@ -130,7 +130,7 @@ static void rs_stop(struct tty_struct *tty)
130 130
131static void rs_start(struct tty_struct *tty) 131static void rs_start(struct tty_struct *tty)
132{ 132{
133#if SIMSERIAL_DEBUG 133#ifdef SIMSERIAL_DEBUG
134 printk("rs_start: tty->stopped=%d tty->hw_stopped=%d tty->flow_stopped=%d\n", 134 printk("rs_start: tty->stopped=%d tty->hw_stopped=%d tty->flow_stopped=%d\n",
135 tty->stopped, tty->hw_stopped, tty->flow_stopped); 135 tty->stopped, tty->hw_stopped, tty->flow_stopped);
136#endif 136#endif
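
The one-character fix above matters because the debug switch is normally defined without a value: "#if SIMSERIAL_DEBUG" then tests an empty (or, if undefined, zero) expression, whereas "#ifdef" asks only whether the macro exists. Illustration, assuming the conventional empty define:

	#define SIMSERIAL_DEBUG			/* defined, but expands to nothing */

	static void show_debug_state(void)
	{
	#ifdef SIMSERIAL_DEBUG			/* taken: the macro is defined */
		printk("simserial debugging is compiled in\n");
	#endif
	}
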
diff --git a/arch/ia64/ia32/ia32_entry.S b/arch/ia64/ia32/ia32_entry.S
index 829a6d80711c..0708edb06cc4 100644
--- a/arch/ia64/ia32/ia32_entry.S
+++ b/arch/ia64/ia32/ia32_entry.S
@@ -215,7 +215,7 @@ ia32_syscall_table:
215 data8 sys32_fork 215 data8 sys32_fork
216 data8 sys_read 216 data8 sys_read
217 data8 sys_write 217 data8 sys_write
218 data8 sys32_open /* 5 */ 218 data8 compat_sys_open /* 5 */
219 data8 sys_close 219 data8 sys_close
220 data8 sys32_waitpid 220 data8 sys32_waitpid
221 data8 sys_creat 221 data8 sys_creat
diff --git a/arch/ia64/ia32/sys_ia32.c b/arch/ia64/ia32/sys_ia32.c
index c1e20d65dd6c..e29a8a55486a 100644
--- a/arch/ia64/ia32/sys_ia32.c
+++ b/arch/ia64/ia32/sys_ia32.c
@@ -2359,37 +2359,6 @@ sys32_brk (unsigned int brk)
2359 return ret; 2359 return ret;
2360} 2360}
2361 2361
2362/*
2363 * Exactly like fs/open.c:sys_open(), except that it doesn't set the O_LARGEFILE flag.
2364 */
2365asmlinkage long
2366sys32_open (const char __user * filename, int flags, int mode)
2367{
2368 char * tmp;
2369 int fd, error;
2370
2371 tmp = getname(filename);
2372 fd = PTR_ERR(tmp);
2373 if (!IS_ERR(tmp)) {
2374 fd = get_unused_fd();
2375 if (fd >= 0) {
2376 struct file *f = filp_open(tmp, flags, mode);
2377 error = PTR_ERR(f);
2378 if (IS_ERR(f))
2379 goto out_error;
2380 fd_install(fd, f);
2381 }
2382out:
2383 putname(tmp);
2384 }
2385 return fd;
2386
2387out_error:
2388 put_unused_fd(fd);
2389 fd = error;
2390 goto out;
2391}
2392
2393/* Structure for ia32 emulation on ia64 */ 2362/* Structure for ia32 emulation on ia64 */
2394struct epoll_event32 2363struct epoll_event32
2395{ 2364{
diff --git a/arch/ia64/kernel/Makefile b/arch/ia64/kernel/Makefile
index b242594be55b..307514f7a282 100644
--- a/arch/ia64/kernel/Makefile
+++ b/arch/ia64/kernel/Makefile
@@ -16,7 +16,7 @@ obj-$(CONFIG_IA64_HP_ZX1_SWIOTLB) += acpi-ext.o
16obj-$(CONFIG_IA64_PALINFO) += palinfo.o 16obj-$(CONFIG_IA64_PALINFO) += palinfo.o
17obj-$(CONFIG_IOSAPIC) += iosapic.o 17obj-$(CONFIG_IOSAPIC) += iosapic.o
18obj-$(CONFIG_MODULES) += module.o 18obj-$(CONFIG_MODULES) += module.o
19obj-$(CONFIG_SMP) += smp.o smpboot.o domain.o 19obj-$(CONFIG_SMP) += smp.o smpboot.o
20obj-$(CONFIG_NUMA) += numa.o 20obj-$(CONFIG_NUMA) += numa.o
21obj-$(CONFIG_PERFMON) += perfmon_default_smpl.o 21obj-$(CONFIG_PERFMON) += perfmon_default_smpl.o
22obj-$(CONFIG_IA64_CYCLONE) += cyclone.o 22obj-$(CONFIG_IA64_CYCLONE) += cyclone.o
diff --git a/arch/ia64/kernel/domain.c b/arch/ia64/kernel/domain.c
deleted file mode 100644
index bbb8efe126b7..000000000000
--- a/arch/ia64/kernel/domain.c
+++ /dev/null
@@ -1,396 +0,0 @@
1/*
2 * arch/ia64/kernel/domain.c
3 * Architecture specific sched-domains builder.
4 *
5 * Copyright (C) 2004 Jesse Barnes
6 * Copyright (C) 2004 Silicon Graphics, Inc.
7 */
8
9#include <linux/sched.h>
10#include <linux/percpu.h>
11#include <linux/slab.h>
12#include <linux/cpumask.h>
13#include <linux/init.h>
14#include <linux/topology.h>
15#include <linux/nodemask.h>
16
17#define SD_NODES_PER_DOMAIN 16
18
19#ifdef CONFIG_NUMA
20/**
21 * find_next_best_node - find the next node to include in a sched_domain
22 * @node: node whose sched_domain we're building
23 * @used_nodes: nodes already in the sched_domain
24 *
25 * Find the next node to include in a given scheduling domain. Simply
26 * finds the closest node not already in the @used_nodes map.
27 *
28 * Should use nodemask_t.
29 */
30static int find_next_best_node(int node, unsigned long *used_nodes)
31{
32 int i, n, val, min_val, best_node = 0;
33
34 min_val = INT_MAX;
35
36 for (i = 0; i < MAX_NUMNODES; i++) {
37 /* Start at @node */
38 n = (node + i) % MAX_NUMNODES;
39
40 if (!nr_cpus_node(n))
41 continue;
42
43 /* Skip already used nodes */
44 if (test_bit(n, used_nodes))
45 continue;
46
47 /* Simple min distance search */
48 val = node_distance(node, n);
49
50 if (val < min_val) {
51 min_val = val;
52 best_node = n;
53 }
54 }
55
56 set_bit(best_node, used_nodes);
57 return best_node;
58}
59
60/**
61 * sched_domain_node_span - get a cpumask for a node's sched_domain
62 * @node: node whose cpumask we're constructing
63 * @size: number of nodes to include in this span
64 *
65 * Given a node, construct a good cpumask for its sched_domain to span. It
66 * should be one that prevents unnecessary balancing, but also spreads tasks
67 * out optimally.
68 */
69static cpumask_t sched_domain_node_span(int node)
70{
71 int i;
72 cpumask_t span, nodemask;
73 DECLARE_BITMAP(used_nodes, MAX_NUMNODES);
74
75 cpus_clear(span);
76 bitmap_zero(used_nodes, MAX_NUMNODES);
77
78 nodemask = node_to_cpumask(node);
79 cpus_or(span, span, nodemask);
80 set_bit(node, used_nodes);
81
82 for (i = 1; i < SD_NODES_PER_DOMAIN; i++) {
83 int next_node = find_next_best_node(node, used_nodes);
84 nodemask = node_to_cpumask(next_node);
85 cpus_or(span, span, nodemask);
86 }
87
88 return span;
89}
90#endif
91
92/*
93 * At the moment, CONFIG_SCHED_SMT is never defined, but leave it in so we
94 * can switch it on easily if needed.
95 */
96#ifdef CONFIG_SCHED_SMT
97static DEFINE_PER_CPU(struct sched_domain, cpu_domains);
98static struct sched_group sched_group_cpus[NR_CPUS];
99static int cpu_to_cpu_group(int cpu)
100{
101 return cpu;
102}
103#endif
104
105static DEFINE_PER_CPU(struct sched_domain, phys_domains);
106static struct sched_group sched_group_phys[NR_CPUS];
107static int cpu_to_phys_group(int cpu)
108{
109#ifdef CONFIG_SCHED_SMT
110 return first_cpu(cpu_sibling_map[cpu]);
111#else
112 return cpu;
113#endif
114}
115
116#ifdef CONFIG_NUMA
117/*
118 * The init_sched_build_groups can't handle what we want to do with node
119 * groups, so roll our own. Now each node has its own list of groups which
120 * gets dynamically allocated.
121 */
122static DEFINE_PER_CPU(struct sched_domain, node_domains);
123static struct sched_group *sched_group_nodes[MAX_NUMNODES];
124
125static DEFINE_PER_CPU(struct sched_domain, allnodes_domains);
126static struct sched_group sched_group_allnodes[MAX_NUMNODES];
127
128static int cpu_to_allnodes_group(int cpu)
129{
130 return cpu_to_node(cpu);
131}
132#endif
133
134/*
135 * Build sched domains for a given set of cpus and attach the sched domains
136 * to the individual cpus
137 */
138void build_sched_domains(const cpumask_t *cpu_map)
139{
140 int i;
141
142 /*
143 * Set up domains for cpus specified by the cpu_map.
144 */
145 for_each_cpu_mask(i, *cpu_map) {
146 int group;
147 struct sched_domain *sd = NULL, *p;
148 cpumask_t nodemask = node_to_cpumask(cpu_to_node(i));
149
150 cpus_and(nodemask, nodemask, *cpu_map);
151
152#ifdef CONFIG_NUMA
153 if (num_online_cpus()
154 > SD_NODES_PER_DOMAIN*cpus_weight(nodemask)) {
155 sd = &per_cpu(allnodes_domains, i);
156 *sd = SD_ALLNODES_INIT;
157 sd->span = *cpu_map;
158 group = cpu_to_allnodes_group(i);
159 sd->groups = &sched_group_allnodes[group];
160 p = sd;
161 } else
162 p = NULL;
163
164 sd = &per_cpu(node_domains, i);
165 *sd = SD_NODE_INIT;
166 sd->span = sched_domain_node_span(cpu_to_node(i));
167 sd->parent = p;
168 cpus_and(sd->span, sd->span, *cpu_map);
169#endif
170
171 p = sd;
172 sd = &per_cpu(phys_domains, i);
173 group = cpu_to_phys_group(i);
174 *sd = SD_CPU_INIT;
175 sd->span = nodemask;
176 sd->parent = p;
177 sd->groups = &sched_group_phys[group];
178
179#ifdef CONFIG_SCHED_SMT
180 p = sd;
181 sd = &per_cpu(cpu_domains, i);
182 group = cpu_to_cpu_group(i);
183 *sd = SD_SIBLING_INIT;
184 sd->span = cpu_sibling_map[i];
185 cpus_and(sd->span, sd->span, *cpu_map);
186 sd->parent = p;
187 sd->groups = &sched_group_cpus[group];
188#endif
189 }
190
191#ifdef CONFIG_SCHED_SMT
192 /* Set up CPU (sibling) groups */
193 for_each_cpu_mask(i, *cpu_map) {
194 cpumask_t this_sibling_map = cpu_sibling_map[i];
195 cpus_and(this_sibling_map, this_sibling_map, *cpu_map);
196 if (i != first_cpu(this_sibling_map))
197 continue;
198
199 init_sched_build_groups(sched_group_cpus, this_sibling_map,
200 &cpu_to_cpu_group);
201 }
202#endif
203
204 /* Set up physical groups */
205 for (i = 0; i < MAX_NUMNODES; i++) {
206 cpumask_t nodemask = node_to_cpumask(i);
207
208 cpus_and(nodemask, nodemask, *cpu_map);
209 if (cpus_empty(nodemask))
210 continue;
211
212 init_sched_build_groups(sched_group_phys, nodemask,
213 &cpu_to_phys_group);
214 }
215
216#ifdef CONFIG_NUMA
217 init_sched_build_groups(sched_group_allnodes, *cpu_map,
218 &cpu_to_allnodes_group);
219
220 for (i = 0; i < MAX_NUMNODES; i++) {
221 /* Set up node groups */
222 struct sched_group *sg, *prev;
223 cpumask_t nodemask = node_to_cpumask(i);
224 cpumask_t domainspan;
225 cpumask_t covered = CPU_MASK_NONE;
226 int j;
227
228 cpus_and(nodemask, nodemask, *cpu_map);
229 if (cpus_empty(nodemask))
230 continue;
231
232 domainspan = sched_domain_node_span(i);
233 cpus_and(domainspan, domainspan, *cpu_map);
234
235 sg = kmalloc(sizeof(struct sched_group), GFP_KERNEL);
236 sched_group_nodes[i] = sg;
237 for_each_cpu_mask(j, nodemask) {
238 struct sched_domain *sd;
239 sd = &per_cpu(node_domains, j);
240 sd->groups = sg;
241 if (sd->groups == NULL) {
242 /* Turn off balancing if we have no groups */
243 sd->flags = 0;
244 }
245 }
246 if (!sg) {
247 printk(KERN_WARNING
248 "Can not alloc domain group for node %d\n", i);
249 continue;
250 }
251 sg->cpu_power = 0;
252 sg->cpumask = nodemask;
253 cpus_or(covered, covered, nodemask);
254 prev = sg;
255
256 for (j = 0; j < MAX_NUMNODES; j++) {
257 cpumask_t tmp, notcovered;
258 int n = (i + j) % MAX_NUMNODES;
259
260 cpus_complement(notcovered, covered);
261 cpus_and(tmp, notcovered, *cpu_map);
262 cpus_and(tmp, tmp, domainspan);
263 if (cpus_empty(tmp))
264 break;
265
266 nodemask = node_to_cpumask(n);
267 cpus_and(tmp, tmp, nodemask);
268 if (cpus_empty(tmp))
269 continue;
270
271 sg = kmalloc(sizeof(struct sched_group), GFP_KERNEL);
272 if (!sg) {
273 printk(KERN_WARNING
274 "Can not alloc domain group for node %d\n", j);
275 break;
276 }
277 sg->cpu_power = 0;
278 sg->cpumask = tmp;
279 cpus_or(covered, covered, tmp);
280 prev->next = sg;
281 prev = sg;
282 }
283 prev->next = sched_group_nodes[i];
284 }
285#endif
286
287 /* Calculate CPU power for physical packages and nodes */
288 for_each_cpu_mask(i, *cpu_map) {
289 int power;
290 struct sched_domain *sd;
291#ifdef CONFIG_SCHED_SMT
292 sd = &per_cpu(cpu_domains, i);
293 power = SCHED_LOAD_SCALE;
294 sd->groups->cpu_power = power;
295#endif
296
297 sd = &per_cpu(phys_domains, i);
298 power = SCHED_LOAD_SCALE + SCHED_LOAD_SCALE *
299 (cpus_weight(sd->groups->cpumask)-1) / 10;
300 sd->groups->cpu_power = power;
301
302#ifdef CONFIG_NUMA
303 sd = &per_cpu(allnodes_domains, i);
304 if (sd->groups) {
305 power = SCHED_LOAD_SCALE + SCHED_LOAD_SCALE *
306 (cpus_weight(sd->groups->cpumask)-1) / 10;
307 sd->groups->cpu_power = power;
308 }
309#endif
310 }
311
312#ifdef CONFIG_NUMA
313 for (i = 0; i < MAX_NUMNODES; i++) {
314 struct sched_group *sg = sched_group_nodes[i];
315 int j;
316
317 if (sg == NULL)
318 continue;
319next_sg:
320 for_each_cpu_mask(j, sg->cpumask) {
321 struct sched_domain *sd;
322 int power;
323
324 sd = &per_cpu(phys_domains, j);
325 if (j != first_cpu(sd->groups->cpumask)) {
326 /*
327 * Only add "power" once for each
328 * physical package.
329 */
330 continue;
331 }
332 power = SCHED_LOAD_SCALE + SCHED_LOAD_SCALE *
333 (cpus_weight(sd->groups->cpumask)-1) / 10;
334
335 sg->cpu_power += power;
336 }
337 sg = sg->next;
338 if (sg != sched_group_nodes[i])
339 goto next_sg;
340 }
341#endif
342
343 /* Attach the domains */
344 for_each_cpu_mask(i, *cpu_map) {
345 struct sched_domain *sd;
346#ifdef CONFIG_SCHED_SMT
347 sd = &per_cpu(cpu_domains, i);
348#else
349 sd = &per_cpu(phys_domains, i);
350#endif
351 cpu_attach_domain(sd, i);
352 }
353}
354/*
355 * Set up scheduler domains and groups. Callers must hold the hotplug lock.
356 */
357void arch_init_sched_domains(const cpumask_t *cpu_map)
358{
359 cpumask_t cpu_default_map;
360
361 /*
362 * Setup mask for cpus without special case scheduling requirements.
363 * For now this just excludes isolated cpus, but could be used to
364 * exclude other special cases in the future.
365 */
366 cpus_andnot(cpu_default_map, *cpu_map, cpu_isolated_map);
367
368 build_sched_domains(&cpu_default_map);
369}
370
371void arch_destroy_sched_domains(const cpumask_t *cpu_map)
372{
373#ifdef CONFIG_NUMA
374 int i;
375 for (i = 0; i < MAX_NUMNODES; i++) {
376 cpumask_t nodemask = node_to_cpumask(i);
377 struct sched_group *oldsg, *sg = sched_group_nodes[i];
378
379 cpus_and(nodemask, nodemask, *cpu_map);
380 if (cpus_empty(nodemask))
381 continue;
382
383 if (sg == NULL)
384 continue;
385 sg = sg->next;
386next_sg:
387 oldsg = sg;
388 sg = sg->next;
389 kfree(oldsg);
390 if (oldsg != sched_group_nodes[i])
391 goto next_sg;
392 sched_group_nodes[i] = NULL;
393 }
394#endif
395}
396
diff --git a/arch/ia64/kernel/irq.c b/arch/ia64/kernel/irq.c
index 28f2aadc38d0..205d98028261 100644
--- a/arch/ia64/kernel/irq.c
+++ b/arch/ia64/kernel/irq.c
@@ -91,23 +91,8 @@ skip:
91} 91}
92 92
93#ifdef CONFIG_SMP 93#ifdef CONFIG_SMP
94/*
95 * This is updated when the user sets irq affinity via /proc
96 */
97static cpumask_t __cacheline_aligned pending_irq_cpumask[NR_IRQS];
98static unsigned long pending_irq_redir[BITS_TO_LONGS(NR_IRQS)];
99
100static char irq_redir [NR_IRQS]; // = { [0 ... NR_IRQS-1] = 1 }; 94static char irq_redir [NR_IRQS]; // = { [0 ... NR_IRQS-1] = 1 };
101 95
102/*
103 * Arch specific routine for deferred write to iosapic rte to reprogram
104 * intr destination.
105 */
106void proc_set_irq_affinity(unsigned int irq, cpumask_t mask_val)
107{
108 pending_irq_cpumask[irq] = mask_val;
109}
110
111void set_irq_affinity_info (unsigned int irq, int hwid, int redir) 96void set_irq_affinity_info (unsigned int irq, int hwid, int redir)
112{ 97{
113 cpumask_t mask = CPU_MASK_NONE; 98 cpumask_t mask = CPU_MASK_NONE;
@@ -116,32 +101,10 @@ void set_irq_affinity_info (unsigned int irq, int hwid, int redir)
116 101
117 if (irq < NR_IRQS) { 102 if (irq < NR_IRQS) {
118 irq_affinity[irq] = mask; 103 irq_affinity[irq] = mask;
104 set_irq_info(irq, mask);
119 irq_redir[irq] = (char) (redir & 0xff); 105 irq_redir[irq] = (char) (redir & 0xff);
120 } 106 }
121} 107}
122
123
124void move_irq(int irq)
125{
126 /* note - we hold desc->lock */
127 cpumask_t tmp;
128 irq_desc_t *desc = irq_descp(irq);
129 int redir = test_bit(irq, pending_irq_redir);
130
131 if (unlikely(!desc->handler->set_affinity))
132 return;
133
134 if (!cpus_empty(pending_irq_cpumask[irq])) {
135 cpus_and(tmp, pending_irq_cpumask[irq], cpu_online_map);
136 if (unlikely(!cpus_empty(tmp))) {
137 desc->handler->set_affinity(irq | (redir ? IA64_IRQ_REDIRECTED : 0),
138 pending_irq_cpumask[irq]);
139 }
140 cpus_clear(pending_irq_cpumask[irq]);
141 }
142}
143
144
145#endif /* CONFIG_SMP */ 108#endif /* CONFIG_SMP */
146 109
147#ifdef CONFIG_HOTPLUG_CPU 110#ifdef CONFIG_HOTPLUG_CPU
diff --git a/arch/ia64/kernel/jprobes.S b/arch/ia64/kernel/jprobes.S
index b7fa3ccd2b0f..2323377e3695 100644
--- a/arch/ia64/kernel/jprobes.S
+++ b/arch/ia64/kernel/jprobes.S
@@ -49,6 +49,7 @@
49 /* 49 /*
50 * void jprobe_break(void) 50 * void jprobe_break(void)
51 */ 51 */
52 .section .kprobes.text, "ax"
52ENTRY(jprobe_break) 53ENTRY(jprobe_break)
53 break.m 0x80300 54 break.m 0x80300
54END(jprobe_break) 55END(jprobe_break)
diff --git a/arch/ia64/kernel/kprobes.c b/arch/ia64/kernel/kprobes.c
index 884f5cd27d8a..471086b808a4 100644
--- a/arch/ia64/kernel/kprobes.c
+++ b/arch/ia64/kernel/kprobes.c
@@ -87,12 +87,25 @@ static enum instruction_type bundle_encoding[32][3] = {
87 * is IP relative instruction and update the kprobe 87 * is IP relative instruction and update the kprobe
88 * inst flag accordingly 88 * inst flag accordingly
89 */ 89 */
90static void update_kprobe_inst_flag(uint template, uint slot, uint major_opcode, 90static void __kprobes update_kprobe_inst_flag(uint template, uint slot,
91 unsigned long kprobe_inst, struct kprobe *p) 91 uint major_opcode,
92 unsigned long kprobe_inst,
93 struct kprobe *p)
92{ 94{
93 p->ainsn.inst_flag = 0; 95 p->ainsn.inst_flag = 0;
94 p->ainsn.target_br_reg = 0; 96 p->ainsn.target_br_reg = 0;
95 97
98 /* Check for Break instruction
99 * Bits 37:40 Major opcode to be zero
100 * Bits 27:32 X6 to be zero
101 * Bits 32:35 X3 to be zero
102 */
103 if ((!major_opcode) && (!((kprobe_inst >> 27) & 0x1FF)) ) {
104 /* is a break instruction */
105 p->ainsn.inst_flag |= INST_FLAG_BREAK_INST;
106 return;
107 }
108
96 if (bundle_encoding[template][slot] == B) { 109 if (bundle_encoding[template][slot] == B) {
97 switch (major_opcode) { 110 switch (major_opcode) {
98 case INDIRECT_CALL_OPCODE: 111 case INDIRECT_CALL_OPCODE:
@@ -126,8 +139,10 @@ static void update_kprobe_inst_flag(uint template, uint slot, uint major_opcode
126 * Returns 0 if supported 139 * Returns 0 if supported
127 * Returns -EINVAL if unsupported 140 * Returns -EINVAL if unsupported
128 */ 141 */
129static int unsupported_inst(uint template, uint slot, uint major_opcode, 142static int __kprobes unsupported_inst(uint template, uint slot,
130 unsigned long kprobe_inst, struct kprobe *p) 143 uint major_opcode,
144 unsigned long kprobe_inst,
145 struct kprobe *p)
131{ 146{
132 unsigned long addr = (unsigned long)p->addr; 147 unsigned long addr = (unsigned long)p->addr;
133 148
@@ -168,8 +183,9 @@ static int unsupported_inst(uint template, uint slot, uint major_opcode,
168 * on which we are inserting kprobe is cmp instruction 183 * on which we are inserting kprobe is cmp instruction
169 * with ctype as unc. 184 * with ctype as unc.
170 */ 185 */
171static uint is_cmp_ctype_unc_inst(uint template, uint slot, uint major_opcode, 186static uint __kprobes is_cmp_ctype_unc_inst(uint template, uint slot,
172unsigned long kprobe_inst) 187 uint major_opcode,
188 unsigned long kprobe_inst)
173{ 189{
174 cmp_inst_t cmp_inst; 190 cmp_inst_t cmp_inst;
175 uint ctype_unc = 0; 191 uint ctype_unc = 0;
@@ -201,8 +217,10 @@ out:
201 * In this function we override the bundle with 217 * In this function we override the bundle with
202 * the break instruction at the given slot. 218 * the break instruction at the given slot.
203 */ 219 */
204static void prepare_break_inst(uint template, uint slot, uint major_opcode, 220static void __kprobes prepare_break_inst(uint template, uint slot,
205 unsigned long kprobe_inst, struct kprobe *p) 221 uint major_opcode,
222 unsigned long kprobe_inst,
223 struct kprobe *p)
206{ 224{
207 unsigned long break_inst = BREAK_INST; 225 unsigned long break_inst = BREAK_INST;
208 bundle_t *bundle = &p->ainsn.insn.bundle; 226 bundle_t *bundle = &p->ainsn.insn.bundle;
@@ -271,7 +289,8 @@ static inline int in_ivt_functions(unsigned long addr)
271 && addr < (unsigned long)__end_ivt_text); 289 && addr < (unsigned long)__end_ivt_text);
272} 290}
273 291
274static int valid_kprobe_addr(int template, int slot, unsigned long addr) 292static int __kprobes valid_kprobe_addr(int template, int slot,
293 unsigned long addr)
275{ 294{
276 if ((slot > 2) || ((bundle_encoding[template][1] == L) && slot > 1)) { 295 if ((slot > 2) || ((bundle_encoding[template][1] == L) && slot > 1)) {
277 printk(KERN_WARNING "Attempting to insert unaligned kprobe " 296 printk(KERN_WARNING "Attempting to insert unaligned kprobe "
@@ -323,7 +342,7 @@ static void kretprobe_trampoline(void)
323 * - cleanup by marking the instance as unused 342 * - cleanup by marking the instance as unused
324 * - long jump back to the original return address 343 * - long jump back to the original return address
325 */ 344 */
326int trampoline_probe_handler(struct kprobe *p, struct pt_regs *regs) 345int __kprobes trampoline_probe_handler(struct kprobe *p, struct pt_regs *regs)
327{ 346{
328 struct kretprobe_instance *ri = NULL; 347 struct kretprobe_instance *ri = NULL;
329 struct hlist_head *head; 348 struct hlist_head *head;
@@ -381,7 +400,8 @@ int trampoline_probe_handler(struct kprobe *p, struct pt_regs *regs)
381 return 1; 400 return 1;
382} 401}
383 402
384void arch_prepare_kretprobe(struct kretprobe *rp, struct pt_regs *regs) 403void __kprobes arch_prepare_kretprobe(struct kretprobe *rp,
404 struct pt_regs *regs)
385{ 405{
386 struct kretprobe_instance *ri; 406 struct kretprobe_instance *ri;
387 407
@@ -399,7 +419,7 @@ void arch_prepare_kretprobe(struct kretprobe *rp, struct pt_regs *regs)
399 } 419 }
400} 420}
401 421
402int arch_prepare_kprobe(struct kprobe *p) 422int __kprobes arch_prepare_kprobe(struct kprobe *p)
403{ 423{
404 unsigned long addr = (unsigned long) p->addr; 424 unsigned long addr = (unsigned long) p->addr;
405 unsigned long *kprobe_addr = (unsigned long *)(addr & ~0xFULL); 425 unsigned long *kprobe_addr = (unsigned long *)(addr & ~0xFULL);
@@ -430,7 +450,7 @@ int arch_prepare_kprobe(struct kprobe *p)
430 return 0; 450 return 0;
431} 451}
432 452
433void arch_arm_kprobe(struct kprobe *p) 453void __kprobes arch_arm_kprobe(struct kprobe *p)
434{ 454{
435 unsigned long addr = (unsigned long)p->addr; 455 unsigned long addr = (unsigned long)p->addr;
436 unsigned long arm_addr = addr & ~0xFULL; 456 unsigned long arm_addr = addr & ~0xFULL;
@@ -439,7 +459,7 @@ void arch_arm_kprobe(struct kprobe *p)
439 flush_icache_range(arm_addr, arm_addr + sizeof(bundle_t)); 459 flush_icache_range(arm_addr, arm_addr + sizeof(bundle_t));
440} 460}
441 461
442void arch_disarm_kprobe(struct kprobe *p) 462void __kprobes arch_disarm_kprobe(struct kprobe *p)
443{ 463{
444 unsigned long addr = (unsigned long)p->addr; 464 unsigned long addr = (unsigned long)p->addr;
445 unsigned long arm_addr = addr & ~0xFULL; 465 unsigned long arm_addr = addr & ~0xFULL;
@@ -449,7 +469,7 @@ void arch_disarm_kprobe(struct kprobe *p)
449 flush_icache_range(arm_addr, arm_addr + sizeof(bundle_t)); 469 flush_icache_range(arm_addr, arm_addr + sizeof(bundle_t));
450} 470}
451 471
452void arch_remove_kprobe(struct kprobe *p) 472void __kprobes arch_remove_kprobe(struct kprobe *p)
453{ 473{
454} 474}
455 475
@@ -461,7 +481,7 @@ void arch_remove_kprobe(struct kprobe *p)
461 * to original stack address, handle the case where we need to fixup the 481 * to original stack address, handle the case where we need to fixup the
462 * relative IP address and/or fixup branch register. 482 * relative IP address and/or fixup branch register.
463 */ 483 */
464static void resume_execution(struct kprobe *p, struct pt_regs *regs) 484static void __kprobes resume_execution(struct kprobe *p, struct pt_regs *regs)
465{ 485{
466 unsigned long bundle_addr = ((unsigned long) (&p->opcode.bundle)) & ~0xFULL; 486 unsigned long bundle_addr = ((unsigned long) (&p->opcode.bundle)) & ~0xFULL;
467 unsigned long resume_addr = (unsigned long)p->addr & ~0xFULL; 487 unsigned long resume_addr = (unsigned long)p->addr & ~0xFULL;
@@ -528,13 +548,16 @@ turn_ss_off:
528 ia64_psr(regs)->ss = 0; 548 ia64_psr(regs)->ss = 0;
529} 549}
530 550
531static void prepare_ss(struct kprobe *p, struct pt_regs *regs) 551static void __kprobes prepare_ss(struct kprobe *p, struct pt_regs *regs)
532{ 552{
533 unsigned long bundle_addr = (unsigned long) &p->opcode.bundle; 553 unsigned long bundle_addr = (unsigned long) &p->opcode.bundle;
534 unsigned long slot = (unsigned long)p->addr & 0xf; 554 unsigned long slot = (unsigned long)p->addr & 0xf;
535 555
536 /* Update instruction pointer (IIP) and slot number (IPSR.ri) */ 556 /* single step inline if break instruction */
537 regs->cr_iip = bundle_addr & ~0xFULL; 557 if (p->ainsn.inst_flag == INST_FLAG_BREAK_INST)
558 regs->cr_iip = (unsigned long)p->addr & ~0xFULL;
559 else
560 regs->cr_iip = bundle_addr & ~0xFULL;
538 561
539 if (slot > 2) 562 if (slot > 2)
540 slot = 0; 563 slot = 0;
@@ -545,7 +568,39 @@ static void prepare_ss(struct kprobe *p, struct pt_regs *regs)
545 ia64_psr(regs)->ss = 1; 568 ia64_psr(regs)->ss = 1;
546} 569}
547 570
548static int pre_kprobes_handler(struct die_args *args) 571static int __kprobes is_ia64_break_inst(struct pt_regs *regs)
572{
573 unsigned int slot = ia64_psr(regs)->ri;
574 unsigned int template, major_opcode;
575 unsigned long kprobe_inst;
576 unsigned long *kprobe_addr = (unsigned long *)regs->cr_iip;
577 bundle_t bundle;
578
579 memcpy(&bundle, kprobe_addr, sizeof(bundle_t));
580 template = bundle.quad0.template;
581
582 /* Move to slot 2, if bundle is MLX type and kprobe slot is 1 */
583 if (slot == 1 && bundle_encoding[template][1] == L)
584 slot++;
585
586 /* Get Kprobe probe instruction at given slot*/
587 get_kprobe_inst(&bundle, slot, &kprobe_inst, &major_opcode);
588
589 /* For break instruction,
590 * Bits 37:40 Major opcode to be zero
591 * Bits 27:32 X6 to be zero
592 * Bits 32:35 X3 to be zero
593 */
594 if (major_opcode || ((kprobe_inst >> 27) & 0x1FF) ) {
595 /* Not a break instruction */
596 return 0;
597 }
598
599 /* Is a break instruction */
600 return 1;
601}
602
603static int __kprobes pre_kprobes_handler(struct die_args *args)
549{ 604{
550 struct kprobe *p; 605 struct kprobe *p;
551 int ret = 0; 606 int ret = 0;
@@ -558,7 +613,9 @@ static int pre_kprobes_handler(struct die_args *args)
558 if (kprobe_running()) { 613 if (kprobe_running()) {
559 p = get_kprobe(addr); 614 p = get_kprobe(addr);
560 if (p) { 615 if (p) {
561 if (kprobe_status == KPROBE_HIT_SS) { 616 if ( (kprobe_status == KPROBE_HIT_SS) &&
617 (p->ainsn.inst_flag == INST_FLAG_BREAK_INST)) {
618 ia64_psr(regs)->ss = 0;
562 unlock_kprobes(); 619 unlock_kprobes();
563 goto no_kprobe; 620 goto no_kprobe;
564 } 621 }
@@ -592,6 +649,19 @@ static int pre_kprobes_handler(struct die_args *args)
592 p = get_kprobe(addr); 649 p = get_kprobe(addr);
593 if (!p) { 650 if (!p) {
594 unlock_kprobes(); 651 unlock_kprobes();
652 if (!is_ia64_break_inst(regs)) {
653 /*
654 * The breakpoint instruction was removed right
655 * after we hit it. Another cpu has removed
656 * either a probepoint or a debugger breakpoint
657 * at this address. In either case, no further
658 * handling of this interrupt is appropriate.
659 */
660 ret = 1;
661
662 }
663
664 /* Not one of our break, let kernel handle it */
595 goto no_kprobe; 665 goto no_kprobe;
596 } 666 }
597 667
@@ -616,7 +686,7 @@ no_kprobe:
616 return ret; 686 return ret;
617} 687}
618 688
619static int post_kprobes_handler(struct pt_regs *regs) 689static int __kprobes post_kprobes_handler(struct pt_regs *regs)
620{ 690{
621 if (!kprobe_running()) 691 if (!kprobe_running())
622 return 0; 692 return 0;
@@ -641,7 +711,7 @@ out:
641 return 1; 711 return 1;
642} 712}
643 713
644static int kprobes_fault_handler(struct pt_regs *regs, int trapnr) 714static int __kprobes kprobes_fault_handler(struct pt_regs *regs, int trapnr)
645{ 715{
646 if (!kprobe_running()) 716 if (!kprobe_running())
647 return 0; 717 return 0;
@@ -659,8 +729,8 @@ static int kprobes_fault_handler(struct pt_regs *regs, int trapnr)
659 return 0; 729 return 0;
660} 730}
661 731
662int kprobe_exceptions_notify(struct notifier_block *self, unsigned long val, 732int __kprobes kprobe_exceptions_notify(struct notifier_block *self,
663 void *data) 733 unsigned long val, void *data)
664{ 734{
665 struct die_args *args = (struct die_args *)data; 735 struct die_args *args = (struct die_args *)data;
666 switch(val) { 736 switch(val) {
@@ -681,7 +751,7 @@ int kprobe_exceptions_notify(struct notifier_block *self, unsigned long val,
681 return NOTIFY_DONE; 751 return NOTIFY_DONE;
682} 752}
683 753
684int setjmp_pre_handler(struct kprobe *p, struct pt_regs *regs) 754int __kprobes setjmp_pre_handler(struct kprobe *p, struct pt_regs *regs)
685{ 755{
686 struct jprobe *jp = container_of(p, struct jprobe, kp); 756 struct jprobe *jp = container_of(p, struct jprobe, kp);
687 unsigned long addr = ((struct fnptr *)(jp->entry))->ip; 757 unsigned long addr = ((struct fnptr *)(jp->entry))->ip;
@@ -703,7 +773,7 @@ int setjmp_pre_handler(struct kprobe *p, struct pt_regs *regs)
703 return 1; 773 return 1;
704} 774}
705 775
706int longjmp_break_handler(struct kprobe *p, struct pt_regs *regs) 776int __kprobes longjmp_break_handler(struct kprobe *p, struct pt_regs *regs)
707{ 777{
708 *regs = jprobe_saved_regs; 778 *regs = jprobe_saved_regs;
709 return 1; 779 return 1;
diff --git a/arch/ia64/kernel/traps.c b/arch/ia64/kernel/traps.c
index 4440c8343fa4..f970359e7edf 100644
--- a/arch/ia64/kernel/traps.c
+++ b/arch/ia64/kernel/traps.c
@@ -15,6 +15,7 @@
15#include <linux/vt_kern.h> /* For unblank_screen() */ 15#include <linux/vt_kern.h> /* For unblank_screen() */
16#include <linux/module.h> /* for EXPORT_SYMBOL */ 16#include <linux/module.h> /* for EXPORT_SYMBOL */
17#include <linux/hardirq.h> 17#include <linux/hardirq.h>
18#include <linux/kprobes.h>
18 19
19#include <asm/fpswa.h> 20#include <asm/fpswa.h>
20#include <asm/ia32.h> 21#include <asm/ia32.h>
@@ -122,7 +123,7 @@ die_if_kernel (char *str, struct pt_regs *regs, long err)
122} 123}
123 124
124void 125void
125ia64_bad_break (unsigned long break_num, struct pt_regs *regs) 126__kprobes ia64_bad_break (unsigned long break_num, struct pt_regs *regs)
126{ 127{
127 siginfo_t siginfo; 128 siginfo_t siginfo;
128 int sig, code; 129 int sig, code;
@@ -444,7 +445,7 @@ ia64_illegal_op_fault (unsigned long ec, long arg1, long arg2, long arg3,
444 return rv; 445 return rv;
445} 446}
446 447
447void 448void __kprobes
448ia64_fault (unsigned long vector, unsigned long isr, unsigned long ifa, 449ia64_fault (unsigned long vector, unsigned long isr, unsigned long ifa,
449 unsigned long iim, unsigned long itir, long arg5, long arg6, 450 unsigned long iim, unsigned long itir, long arg5, long arg6,
450 long arg7, struct pt_regs regs) 451 long arg7, struct pt_regs regs)
diff --git a/arch/ia64/kernel/vmlinux.lds.S b/arch/ia64/kernel/vmlinux.lds.S
index a676e79e0681..30d8564e9603 100644
--- a/arch/ia64/kernel/vmlinux.lds.S
+++ b/arch/ia64/kernel/vmlinux.lds.S
@@ -48,6 +48,7 @@ SECTIONS
48 *(.text) 48 *(.text)
49 SCHED_TEXT 49 SCHED_TEXT
50 LOCK_TEXT 50 LOCK_TEXT
51 KPROBES_TEXT
51 *(.gnu.linkonce.t*) 52 *(.gnu.linkonce.t*)
52 } 53 }
53 .text2 : AT(ADDR(.text2) - LOAD_OFFSET) 54 .text2 : AT(ADDR(.text2) - LOAD_OFFSET)
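Several of the ia64 changes above tag kprobes-internal functions with __kprobes and add KPROBES_TEXT to the linker script (plus explicit .section .kprobes.text directives in the assembly files). The usual purpose, assumed here rather than restated by the patch, is to gather everything that runs while a probe is being handled into one section, so the kprobes core can refuse to plant a probe inside that range and recurse on itself. A stand-alone sketch of the mechanism, with demo names in place of the kernel's:

#include <stdio.h>

/* demo stand-in for the kernel's __kprobes annotation */
#define __demo_kprobes __attribute__((__section__(".kprobes.text")))

static int __demo_kprobes demo_probe_handler(int x)
{
        return x + 1;           /* linked into .kprobes.text */
}

static int demo_ordinary(int x)
{
        return x + 2;           /* stays in .text */
}

int main(void)
{
        /* Both run normally; only their section placement differs.  In the
         * kernel, KPROBES_TEXT collects the .kprobes.text input sections
         * between start/end markers that registration code can check. */
        printf("%d %d\n", demo_probe_handler(1), demo_ordinary(1));
        return 0;
}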
diff --git a/arch/ia64/lib/flush.S b/arch/ia64/lib/flush.S
index 3e2cfa2c6d39..2a0d27f2f21b 100644
--- a/arch/ia64/lib/flush.S
+++ b/arch/ia64/lib/flush.S
@@ -20,6 +20,7 @@
20 * 20 *
21 * Note: "in0" and "in1" are preserved for debugging purposes. 21 * Note: "in0" and "in1" are preserved for debugging purposes.
22 */ 22 */
23 .section .kprobes.text,"ax"
23GLOBAL_ENTRY(flush_icache_range) 24GLOBAL_ENTRY(flush_icache_range)
24 25
25 .prologue 26 .prologue
diff --git a/arch/ia64/mm/fault.c b/arch/ia64/mm/fault.c
index ff62551eb3a1..24614869e866 100644
--- a/arch/ia64/mm/fault.c
+++ b/arch/ia64/mm/fault.c
@@ -9,6 +9,7 @@
9#include <linux/mm.h> 9#include <linux/mm.h>
10#include <linux/smp_lock.h> 10#include <linux/smp_lock.h>
11#include <linux/interrupt.h> 11#include <linux/interrupt.h>
12#include <linux/kprobes.h>
12 13
13#include <asm/pgtable.h> 14#include <asm/pgtable.h>
14#include <asm/processor.h> 15#include <asm/processor.h>
@@ -76,7 +77,7 @@ mapped_kernel_page_is_present (unsigned long address)
76 return pte_present(pte); 77 return pte_present(pte);
77} 78}
78 79
79void 80void __kprobes
80ia64_do_page_fault (unsigned long address, unsigned long isr, struct pt_regs *regs) 81ia64_do_page_fault (unsigned long address, unsigned long isr, struct pt_regs *regs)
81{ 82{
82 int signal = SIGSEGV, code = SEGV_MAPERR; 83 int signal = SIGSEGV, code = SEGV_MAPERR;
diff --git a/arch/ia64/sn/kernel/io_init.c b/arch/ia64/sn/kernel/io_init.c
index 4564ed0b5ff3..906622d9f933 100644
--- a/arch/ia64/sn/kernel/io_init.c
+++ b/arch/ia64/sn/kernel/io_init.c
@@ -431,7 +431,7 @@ void sn_bus_store_sysdata(struct pci_dev *dev)
431{ 431{
432 struct sysdata_el *element; 432 struct sysdata_el *element;
433 433
434 element = kcalloc(1, sizeof(struct sysdata_el), GFP_KERNEL); 434 element = kzalloc(sizeof(struct sysdata_el), GFP_KERNEL);
435 if (!element) { 435 if (!element) {
436 dev_dbg(dev, "%s: out of memory!\n", __FUNCTION__); 436 dev_dbg(dev, "%s: out of memory!\n", __FUNCTION__);
437 return; 437 return;
diff --git a/arch/ia64/sn/kernel/tiocx.c b/arch/ia64/sn/kernel/tiocx.c
index 254fe15c064b..b45db5133f55 100644
--- a/arch/ia64/sn/kernel/tiocx.c
+++ b/arch/ia64/sn/kernel/tiocx.c
@@ -191,7 +191,7 @@ cx_device_register(nasid_t nasid, int part_num, int mfg_num,
191{ 191{
192 struct cx_dev *cx_dev; 192 struct cx_dev *cx_dev;
193 193
194 cx_dev = kcalloc(1, sizeof(struct cx_dev), GFP_KERNEL); 194 cx_dev = kzalloc(sizeof(struct cx_dev), GFP_KERNEL);
195 DBG("cx_dev= 0x%p\n", cx_dev); 195 DBG("cx_dev= 0x%p\n", cx_dev);
196 if (cx_dev == NULL) 196 if (cx_dev == NULL)
197 return -ENOMEM; 197 return -ENOMEM;
diff --git a/arch/ia64/sn/pci/tioca_provider.c b/arch/ia64/sn/pci/tioca_provider.c
index ea09c12f0258..19bced34d5f1 100644
--- a/arch/ia64/sn/pci/tioca_provider.c
+++ b/arch/ia64/sn/pci/tioca_provider.c
@@ -148,7 +148,7 @@ tioca_gart_init(struct tioca_kernel *tioca_kern)
148 tioca_kern->ca_pcigart_entries = 148 tioca_kern->ca_pcigart_entries =
149 tioca_kern->ca_pciap_size / tioca_kern->ca_ap_pagesize; 149 tioca_kern->ca_pciap_size / tioca_kern->ca_ap_pagesize;
150 tioca_kern->ca_pcigart_pagemap = 150 tioca_kern->ca_pcigart_pagemap =
151 kcalloc(1, tioca_kern->ca_pcigart_entries / 8, GFP_KERNEL); 151 kzalloc(tioca_kern->ca_pcigart_entries / 8, GFP_KERNEL);
152 if (!tioca_kern->ca_pcigart_pagemap) { 152 if (!tioca_kern->ca_pcigart_pagemap) {
153 free_pages((unsigned long)tioca_kern->ca_gart, 153 free_pages((unsigned long)tioca_kern->ca_gart,
154 get_order(tioca_kern->ca_gart_size)); 154 get_order(tioca_kern->ca_gart_size));
@@ -392,7 +392,7 @@ tioca_dma_mapped(struct pci_dev *pdev, uint64_t paddr, size_t req_size)
392 * allocate a map struct 392 * allocate a map struct
393 */ 393 */
394 394
395 ca_dmamap = kcalloc(1, sizeof(struct tioca_dmamap), GFP_ATOMIC); 395 ca_dmamap = kzalloc(sizeof(struct tioca_dmamap), GFP_ATOMIC);
396 if (!ca_dmamap) 396 if (!ca_dmamap)
397 goto map_return; 397 goto map_return;
398 398
@@ -600,7 +600,7 @@ tioca_bus_fixup(struct pcibus_bussoft *prom_bussoft, struct pci_controller *cont
600 * Allocate kernel bus soft and copy from prom. 600 * Allocate kernel bus soft and copy from prom.
601 */ 601 */
602 602
603 tioca_common = kcalloc(1, sizeof(struct tioca_common), GFP_KERNEL); 603 tioca_common = kzalloc(sizeof(struct tioca_common), GFP_KERNEL);
604 if (!tioca_common) 604 if (!tioca_common)
605 return NULL; 605 return NULL;
606 606
@@ -609,7 +609,7 @@ tioca_bus_fixup(struct pcibus_bussoft *prom_bussoft, struct pci_controller *cont
609 609
610 /* init kernel-private area */ 610 /* init kernel-private area */
611 611
612 tioca_kern = kcalloc(1, sizeof(struct tioca_kernel), GFP_KERNEL); 612 tioca_kern = kzalloc(sizeof(struct tioca_kernel), GFP_KERNEL);
613 if (!tioca_kern) { 613 if (!tioca_kern) {
614 kfree(tioca_common); 614 kfree(tioca_common);
615 return NULL; 615 return NULL;
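The kcalloc(1, size, flags) calls in the sn/ and tioca code become kzalloc(size, flags). Both return size bytes of zeroed memory; kzalloc simply says "one zeroed object" directly instead of going through the array helper with a count of one. A userspace analog of the before/after, using demo wrappers over calloc rather than the kernel allocators:

#include <stdio.h>
#include <stdlib.h>

struct sysdata_el { int node; void *data; };

static void *demo_kcalloc(size_t n, size_t size) { return calloc(n, size); }
static void *demo_kzalloc(size_t size)           { return calloc(1, size); }

int main(void)
{
        struct sysdata_el *old = demo_kcalloc(1, sizeof(*old)); /* old style */
        struct sysdata_el *new = demo_kzalloc(sizeof(*new));    /* new style */

        printf("%d %d\n", old->node, new->node);  /* both zero-initialised */
        free(old);
        free(new);
        return 0;
}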
diff --git a/arch/m32r/kernel/time.c b/arch/m32r/kernel/time.c
index 8a2b77bc5749..539c562cd54d 100644
--- a/arch/m32r/kernel/time.c
+++ b/arch/m32r/kernel/time.c
@@ -171,10 +171,7 @@ int do_settimeofday(struct timespec *tv)
171 set_normalized_timespec(&xtime, sec, nsec); 171 set_normalized_timespec(&xtime, sec, nsec);
172 set_normalized_timespec(&wall_to_monotonic, wtm_sec, wtm_nsec); 172 set_normalized_timespec(&wall_to_monotonic, wtm_sec, wtm_nsec);
173 173
174 time_adjust = 0; /* stop active adjtime() */ 174 ntp_clear();
175 time_status |= STA_UNSYNC;
176 time_maxerror = NTP_PHASE_LIMIT;
177 time_esterror = NTP_PHASE_LIMIT;
178 write_sequnlock_irq(&xtime_lock); 175 write_sequnlock_irq(&xtime_lock);
179 clock_was_set(); 176 clock_was_set();
180 177
@@ -221,7 +218,7 @@ irqreturn_t timer_interrupt(int irq, void *dev_id, struct pt_regs *regs)
221 * called as close as possible to 500 ms before the new second starts. 218 * called as close as possible to 500 ms before the new second starts.
222 */ 219 */
223 write_seqlock(&xtime_lock); 220 write_seqlock(&xtime_lock);
224 if ((time_status & STA_UNSYNC) == 0 221 if (ntp_synced()
225 && xtime.tv_sec > last_rtc_update + 660 222 && xtime.tv_sec > last_rtc_update + 660
226 && (xtime.tv_nsec / 1000) >= 500000 - ((unsigned)TICK_SIZE) / 2 223 && (xtime.tv_nsec / 1000) >= 500000 - ((unsigned)TICK_SIZE) / 2
227 && (xtime.tv_nsec / 1000) <= 500000 + ((unsigned)TICK_SIZE) / 2) 224 && (xtime.tv_nsec / 1000) <= 500000 + ((unsigned)TICK_SIZE) / 2)
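This and the following time.c hunks replace the same open-coded four-statement NTP reset, and the (time_status & STA_UNSYNC) == 0 test, with ntp_clear() and ntp_synced(). A stand-alone mock-up of what those helpers most likely wrap, built from the statements the patch removes; the real definitions live in the timex/timekeeping headers and may differ in detail:

#include <stdio.h>
#include <limits.h>

#define STA_UNSYNC       0x0040
#define NTP_PHASE_LIMIT  INT_MAX        /* placeholder value for the sketch */

static long time_adjust;
static int  time_status = STA_UNSYNC;
static long time_maxerror, time_esterror;

static void demo_ntp_clear(void)        /* the removed four statements */
{
        time_adjust   = 0;              /* stop active adjtime() */
        time_status  |= STA_UNSYNC;
        time_maxerror = NTP_PHASE_LIMIT;
        time_esterror = NTP_PHASE_LIMIT;
}

static int demo_ntp_synced(void)        /* the removed STA_UNSYNC test */
{
        return (time_status & STA_UNSYNC) == 0;
}

int main(void)
{
        demo_ntp_clear();
        printf("clock synced: %d\n", demo_ntp_synced());
        return 0;
}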
diff --git a/arch/m68k/Kconfig b/arch/m68k/Kconfig
index 178c4a3fbb72..ba960bbc8e6d 100644
--- a/arch/m68k/Kconfig
+++ b/arch/m68k/Kconfig
@@ -25,6 +25,11 @@ config GENERIC_CALIBRATE_DELAY
25 bool 25 bool
26 default y 26 default y
27 27
28config ARCH_MAY_HAVE_PC_FDC
29 bool
30 depends on Q40 || (BROKEN && SUN3X)
31 default y
32
28mainmenu "Linux/68k Kernel Configuration" 33mainmenu "Linux/68k Kernel Configuration"
29 34
30source "init/Kconfig" 35source "init/Kconfig"
diff --git a/arch/m68k/bvme6000/rtc.c b/arch/m68k/bvme6000/rtc.c
index c6b2a410bf9a..eb63ca6ed94c 100644
--- a/arch/m68k/bvme6000/rtc.c
+++ b/arch/m68k/bvme6000/rtc.c
@@ -14,6 +14,7 @@
14#include <linux/fcntl.h> 14#include <linux/fcntl.h>
15#include <linux/init.h> 15#include <linux/init.h>
16#include <linux/poll.h> 16#include <linux/poll.h>
17#include <linux/module.h>
17#include <linux/mc146818rtc.h> /* For struct rtc_time and ioctls, etc */ 18#include <linux/mc146818rtc.h> /* For struct rtc_time and ioctls, etc */
18#include <linux/smp_lock.h> 19#include <linux/smp_lock.h>
19#include <asm/bvme6000hw.h> 20#include <asm/bvme6000hw.h>
@@ -171,7 +172,7 @@ static struct miscdevice rtc_dev = {
171 .fops = &rtc_fops 172 .fops = &rtc_fops
172}; 173};
173 174
174int __init rtc_DP8570A_init(void) 175static int __init rtc_DP8570A_init(void)
175{ 176{
176 if (!MACH_IS_BVME6000) 177 if (!MACH_IS_BVME6000)
177 return -ENODEV; 178 return -ENODEV;
@@ -179,4 +180,4 @@ int __init rtc_DP8570A_init(void)
179 printk(KERN_INFO "DP8570A Real Time Clock Driver v%s\n", RTC_VERSION); 180 printk(KERN_INFO "DP8570A Real Time Clock Driver v%s\n", RTC_VERSION);
180 return misc_register(&rtc_dev); 181 return misc_register(&rtc_dev);
181} 182}
182 183module_init(rtc_DP8570A_init);
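rtc_DP8570A_init() here (and rtc_MK48T08_init() below) become static and are registered through module_init() instead of being called by name from elsewhere, so the driver initialises itself whether built in or modular and its symbol no longer leaks into the global namespace. A generic module skeleton showing the pattern, with demo names rather than the RTC driver's:

#include <linux/module.h>
#include <linux/init.h>
#include <linux/kernel.h>

static int __init demo_rtc_init(void)
{
        printk(KERN_INFO "demo rtc driver registered\n");
        return 0;
}

static void __exit demo_rtc_exit(void)
{
        printk(KERN_INFO "demo rtc driver removed\n");
}

module_init(demo_rtc_init);
module_exit(demo_rtc_exit);
MODULE_LICENSE("GPL");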
diff --git a/arch/m68k/kernel/time.c b/arch/m68k/kernel/time.c
index e47e19588525..4ec95e3cb874 100644
--- a/arch/m68k/kernel/time.c
+++ b/arch/m68k/kernel/time.c
@@ -166,10 +166,7 @@ int do_settimeofday(struct timespec *tv)
166 set_normalized_timespec(&xtime, sec, nsec); 166 set_normalized_timespec(&xtime, sec, nsec);
167 set_normalized_timespec(&wall_to_monotonic, wtm_sec, wtm_nsec); 167 set_normalized_timespec(&wall_to_monotonic, wtm_sec, wtm_nsec);
168 168
169 time_adjust = 0; /* stop active adjtime() */ 169 ntp_clear();
170 time_status |= STA_UNSYNC;
171 time_maxerror = NTP_PHASE_LIMIT;
172 time_esterror = NTP_PHASE_LIMIT;
173 write_sequnlock_irq(&xtime_lock); 170 write_sequnlock_irq(&xtime_lock);
174 clock_was_set(); 171 clock_was_set();
175 return 0; 172 return 0;
diff --git a/arch/m68k/mvme16x/rtc.c b/arch/m68k/mvme16x/rtc.c
index 8a2425069088..7977eae50af2 100644
--- a/arch/m68k/mvme16x/rtc.c
+++ b/arch/m68k/mvme16x/rtc.c
@@ -161,7 +161,7 @@ static struct miscdevice rtc_dev=
161 .fops = &rtc_fops 161 .fops = &rtc_fops
162}; 162};
163 163
164int __init rtc_MK48T08_init(void) 164static int __init rtc_MK48T08_init(void)
165{ 165{
166 if (!MACH_IS_MVME16x) 166 if (!MACH_IS_MVME16x)
167 return -ENODEV; 167 return -ENODEV;
@@ -169,4 +169,4 @@ int __init rtc_MK48T08_init(void)
169 printk(KERN_INFO "MK48T08 Real Time Clock Driver v%s\n", RTC_VERSION); 169 printk(KERN_INFO "MK48T08 Real Time Clock Driver v%s\n", RTC_VERSION);
170 return misc_register(&rtc_dev); 170 return misc_register(&rtc_dev);
171} 171}
172 172module_init(rtc_MK48T08_init);
diff --git a/arch/m68knommu/kernel/time.c b/arch/m68knommu/kernel/time.c
index 5c3ca671627c..b17c1ecba966 100644
--- a/arch/m68knommu/kernel/time.c
+++ b/arch/m68knommu/kernel/time.c
@@ -68,7 +68,7 @@ static irqreturn_t timer_interrupt(int irq, void *dummy, struct pt_regs * regs)
68 * CMOS clock accordingly every ~11 minutes. Set_rtc_mmss() has to be 68 * CMOS clock accordingly every ~11 minutes. Set_rtc_mmss() has to be
69 * called as close as possible to 500 ms before the new second starts. 69 * called as close as possible to 500 ms before the new second starts.
70 */ 70 */
71 if ((time_status & STA_UNSYNC) == 0 && 71 if (ntp_synced() &&
72 xtime.tv_sec > last_rtc_update + 660 && 72 xtime.tv_sec > last_rtc_update + 660 &&
73 (xtime.tv_nsec / 1000) >= 500000 - ((unsigned) TICK_SIZE) / 2 && 73 (xtime.tv_nsec / 1000) >= 500000 - ((unsigned) TICK_SIZE) / 2 &&
74 (xtime.tv_nsec / 1000) <= 500000 + ((unsigned) TICK_SIZE) / 2) { 74 (xtime.tv_nsec / 1000) <= 500000 + ((unsigned) TICK_SIZE) / 2) {
@@ -178,10 +178,7 @@ int do_settimeofday(struct timespec *tv)
178 set_normalized_timespec(&xtime, sec, nsec); 178 set_normalized_timespec(&xtime, sec, nsec);
179 set_normalized_timespec(&wall_to_monotonic, wtm_sec, wtm_nsec); 179 set_normalized_timespec(&wall_to_monotonic, wtm_sec, wtm_nsec);
180 180
181 time_adjust = 0; /* stop active adjtime() */ 181 ntp_clear();
182 time_status |= STA_UNSYNC;
183 time_maxerror = NTP_PHASE_LIMIT;
184 time_esterror = NTP_PHASE_LIMIT;
185 write_sequnlock_irq(&xtime_lock); 182 write_sequnlock_irq(&xtime_lock);
186 clock_was_set(); 183 clock_was_set();
187 return 0; 184 return 0;
diff --git a/arch/mips/Kconfig b/arch/mips/Kconfig
index d79fba0aa8bf..8d76eb1ff291 100644
--- a/arch/mips/Kconfig
+++ b/arch/mips/Kconfig
@@ -4,6 +4,11 @@ config MIPS
4 # Horrible source of confusion. Die, die, die ... 4 # Horrible source of confusion. Die, die, die ...
5 select EMBEDDED 5 select EMBEDDED
6 6
7# shouldn't it be per-subarchitecture?
8config ARCH_MAY_HAVE_PC_FDC
9 bool
10 default y
11
7mainmenu "Linux/MIPS Kernel Configuration" 12mainmenu "Linux/MIPS Kernel Configuration"
8 13
9source "init/Kconfig" 14source "init/Kconfig"
diff --git a/arch/mips/kernel/linux32.c b/arch/mips/kernel/linux32.c
index 4613219dd73e..ece4564919d8 100644
--- a/arch/mips/kernel/linux32.c
+++ b/arch/mips/kernel/linux32.c
@@ -546,20 +546,20 @@ struct msgbuf32 { s32 mtype; char mtext[1]; };
546struct ipc_perm32 546struct ipc_perm32
547{ 547{
548 key_t key; 548 key_t key;
549 compat_uid_t uid; 549 __compat_uid_t uid;
550 compat_gid_t gid; 550 __compat_gid_t gid;
551 compat_uid_t cuid; 551 __compat_uid_t cuid;
552 compat_gid_t cgid; 552 __compat_gid_t cgid;
553 compat_mode_t mode; 553 compat_mode_t mode;
554 unsigned short seq; 554 unsigned short seq;
555}; 555};
556 556
557struct ipc64_perm32 { 557struct ipc64_perm32 {
558 key_t key; 558 key_t key;
559 compat_uid_t uid; 559 __compat_uid_t uid;
560 compat_gid_t gid; 560 __compat_gid_t gid;
561 compat_uid_t cuid; 561 __compat_uid_t cuid;
562 compat_gid_t cgid; 562 __compat_gid_t cgid;
563 compat_mode_t mode; 563 compat_mode_t mode;
564 unsigned short seq; 564 unsigned short seq;
565 unsigned short __pad1; 565 unsigned short __pad1;
diff --git a/arch/mips/kernel/sysirix.c b/arch/mips/kernel/sysirix.c
index f3bf0e43b8bb..b46595462717 100644
--- a/arch/mips/kernel/sysirix.c
+++ b/arch/mips/kernel/sysirix.c
@@ -632,10 +632,7 @@ asmlinkage int irix_stime(int value)
632 write_seqlock_irq(&xtime_lock); 632 write_seqlock_irq(&xtime_lock);
633 xtime.tv_sec = value; 633 xtime.tv_sec = value;
634 xtime.tv_nsec = 0; 634 xtime.tv_nsec = 0;
635 time_adjust = 0; /* stop active adjtime() */ 635 ntp_clear();
636 time_status |= STA_UNSYNC;
637 time_maxerror = NTP_PHASE_LIMIT;
638 time_esterror = NTP_PHASE_LIMIT;
639 write_sequnlock_irq(&xtime_lock); 636 write_sequnlock_irq(&xtime_lock);
640 637
641 return 0; 638 return 0;
diff --git a/arch/mips/kernel/time.c b/arch/mips/kernel/time.c
index 648c82292ed6..0dd0df7a3b04 100644
--- a/arch/mips/kernel/time.c
+++ b/arch/mips/kernel/time.c
@@ -223,10 +223,7 @@ int do_settimeofday(struct timespec *tv)
223 set_normalized_timespec(&xtime, sec, nsec); 223 set_normalized_timespec(&xtime, sec, nsec);
224 set_normalized_timespec(&wall_to_monotonic, wtm_sec, wtm_nsec); 224 set_normalized_timespec(&wall_to_monotonic, wtm_sec, wtm_nsec);
225 225
226 time_adjust = 0; /* stop active adjtime() */ 226 ntp_clear();
227 time_status |= STA_UNSYNC;
228 time_maxerror = NTP_PHASE_LIMIT;
229 time_esterror = NTP_PHASE_LIMIT;
230 227
231 write_sequnlock_irq(&xtime_lock); 228 write_sequnlock_irq(&xtime_lock);
232 clock_was_set(); 229 clock_was_set();
@@ -442,7 +439,7 @@ irqreturn_t timer_interrupt(int irq, void *dev_id, struct pt_regs *regs)
442 * called as close as possible to 500 ms before the new second starts. 439 * called as close as possible to 500 ms before the new second starts.
443 */ 440 */
444 write_seqlock(&xtime_lock); 441 write_seqlock(&xtime_lock);
445 if ((time_status & STA_UNSYNC) == 0 && 442 if (ntp_synced() &&
446 xtime.tv_sec > last_rtc_update + 660 && 443 xtime.tv_sec > last_rtc_update + 660 &&
447 (xtime.tv_nsec / 1000) >= 500000 - ((unsigned) TICK_SIZE) / 2 && 444 (xtime.tv_nsec / 1000) >= 500000 - ((unsigned) TICK_SIZE) / 2 &&
448 (xtime.tv_nsec / 1000) <= 500000 + ((unsigned) TICK_SIZE) / 2) { 445 (xtime.tv_nsec / 1000) <= 500000 + ((unsigned) TICK_SIZE) / 2) {
diff --git a/arch/mips/sgi-ip27/ip27-timer.c b/arch/mips/sgi-ip27/ip27-timer.c
index 8c1b96fffa76..cddf1cedf007 100644
--- a/arch/mips/sgi-ip27/ip27-timer.c
+++ b/arch/mips/sgi-ip27/ip27-timer.c
@@ -118,7 +118,7 @@ again:
118 * RTC clock accordingly every ~11 minutes. Set_rtc_mmss() has to be 118 * RTC clock accordingly every ~11 minutes. Set_rtc_mmss() has to be
119 * called as close as possible to when a second starts. 119 * called as close as possible to when a second starts.
120 */ 120 */
121 if ((time_status & STA_UNSYNC) == 0 && 121 if (ntp_synced() &&
122 xtime.tv_sec > last_rtc_update + 660 && 122 xtime.tv_sec > last_rtc_update + 660 &&
123 (xtime.tv_nsec / 1000) >= 500000 - ((unsigned) TICK_SIZE) / 2 && 123 (xtime.tv_nsec / 1000) >= 500000 - ((unsigned) TICK_SIZE) / 2 &&
124 (xtime.tv_nsec / 1000) <= 500000 + ((unsigned) TICK_SIZE) / 2) { 124 (xtime.tv_nsec / 1000) <= 500000 + ((unsigned) TICK_SIZE) / 2) {
diff --git a/arch/parisc/Kconfig b/arch/parisc/Kconfig
index 1c2d87435233..0b07922a2ac6 100644
--- a/arch/parisc/Kconfig
+++ b/arch/parisc/Kconfig
@@ -49,6 +49,10 @@ config ISA_DMA_API
49 bool 49 bool
50 default y 50 default y
51 51
52config ARCH_MAY_HAVE_PC_FDC
53 bool
54 default y
55
52source "init/Kconfig" 56source "init/Kconfig"
53 57
54 58
diff --git a/arch/parisc/kernel/time.c b/arch/parisc/kernel/time.c
index 6cf7407344ba..7ff67f8e9f8c 100644
--- a/arch/parisc/kernel/time.c
+++ b/arch/parisc/kernel/time.c
@@ -188,10 +188,7 @@ do_settimeofday (struct timespec *tv)
188 set_normalized_timespec(&xtime, sec, nsec); 188 set_normalized_timespec(&xtime, sec, nsec);
189 set_normalized_timespec(&wall_to_monotonic, wtm_sec, wtm_nsec); 189 set_normalized_timespec(&wall_to_monotonic, wtm_sec, wtm_nsec);
190 190
191 time_adjust = 0; /* stop active adjtime() */ 191 ntp_clear();
192 time_status |= STA_UNSYNC;
193 time_maxerror = NTP_PHASE_LIMIT;
194 time_esterror = NTP_PHASE_LIMIT;
195 } 192 }
196 write_sequnlock_irq(&xtime_lock); 193 write_sequnlock_irq(&xtime_lock);
197 clock_was_set(); 194 clock_was_set();
diff --git a/arch/ppc/Kconfig b/arch/ppc/Kconfig
index 36dee0ff5ca0..6ab7e5ea5fcf 100644
--- a/arch/ppc/Kconfig
+++ b/arch/ppc/Kconfig
@@ -47,6 +47,10 @@ config SCHED_NO_NO_OMIT_FRAME_POINTER
47 bool 47 bool
48 default y 48 default y
49 49
50config ARCH_MAY_HAVE_PC_FDC
51 bool
52 default y
53
50source "init/Kconfig" 54source "init/Kconfig"
51 55
52menu "Processor" 56menu "Processor"
diff --git a/arch/ppc/kernel/time.c b/arch/ppc/kernel/time.c
index bf4ddca5e853..a3c5281a5d2d 100644
--- a/arch/ppc/kernel/time.c
+++ b/arch/ppc/kernel/time.c
@@ -169,7 +169,7 @@ void timer_interrupt(struct pt_regs * regs)
169 * We should have an rtc call that only sets the minutes and 169 * We should have an rtc call that only sets the minutes and
170 * seconds like on Intel to avoid problems with non UTC clocks. 170 * seconds like on Intel to avoid problems with non UTC clocks.
171 */ 171 */
172 if ( ppc_md.set_rtc_time && (time_status & STA_UNSYNC) == 0 && 172 if ( ppc_md.set_rtc_time && ntp_synced() &&
173 xtime.tv_sec - last_rtc_update >= 659 && 173 xtime.tv_sec - last_rtc_update >= 659 &&
174 abs((xtime.tv_nsec / 1000) - (1000000-1000000/HZ)) < 500000/HZ && 174 abs((xtime.tv_nsec / 1000) - (1000000-1000000/HZ)) < 500000/HZ &&
175 jiffies - wall_jiffies == 1) { 175 jiffies - wall_jiffies == 1) {
@@ -271,10 +271,7 @@ int do_settimeofday(struct timespec *tv)
271 */ 271 */
272 last_rtc_update = new_sec - 658; 272 last_rtc_update = new_sec - 658;
273 273
274 time_adjust = 0; /* stop active adjtime() */ 274 ntp_clear();
275 time_status |= STA_UNSYNC;
276 time_maxerror = NTP_PHASE_LIMIT;
277 time_esterror = NTP_PHASE_LIMIT;
278 write_sequnlock_irqrestore(&xtime_lock, flags); 275 write_sequnlock_irqrestore(&xtime_lock, flags);
279 clock_was_set(); 276 clock_was_set();
280 return 0; 277 return 0;
diff --git a/arch/ppc/syslib/ocp.c b/arch/ppc/syslib/ocp.c
index e5fd2ae503ea..9ccce438bd7a 100644
--- a/arch/ppc/syslib/ocp.c
+++ b/arch/ppc/syslib/ocp.c
@@ -165,7 +165,7 @@ ocp_device_remove(struct device *dev)
165} 165}
166 166
167static int 167static int
168ocp_device_suspend(struct device *dev, u32 state) 168ocp_device_suspend(struct device *dev, pm_message_t state)
169{ 169{
170 struct ocp_device *ocp_dev = to_ocp_dev(dev); 170 struct ocp_device *ocp_dev = to_ocp_dev(dev);
171 struct ocp_driver *ocp_drv = to_ocp_drv(dev->driver); 171 struct ocp_driver *ocp_drv = to_ocp_drv(dev->driver);
diff --git a/arch/ppc64/Kconfig b/arch/ppc64/Kconfig
index 13b262f10216..deca68ad644a 100644
--- a/arch/ppc64/Kconfig
+++ b/arch/ppc64/Kconfig
@@ -44,6 +44,10 @@ config SCHED_NO_NO_OMIT_FRAME_POINTER
44 bool 44 bool
45 default y 45 default y
46 46
47config ARCH_MAY_HAVE_PC_FDC
48 bool
49 default y
50
47# We optimistically allocate largepages from the VM, so make the limit 51# We optimistically allocate largepages from the VM, so make the limit
48# large enough (16MB). This badly named config option is actually 52# large enough (16MB). This badly named config option is actually
49# max order + 1 53# max order + 1
diff --git a/arch/ppc64/Makefile b/arch/ppc64/Makefile
index 6350cce82efb..8189953a372c 100644
--- a/arch/ppc64/Makefile
+++ b/arch/ppc64/Makefile
@@ -49,7 +49,7 @@ NM := $(NM) --synthetic
49 49
50endif 50endif
51 51
52CHECKFLAGS += -m64 -D__powerpc__ 52CHECKFLAGS += -m64 -D__powerpc__ -D__powerpc64__
53 53
54LDFLAGS := -m elf64ppc 54LDFLAGS := -m elf64ppc
55LDFLAGS_vmlinux := -Bstatic -e $(KERNELLOAD) -Ttext $(KERNELLOAD) 55LDFLAGS_vmlinux := -Bstatic -e $(KERNELLOAD) -Ttext $(KERNELLOAD)
diff --git a/arch/ppc64/kernel/kprobes.c b/arch/ppc64/kernel/kprobes.c
index a3d519518fb8..7e80d49c589a 100644
--- a/arch/ppc64/kernel/kprobes.c
+++ b/arch/ppc64/kernel/kprobes.c
@@ -44,7 +44,7 @@ static struct kprobe *kprobe_prev;
44static unsigned long kprobe_status_prev, kprobe_saved_msr_prev; 44static unsigned long kprobe_status_prev, kprobe_saved_msr_prev;
45static struct pt_regs jprobe_saved_regs; 45static struct pt_regs jprobe_saved_regs;
46 46
47int arch_prepare_kprobe(struct kprobe *p) 47int __kprobes arch_prepare_kprobe(struct kprobe *p)
48{ 48{
49 int ret = 0; 49 int ret = 0;
50 kprobe_opcode_t insn = *p->addr; 50 kprobe_opcode_t insn = *p->addr;
@@ -68,27 +68,27 @@ int arch_prepare_kprobe(struct kprobe *p)
68 return ret; 68 return ret;
69} 69}
70 70
71void arch_copy_kprobe(struct kprobe *p) 71void __kprobes arch_copy_kprobe(struct kprobe *p)
72{ 72{
73 memcpy(p->ainsn.insn, p->addr, MAX_INSN_SIZE * sizeof(kprobe_opcode_t)); 73 memcpy(p->ainsn.insn, p->addr, MAX_INSN_SIZE * sizeof(kprobe_opcode_t));
74 p->opcode = *p->addr; 74 p->opcode = *p->addr;
75} 75}
76 76
77void arch_arm_kprobe(struct kprobe *p) 77void __kprobes arch_arm_kprobe(struct kprobe *p)
78{ 78{
79 *p->addr = BREAKPOINT_INSTRUCTION; 79 *p->addr = BREAKPOINT_INSTRUCTION;
80 flush_icache_range((unsigned long) p->addr, 80 flush_icache_range((unsigned long) p->addr,
81 (unsigned long) p->addr + sizeof(kprobe_opcode_t)); 81 (unsigned long) p->addr + sizeof(kprobe_opcode_t));
82} 82}
83 83
84void arch_disarm_kprobe(struct kprobe *p) 84void __kprobes arch_disarm_kprobe(struct kprobe *p)
85{ 85{
86 *p->addr = p->opcode; 86 *p->addr = p->opcode;
87 flush_icache_range((unsigned long) p->addr, 87 flush_icache_range((unsigned long) p->addr,
88 (unsigned long) p->addr + sizeof(kprobe_opcode_t)); 88 (unsigned long) p->addr + sizeof(kprobe_opcode_t));
89} 89}
90 90
91void arch_remove_kprobe(struct kprobe *p) 91void __kprobes arch_remove_kprobe(struct kprobe *p)
92{ 92{
93 up(&kprobe_mutex); 93 up(&kprobe_mutex);
94 free_insn_slot(p->ainsn.insn); 94 free_insn_slot(p->ainsn.insn);
@@ -102,7 +102,7 @@ static inline void prepare_singlestep(struct kprobe *p, struct pt_regs *regs)
102 regs->msr |= MSR_SE; 102 regs->msr |= MSR_SE;
103 103
104 /* single step inline if it is a trap variant */ 104 /* single step inline if it is a trap variant */
105 if (IS_TW(insn) || IS_TD(insn) || IS_TWI(insn) || IS_TDI(insn)) 105 if (is_trap(insn))
106 regs->nip = (unsigned long)p->addr; 106 regs->nip = (unsigned long)p->addr;
107 else 107 else
108 regs->nip = (unsigned long)p->ainsn.insn; 108 regs->nip = (unsigned long)p->ainsn.insn;
@@ -122,7 +122,8 @@ static inline void restore_previous_kprobe(void)
122 kprobe_saved_msr = kprobe_saved_msr_prev; 122 kprobe_saved_msr = kprobe_saved_msr_prev;
123} 123}
124 124
125void arch_prepare_kretprobe(struct kretprobe *rp, struct pt_regs *regs) 125void __kprobes arch_prepare_kretprobe(struct kretprobe *rp,
126 struct pt_regs *regs)
126{ 127{
127 struct kretprobe_instance *ri; 128 struct kretprobe_instance *ri;
128 129
@@ -151,7 +152,9 @@ static inline int kprobe_handler(struct pt_regs *regs)
151 Disarm the probe we just hit, and ignore it. */ 152 Disarm the probe we just hit, and ignore it. */
152 p = get_kprobe(addr); 153 p = get_kprobe(addr);
153 if (p) { 154 if (p) {
154 if (kprobe_status == KPROBE_HIT_SS) { 155 kprobe_opcode_t insn = *p->ainsn.insn;
156 if (kprobe_status == KPROBE_HIT_SS &&
157 is_trap(insn)) {
155 regs->msr &= ~MSR_SE; 158 regs->msr &= ~MSR_SE;
156 regs->msr |= kprobe_saved_msr; 159 regs->msr |= kprobe_saved_msr;
157 unlock_kprobes(); 160 unlock_kprobes();
@@ -191,8 +194,7 @@ static inline int kprobe_handler(struct pt_regs *regs)
191 * trap variant, it could belong to someone else 194 * trap variant, it could belong to someone else
192 */ 195 */
193 kprobe_opcode_t cur_insn = *addr; 196 kprobe_opcode_t cur_insn = *addr;
194 if (IS_TW(cur_insn) || IS_TD(cur_insn) || 197 if (is_trap(cur_insn))
195 IS_TWI(cur_insn) || IS_TDI(cur_insn))
196 goto no_kprobe; 198 goto no_kprobe;
197 /* 199 /*
198 * The breakpoint instruction was removed right 200 * The breakpoint instruction was removed right
@@ -244,7 +246,7 @@ void kretprobe_trampoline_holder(void)
244/* 246/*
245 * Called when the probe at kretprobe trampoline is hit 247 * Called when the probe at kretprobe trampoline is hit
246 */ 248 */
247int trampoline_probe_handler(struct kprobe *p, struct pt_regs *regs) 249int __kprobes trampoline_probe_handler(struct kprobe *p, struct pt_regs *regs)
248{ 250{
249 struct kretprobe_instance *ri = NULL; 251 struct kretprobe_instance *ri = NULL;
250 struct hlist_head *head; 252 struct hlist_head *head;
@@ -308,7 +310,7 @@ int trampoline_probe_handler(struct kprobe *p, struct pt_regs *regs)
308 * single-stepped a copy of the instruction. The address of this 310 * single-stepped a copy of the instruction. The address of this
309 * copy is p->ainsn.insn. 311 * copy is p->ainsn.insn.
310 */ 312 */
311static void resume_execution(struct kprobe *p, struct pt_regs *regs) 313static void __kprobes resume_execution(struct kprobe *p, struct pt_regs *regs)
312{ 314{
313 int ret; 315 int ret;
314 unsigned int insn = *p->ainsn.insn; 316 unsigned int insn = *p->ainsn.insn;
@@ -373,8 +375,8 @@ static inline int kprobe_fault_handler(struct pt_regs *regs, int trapnr)
373/* 375/*
374 * Wrapper routine to for handling exceptions. 376 * Wrapper routine to for handling exceptions.
375 */ 377 */
376int kprobe_exceptions_notify(struct notifier_block *self, unsigned long val, 378int __kprobes kprobe_exceptions_notify(struct notifier_block *self,
377 void *data) 379 unsigned long val, void *data)
378{ 380{
379 struct die_args *args = (struct die_args *)data; 381 struct die_args *args = (struct die_args *)data;
380 int ret = NOTIFY_DONE; 382 int ret = NOTIFY_DONE;
@@ -402,11 +404,11 @@ int kprobe_exceptions_notify(struct notifier_block *self, unsigned long val,
402 default: 404 default:
403 break; 405 break;
404 } 406 }
405 preempt_enable(); 407 preempt_enable_no_resched();
406 return ret; 408 return ret;
407} 409}
408 410
409int setjmp_pre_handler(struct kprobe *p, struct pt_regs *regs) 411int __kprobes setjmp_pre_handler(struct kprobe *p, struct pt_regs *regs)
410{ 412{
411 struct jprobe *jp = container_of(p, struct jprobe, kp); 413 struct jprobe *jp = container_of(p, struct jprobe, kp);
412 414
@@ -419,16 +421,16 @@ int setjmp_pre_handler(struct kprobe *p, struct pt_regs *regs)
419 return 1; 421 return 1;
420} 422}
421 423
422void jprobe_return(void) 424void __kprobes jprobe_return(void)
423{ 425{
424 asm volatile("trap" ::: "memory"); 426 asm volatile("trap" ::: "memory");
425} 427}
426 428
427void jprobe_return_end(void) 429void __kprobes jprobe_return_end(void)
428{ 430{
429}; 431};
430 432
431int longjmp_break_handler(struct kprobe *p, struct pt_regs *regs) 433int __kprobes longjmp_break_handler(struct kprobe *p, struct pt_regs *regs)
432{ 434{
433 /* 435 /*
434 * FIXME - we should ideally be validating that we got here 'cos 436 * FIXME - we should ideally be validating that we got here 'cos
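In the ppc64 kprobes handler the repeated IS_TW/IS_TD/IS_TWI/IS_TDI tests collapse into a single is_trap() predicate. A plausible shape for that helper, written against stub IS_* macros so the sketch stands alone (the real macros decode powerpc trap opcodes and are not reproduced here):

#include <stdio.h>

typedef unsigned int kprobe_opcode_t;

/* stand-ins for the real powerpc opcode tests */
#define IS_TW(i)   (((i) & 0x1u) != 0)
#define IS_TD(i)   (((i) & 0x2u) != 0)
#define IS_TWI(i)  (((i) & 0x4u) != 0)
#define IS_TDI(i)  (((i) & 0x8u) != 0)

/* the consolidation the patch switches to */
#define is_trap(instr)  (IS_TW(instr)  || IS_TD(instr) || \
                         IS_TWI(instr) || IS_TDI(instr))

int main(void)
{
        kprobe_opcode_t insn = 0x2;
        printf("trap variant: %s\n", is_trap(insn) ? "yes" : "no");
        return 0;
}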
diff --git a/arch/ppc64/kernel/misc.S b/arch/ppc64/kernel/misc.S
index 474df0a862bf..6d860c1d9fa0 100644
--- a/arch/ppc64/kernel/misc.S
+++ b/arch/ppc64/kernel/misc.S
@@ -183,7 +183,7 @@ PPC64_CACHES:
183 * flush all bytes from start through stop-1 inclusive 183 * flush all bytes from start through stop-1 inclusive
184 */ 184 */
185 185
186_GLOBAL(__flush_icache_range) 186_KPROBE(__flush_icache_range)
187 187
188/* 188/*
189 * Flush the data cache to memory 189 * Flush the data cache to memory
@@ -223,7 +223,7 @@ _GLOBAL(__flush_icache_range)
223 bdnz 2b 223 bdnz 2b
224 isync 224 isync
225 blr 225 blr
226 226 .previous .text
227/* 227/*
228 * Like above, but only do the D-cache. 228 * Like above, but only do the D-cache.
229 * 229 *
@@ -957,7 +957,7 @@ _GLOBAL(sys_call_table32)
957 .llong .ppc_fork 957 .llong .ppc_fork
958 .llong .sys_read 958 .llong .sys_read
959 .llong .sys_write 959 .llong .sys_write
960 .llong .sys32_open /* 5 */ 960 .llong .compat_sys_open /* 5 */
961 .llong .sys_close 961 .llong .sys_close
962 .llong .sys32_waitpid 962 .llong .sys32_waitpid
963 .llong .sys32_creat 963 .llong .sys32_creat
diff --git a/arch/ppc64/kernel/pSeries_reconfig.c b/arch/ppc64/kernel/pSeries_reconfig.c
index dc2a69d412a2..58c61219d08e 100644
--- a/arch/ppc64/kernel/pSeries_reconfig.c
+++ b/arch/ppc64/kernel/pSeries_reconfig.c
@@ -111,7 +111,7 @@ static int pSeries_reconfig_add_node(const char *path, struct property *proplist
111 struct device_node *np; 111 struct device_node *np;
112 int err = -ENOMEM; 112 int err = -ENOMEM;
113 113
114 np = kcalloc(1, sizeof(*np), GFP_KERNEL); 114 np = kzalloc(sizeof(*np), GFP_KERNEL);
115 if (!np) 115 if (!np)
116 goto out_err; 116 goto out_err;
117 117
diff --git a/arch/ppc64/kernel/sys_ppc32.c b/arch/ppc64/kernel/sys_ppc32.c
index 206619080e66..214914a95a50 100644
--- a/arch/ppc64/kernel/sys_ppc32.c
+++ b/arch/ppc64/kernel/sys_ppc32.c
@@ -867,37 +867,6 @@ off_t ppc32_lseek(unsigned int fd, u32 offset, unsigned int origin)
867 return sys_lseek(fd, (int)offset, origin); 867 return sys_lseek(fd, (int)offset, origin);
868} 868}
869 869
870/*
871 * This is just a version for 32-bit applications which does
872 * not force O_LARGEFILE on.
873 */
874asmlinkage long sys32_open(const char __user * filename, int flags, int mode)
875{
876 char * tmp;
877 int fd, error;
878
879 tmp = getname(filename);
880 fd = PTR_ERR(tmp);
881 if (!IS_ERR(tmp)) {
882 fd = get_unused_fd();
883 if (fd >= 0) {
884 struct file * f = filp_open(tmp, flags, mode);
885 error = PTR_ERR(f);
886 if (IS_ERR(f))
887 goto out_error;
888 fd_install(fd, f);
889 }
890out:
891 putname(tmp);
892 }
893 return fd;
894
895out_error:
896 put_unused_fd(fd);
897 fd = error;
898 goto out;
899}
900
901/* Note: it is necessary to treat bufsiz as an unsigned int, 870/* Note: it is necessary to treat bufsiz as an unsigned int,
902 * with the corresponding cast to a signed int to insure that the 871 * with the corresponding cast to a signed int to insure that the
903 * proper conversion (sign extension) between the register representation of a signed int (msr in 32-bit mode) 872 * proper conversion (sign extension) between the register representation of a signed int (msr in 32-bit mode)
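
This hunk, together with the matching ones in the sparc64 and x86_64 32-bit emulation code later in the patch, deletes the per-architecture sys32_open() copies and points syscall slot 5 at a generic compat handler instead. The whole reason for a separate 32-bit open is that O_LARGEFILE must not be forced on compat tasks; once a shared helper exists in fs/open.c the hand-rolled getname/get_unused_fd/filp_open/fd_install sequences become redundant. A sketch of the consolidated shape, assuming a do_sys_open()/compat_sys_open() pair along the lines of the calls visible in the later hunks (the exact body of compat_sys_open is an assumption, not a verbatim copy of fs/compat.c):

    /* Sketch: the compat entry point forwards to the shared helper
     * without OR-ing in O_LARGEFILE, mirroring what the removed
     * per-arch sys32_open() implementations did by hand.
     */
    asmlinkage long compat_sys_open(const char __user *filename,
                                    int flags, int mode)
    {
            return do_sys_open(filename, flags, mode);
    }
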
diff --git a/arch/ppc64/kernel/time.c b/arch/ppc64/kernel/time.c
index 91ef95ccda4f..9939c206afa4 100644
--- a/arch/ppc64/kernel/time.c
+++ b/arch/ppc64/kernel/time.c
@@ -128,7 +128,7 @@ static __inline__ void timer_check_rtc(void)
128 * We should have an rtc call that only sets the minutes and 128 * We should have an rtc call that only sets the minutes and
129 * seconds like on Intel to avoid problems with non UTC clocks. 129 * seconds like on Intel to avoid problems with non UTC clocks.
130 */ 130 */
131 if ( (time_status & STA_UNSYNC) == 0 && 131 if (ntp_synced() &&
132 xtime.tv_sec - last_rtc_update >= 659 && 132 xtime.tv_sec - last_rtc_update >= 659 &&
133 abs((xtime.tv_nsec/1000) - (1000000-1000000/HZ)) < 500000/HZ && 133 abs((xtime.tv_nsec/1000) - (1000000-1000000/HZ)) < 500000/HZ &&
134 jiffies - wall_jiffies == 1) { 134 jiffies - wall_jiffies == 1) {
@@ -435,10 +435,7 @@ int do_settimeofday(struct timespec *tv)
435 */ 435 */
436 last_rtc_update = new_sec - 658; 436 last_rtc_update = new_sec - 658;
437 437
438 time_adjust = 0; /* stop active adjtime() */ 438 ntp_clear();
439 time_status |= STA_UNSYNC;
440 time_maxerror = NTP_PHASE_LIMIT;
441 time_esterror = NTP_PHASE_LIMIT;
442 439
443 delta_xsec = mulhdu( (tb_last_stamp-do_gtod.varp->tb_orig_stamp), 440 delta_xsec = mulhdu( (tb_last_stamp-do_gtod.varp->tb_orig_stamp),
444 do_gtod.varp->tb_to_xs ); 441 do_gtod.varp->tb_to_xs );
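
The ntp_synced()/ntp_clear() conversions repeated across the time.c files in this series replace open-coded manipulation of the NTP state variables with helpers from linux/timex.h. Judging from the lines being removed, the helpers amount to the following; this is a sketch of the presumed definitions (the real header may spell them as macros rather than inline functions):

    /* Presumed shape of the <linux/timex.h> helpers for this kernel: */
    static inline int ntp_synced(void)
    {
            return !(time_status & STA_UNSYNC);
    }

    static inline void ntp_clear(void)
    {
            time_adjust = 0;                 /* stop active adjtime() */
            time_status |= STA_UNSYNC;       /* clock no longer synchronized */
            time_maxerror = NTP_PHASE_LIMIT;
            time_esterror = NTP_PHASE_LIMIT;
    }
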
diff --git a/arch/ppc64/kernel/traps.c b/arch/ppc64/kernel/traps.c
index a8d5e83ee89f..7467ae508e6e 100644
--- a/arch/ppc64/kernel/traps.c
+++ b/arch/ppc64/kernel/traps.c
@@ -30,6 +30,7 @@
30#include <linux/init.h> 30#include <linux/init.h>
31#include <linux/module.h> 31#include <linux/module.h>
32#include <linux/delay.h> 32#include <linux/delay.h>
33#include <linux/kprobes.h>
33#include <asm/kdebug.h> 34#include <asm/kdebug.h>
34 35
35#include <asm/pgtable.h> 36#include <asm/pgtable.h>
@@ -220,7 +221,7 @@ void instruction_breakpoint_exception(struct pt_regs *regs)
220 _exception(SIGTRAP, regs, TRAP_BRKPT, regs->nip); 221 _exception(SIGTRAP, regs, TRAP_BRKPT, regs->nip);
221} 222}
222 223
223void single_step_exception(struct pt_regs *regs) 224void __kprobes single_step_exception(struct pt_regs *regs)
224{ 225{
225 regs->msr &= ~MSR_SE; /* Turn off 'trace' bit */ 226 regs->msr &= ~MSR_SE; /* Turn off 'trace' bit */
226 227
@@ -398,7 +399,7 @@ check_bug_trap(struct pt_regs *regs)
398 return 0; 399 return 0;
399} 400}
400 401
401void program_check_exception(struct pt_regs *regs) 402void __kprobes program_check_exception(struct pt_regs *regs)
402{ 403{
403 if (debugger_fault_handler(regs)) 404 if (debugger_fault_handler(regs))
404 return; 405 return;
diff --git a/arch/ppc64/kernel/vmlinux.lds.S b/arch/ppc64/kernel/vmlinux.lds.S
index 4103cc13f8d6..0306510bc4ff 100644
--- a/arch/ppc64/kernel/vmlinux.lds.S
+++ b/arch/ppc64/kernel/vmlinux.lds.S
@@ -15,6 +15,7 @@ SECTIONS
15 *(.text .text.*) 15 *(.text .text.*)
16 SCHED_TEXT 16 SCHED_TEXT
17 LOCK_TEXT 17 LOCK_TEXT
18 KPROBES_TEXT
18 *(.fixup) 19 *(.fixup)
19 . = ALIGN(4096); 20 . = ALIGN(4096);
20 _etext = .; 21 _etext = .;
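
The __kprobes annotations sprinkled through the kprobes, traps and fault handlers above and the KPROBES_TEXT entry added to the linker script work together: a function tagged __kprobes is emitted into a dedicated .kprobes.text section, the linker script keeps that section inside kernel text, and the kprobes core refuses to place probes in that address range so the code that handles breakpoints can never itself be probed (which would recurse). A rough sketch of the mechanism, assuming definitions along the lines of this kernel's kprobes header and the generic linker-script macro (names below are assumptions, not verbatim header contents):

    /* Sketch only: the attribute routes the function into .kprobes.text. */
    #define __kprobes __attribute__((__section__(".kprobes.text")))

    /* Provided by the KPROBES_TEXT linker-script fragment. */
    extern char __kprobes_text_start[], __kprobes_text_end[];

    /* The kprobes core can then reject probe addresses in that window. */
    static int in_kprobes_functions(unsigned long addr)
    {
            return addr >= (unsigned long)__kprobes_text_start &&
                   addr <  (unsigned long)__kprobes_text_end;
    }
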
diff --git a/arch/ppc64/mm/fault.c b/arch/ppc64/mm/fault.c
index 20b0f37e8bf8..772f0714a5b7 100644
--- a/arch/ppc64/mm/fault.c
+++ b/arch/ppc64/mm/fault.c
@@ -29,6 +29,7 @@
29#include <linux/interrupt.h> 29#include <linux/interrupt.h>
30#include <linux/smp_lock.h> 30#include <linux/smp_lock.h>
31#include <linux/module.h> 31#include <linux/module.h>
32#include <linux/kprobes.h>
32 33
33#include <asm/page.h> 34#include <asm/page.h>
34#include <asm/pgtable.h> 35#include <asm/pgtable.h>
@@ -84,8 +85,8 @@ static int store_updates_sp(struct pt_regs *regs)
84 * The return value is 0 if the fault was handled, or the signal 85 * The return value is 0 if the fault was handled, or the signal
85 * number if this is a kernel fault that can't be handled here. 86 * number if this is a kernel fault that can't be handled here.
86 */ 87 */
87int do_page_fault(struct pt_regs *regs, unsigned long address, 88int __kprobes do_page_fault(struct pt_regs *regs, unsigned long address,
88 unsigned long error_code) 89 unsigned long error_code)
89{ 90{
90 struct vm_area_struct * vma; 91 struct vm_area_struct * vma;
91 struct mm_struct *mm = current->mm; 92 struct mm_struct *mm = current->mm;
diff --git a/arch/s390/kernel/time.c b/arch/s390/kernel/time.c
index 8ca485676780..2fd75da15495 100644
--- a/arch/s390/kernel/time.c
+++ b/arch/s390/kernel/time.c
@@ -139,10 +139,7 @@ int do_settimeofday(struct timespec *tv)
139 set_normalized_timespec(&xtime, sec, nsec); 139 set_normalized_timespec(&xtime, sec, nsec);
140 set_normalized_timespec(&wall_to_monotonic, wtm_sec, wtm_nsec); 140 set_normalized_timespec(&wall_to_monotonic, wtm_sec, wtm_nsec);
141 141
142 time_adjust = 0; /* stop active adjtime() */ 142 ntp_clear();
143 time_status |= STA_UNSYNC;
144 time_maxerror = NTP_PHASE_LIMIT;
145 time_esterror = NTP_PHASE_LIMIT;
146 write_sequnlock_irq(&xtime_lock); 143 write_sequnlock_irq(&xtime_lock);
147 clock_was_set(); 144 clock_was_set();
148 return 0; 145 return 0;
diff --git a/arch/sh/Kconfig b/arch/sh/Kconfig
index adc8109f8b77..3e804c736e64 100644
--- a/arch/sh/Kconfig
+++ b/arch/sh/Kconfig
@@ -37,6 +37,10 @@ config GENERIC_CALIBRATE_DELAY
37 bool 37 bool
38 default y 38 default y
39 39
40config ARCH_MAY_HAVE_PC_FDC
41 bool
42 default y
43
40source "init/Kconfig" 44source "init/Kconfig"
41 45
42menu "System type" 46menu "System type"
diff --git a/arch/sh/kernel/time.c b/arch/sh/kernel/time.c
index df7a9b9d4cbf..02ca69918d7c 100644
--- a/arch/sh/kernel/time.c
+++ b/arch/sh/kernel/time.c
@@ -215,10 +215,7 @@ int do_settimeofday(struct timespec *tv)
215 set_normalized_timespec(&xtime, sec, nsec); 215 set_normalized_timespec(&xtime, sec, nsec);
216 set_normalized_timespec(&wall_to_monotonic, wtm_sec, wtm_nsec); 216 set_normalized_timespec(&wall_to_monotonic, wtm_sec, wtm_nsec);
217 217
218 time_adjust = 0; /* stop active adjtime() */ 218 ntp_clear();
219 time_status |= STA_UNSYNC;
220 time_maxerror = NTP_PHASE_LIMIT;
221 time_esterror = NTP_PHASE_LIMIT;
222 write_sequnlock_irq(&xtime_lock); 219 write_sequnlock_irq(&xtime_lock);
223 clock_was_set(); 220 clock_was_set();
224 221
@@ -234,7 +231,7 @@ static long last_rtc_update;
234 * timer_interrupt() needs to keep up the real-time clock, 231 * timer_interrupt() needs to keep up the real-time clock,
235 * as well as call the "do_timer()" routine every clocktick 232 * as well as call the "do_timer()" routine every clocktick
236 */ 233 */
237static inline void do_timer_interrupt(int irq, void *dev_id, struct pt_regs *regs) 234static inline void do_timer_interrupt(int irq, struct pt_regs *regs)
238{ 235{
239 do_timer(regs); 236 do_timer(regs);
240#ifndef CONFIG_SMP 237#ifndef CONFIG_SMP
@@ -252,7 +249,7 @@ static inline void do_timer_interrupt(int irq, void *dev_id, struct pt_regs *reg
252 * RTC clock accordingly every ~11 minutes. Set_rtc_mmss() has to be 249 * RTC clock accordingly every ~11 minutes. Set_rtc_mmss() has to be
253 * called as close as possible to 500 ms before the new second starts. 250 * called as close as possible to 500 ms before the new second starts.
254 */ 251 */
255 if ((time_status & STA_UNSYNC) == 0 && 252 if (ntp_synced() &&
256 xtime.tv_sec > last_rtc_update + 660 && 253 xtime.tv_sec > last_rtc_update + 660 &&
257 (xtime.tv_nsec / 1000) >= 500000 - ((unsigned) TICK_SIZE) / 2 && 254 (xtime.tv_nsec / 1000) >= 500000 - ((unsigned) TICK_SIZE) / 2 &&
258 (xtime.tv_nsec / 1000) <= 500000 + ((unsigned) TICK_SIZE) / 2) { 255 (xtime.tv_nsec / 1000) <= 500000 + ((unsigned) TICK_SIZE) / 2) {
@@ -285,7 +282,7 @@ static irqreturn_t timer_interrupt(int irq, void *dev_id, struct pt_regs *regs)
285 * locally disabled. -arca 282 * locally disabled. -arca
286 */ 283 */
287 write_seqlock(&xtime_lock); 284 write_seqlock(&xtime_lock);
288 do_timer_interrupt(irq, NULL, regs); 285 do_timer_interrupt(irq, regs);
289 write_sequnlock(&xtime_lock); 286 write_sequnlock(&xtime_lock);
290 287
291 return IRQ_HANDLED; 288 return IRQ_HANDLED;
diff --git a/arch/sh64/kernel/time.c b/arch/sh64/kernel/time.c
index 6c84da3efc73..f4a62a10053c 100644
--- a/arch/sh64/kernel/time.c
+++ b/arch/sh64/kernel/time.c
@@ -247,10 +247,7 @@ int do_settimeofday(struct timespec *tv)
247 set_normalized_timespec(&xtime, sec, nsec); 247 set_normalized_timespec(&xtime, sec, nsec);
248 set_normalized_timespec(&wall_to_monotonic, wtm_sec, wtm_nsec); 248 set_normalized_timespec(&wall_to_monotonic, wtm_sec, wtm_nsec);
249 249
250 time_adjust = 0; /* stop active adjtime() */ 250 ntp_clear();
251 time_status |= STA_UNSYNC;
252 time_maxerror = NTP_PHASE_LIMIT;
253 time_esterror = NTP_PHASE_LIMIT;
254 write_sequnlock_irq(&xtime_lock); 251 write_sequnlock_irq(&xtime_lock);
255 clock_was_set(); 252 clock_was_set();
256 253
@@ -303,7 +300,7 @@ static long last_rtc_update = 0;
303 * timer_interrupt() needs to keep up the real-time clock, 300 * timer_interrupt() needs to keep up the real-time clock,
304 * as well as call the "do_timer()" routine every clocktick 301 * as well as call the "do_timer()" routine every clocktick
305 */ 302 */
306static inline void do_timer_interrupt(int irq, void *dev_id, struct pt_regs *regs) 303static inline void do_timer_interrupt(int irq, struct pt_regs *regs)
307{ 304{
308 unsigned long long current_ctc; 305 unsigned long long current_ctc;
309 asm ("getcon cr62, %0" : "=r" (current_ctc)); 306 asm ("getcon cr62, %0" : "=r" (current_ctc));
@@ -328,7 +325,7 @@ static inline void do_timer_interrupt(int irq, void *dev_id, struct pt_regs *reg
328 * RTC clock accordingly every ~11 minutes. Set_rtc_mmss() has to be 325 * RTC clock accordingly every ~11 minutes. Set_rtc_mmss() has to be
329 * called as close as possible to 500 ms before the new second starts. 326 * called as close as possible to 500 ms before the new second starts.
330 */ 327 */
331 if ((time_status & STA_UNSYNC) == 0 && 328 if (ntp_synced() &&
332 xtime.tv_sec > last_rtc_update + 660 && 329 xtime.tv_sec > last_rtc_update + 660 &&
333 (xtime.tv_nsec / 1000) >= 500000 - ((unsigned) TICK_SIZE) / 2 && 330 (xtime.tv_nsec / 1000) >= 500000 - ((unsigned) TICK_SIZE) / 2 &&
334 (xtime.tv_nsec / 1000) <= 500000 + ((unsigned) TICK_SIZE) / 2) { 331 (xtime.tv_nsec / 1000) <= 500000 + ((unsigned) TICK_SIZE) / 2) {
@@ -361,7 +358,7 @@ static irqreturn_t timer_interrupt(int irq, void *dev_id, struct pt_regs *regs)
361 * locally disabled. -arca 358 * locally disabled. -arca
362 */ 359 */
363 write_lock(&xtime_lock); 360 write_lock(&xtime_lock);
364 do_timer_interrupt(irq, NULL, regs); 361 do_timer_interrupt(irq, regs);
365 write_unlock(&xtime_lock); 362 write_unlock(&xtime_lock);
366 363
367 return IRQ_HANDLED; 364 return IRQ_HANDLED;
diff --git a/arch/sparc/Kconfig b/arch/sparc/Kconfig
index aca028aa29bf..aba05394d30a 100644
--- a/arch/sparc/Kconfig
+++ b/arch/sparc/Kconfig
@@ -211,6 +211,10 @@ config GENERIC_CALIBRATE_DELAY
211 bool 211 bool
212 default y 212 default y
213 213
214config ARCH_MAY_HAVE_PC_FDC
215 bool
216 default y
217
214config SUN_PM 218config SUN_PM
215 bool 219 bool
216 default y 220 default y
diff --git a/arch/sparc/kernel/pcic.c b/arch/sparc/kernel/pcic.c
index 597d3ff6ad68..36a40697b8d6 100644
--- a/arch/sparc/kernel/pcic.c
+++ b/arch/sparc/kernel/pcic.c
@@ -840,10 +840,7 @@ static int pci_do_settimeofday(struct timespec *tv)
840 840
841 xtime.tv_sec = tv->tv_sec; 841 xtime.tv_sec = tv->tv_sec;
842 xtime.tv_nsec = tv->tv_nsec; 842 xtime.tv_nsec = tv->tv_nsec;
843 time_adjust = 0; /* stop active adjtime() */ 843 ntp_clear();
844 time_status |= STA_UNSYNC;
845 time_maxerror = NTP_PHASE_LIMIT;
846 time_esterror = NTP_PHASE_LIMIT;
847 return 0; 844 return 0;
848} 845}
849 846
diff --git a/arch/sparc/kernel/time.c b/arch/sparc/kernel/time.c
index 3b759aefc170..bc015e980341 100644
--- a/arch/sparc/kernel/time.c
+++ b/arch/sparc/kernel/time.c
@@ -139,7 +139,7 @@ irqreturn_t timer_interrupt(int irq, void *dev_id, struct pt_regs * regs)
139 139
140 140
141 /* Determine when to update the Mostek clock. */ 141 /* Determine when to update the Mostek clock. */
142 if ((time_status & STA_UNSYNC) == 0 && 142 if (ntp_synced() &&
143 xtime.tv_sec > last_rtc_update + 660 && 143 xtime.tv_sec > last_rtc_update + 660 &&
144 (xtime.tv_nsec / 1000) >= 500000 - ((unsigned) TICK_SIZE) / 2 && 144 (xtime.tv_nsec / 1000) >= 500000 - ((unsigned) TICK_SIZE) / 2 &&
145 (xtime.tv_nsec / 1000) <= 500000 + ((unsigned) TICK_SIZE) / 2) { 145 (xtime.tv_nsec / 1000) <= 500000 + ((unsigned) TICK_SIZE) / 2) {
@@ -554,10 +554,7 @@ static int sbus_do_settimeofday(struct timespec *tv)
554 set_normalized_timespec(&xtime, sec, nsec); 554 set_normalized_timespec(&xtime, sec, nsec);
555 set_normalized_timespec(&wall_to_monotonic, wtm_sec, wtm_nsec); 555 set_normalized_timespec(&wall_to_monotonic, wtm_sec, wtm_nsec);
556 556
557 time_adjust = 0; /* stop active adjtime() */ 557 ntp_clear();
558 time_status |= STA_UNSYNC;
559 time_maxerror = NTP_PHASE_LIMIT;
560 time_esterror = NTP_PHASE_LIMIT;
561 return 0; 558 return 0;
562} 559}
563 560
diff --git a/arch/sparc64/Kconfig b/arch/sparc64/Kconfig
index 73ec6aec5ed5..1e9d8638a28a 100644
--- a/arch/sparc64/Kconfig
+++ b/arch/sparc64/Kconfig
@@ -26,6 +26,10 @@ config TIME_INTERPOLATION
26 bool 26 bool
27 default y 27 default y
28 28
29config ARCH_MAY_HAVE_PC_FDC
30 bool
31 default y
32
29choice 33choice
30 prompt "Kernel page size" 34 prompt "Kernel page size"
31 default SPARC64_PAGE_SIZE_8KB 35 default SPARC64_PAGE_SIZE_8KB
diff --git a/arch/sparc64/kernel/kprobes.c b/arch/sparc64/kernel/kprobes.c
index bbf11f85dab1..0d66d07c8c6e 100644
--- a/arch/sparc64/kernel/kprobes.c
+++ b/arch/sparc64/kernel/kprobes.c
@@ -8,6 +8,7 @@
8#include <linux/kprobes.h> 8#include <linux/kprobes.h>
9#include <asm/kdebug.h> 9#include <asm/kdebug.h>
10#include <asm/signal.h> 10#include <asm/signal.h>
11#include <asm/cacheflush.h>
11 12
12/* We do not have hardware single-stepping on sparc64. 13/* We do not have hardware single-stepping on sparc64.
13 * So we implement software single-stepping with breakpoint 14 * So we implement software single-stepping with breakpoint
@@ -37,31 +38,31 @@
37 * - Mark that we are no longer actively in a kprobe. 38 * - Mark that we are no longer actively in a kprobe.
38 */ 39 */
39 40
40int arch_prepare_kprobe(struct kprobe *p) 41int __kprobes arch_prepare_kprobe(struct kprobe *p)
41{ 42{
42 return 0; 43 return 0;
43} 44}
44 45
45void arch_copy_kprobe(struct kprobe *p) 46void __kprobes arch_copy_kprobe(struct kprobe *p)
46{ 47{
47 p->ainsn.insn[0] = *p->addr; 48 p->ainsn.insn[0] = *p->addr;
48 p->ainsn.insn[1] = BREAKPOINT_INSTRUCTION_2; 49 p->ainsn.insn[1] = BREAKPOINT_INSTRUCTION_2;
49 p->opcode = *p->addr; 50 p->opcode = *p->addr;
50} 51}
51 52
52void arch_arm_kprobe(struct kprobe *p) 53void __kprobes arch_arm_kprobe(struct kprobe *p)
53{ 54{
54 *p->addr = BREAKPOINT_INSTRUCTION; 55 *p->addr = BREAKPOINT_INSTRUCTION;
55 flushi(p->addr); 56 flushi(p->addr);
56} 57}
57 58
58void arch_disarm_kprobe(struct kprobe *p) 59void __kprobes arch_disarm_kprobe(struct kprobe *p)
59{ 60{
60 *p->addr = p->opcode; 61 *p->addr = p->opcode;
61 flushi(p->addr); 62 flushi(p->addr);
62} 63}
63 64
64void arch_remove_kprobe(struct kprobe *p) 65void __kprobes arch_remove_kprobe(struct kprobe *p)
65{ 66{
66} 67}
67 68
@@ -111,7 +112,7 @@ static inline void prepare_singlestep(struct kprobe *p, struct pt_regs *regs)
111 } 112 }
112} 113}
113 114
114static int kprobe_handler(struct pt_regs *regs) 115static int __kprobes kprobe_handler(struct pt_regs *regs)
115{ 116{
116 struct kprobe *p; 117 struct kprobe *p;
117 void *addr = (void *) regs->tpc; 118 void *addr = (void *) regs->tpc;
@@ -191,8 +192,9 @@ no_kprobe:
191 * The original INSN location was REAL_PC, it actually 192 * The original INSN location was REAL_PC, it actually
192 * executed at PC and produced destination address NPC. 193 * executed at PC and produced destination address NPC.
193 */ 194 */
194static unsigned long relbranch_fixup(u32 insn, unsigned long real_pc, 195static unsigned long __kprobes relbranch_fixup(u32 insn, unsigned long real_pc,
195 unsigned long pc, unsigned long npc) 196 unsigned long pc,
197 unsigned long npc)
196{ 198{
197 /* Branch not taken, no mods necessary. */ 199 /* Branch not taken, no mods necessary. */
198 if (npc == pc + 0x4UL) 200 if (npc == pc + 0x4UL)
@@ -217,7 +219,8 @@ static unsigned long relbranch_fixup(u32 insn, unsigned long real_pc,
217/* If INSN is an instruction which writes it's PC location 219/* If INSN is an instruction which writes it's PC location
218 * into a destination register, fix that up. 220 * into a destination register, fix that up.
219 */ 221 */
220static void retpc_fixup(struct pt_regs *regs, u32 insn, unsigned long real_pc) 222static void __kprobes retpc_fixup(struct pt_regs *regs, u32 insn,
223 unsigned long real_pc)
221{ 224{
222 unsigned long *slot = NULL; 225 unsigned long *slot = NULL;
223 226
@@ -257,7 +260,7 @@ static void retpc_fixup(struct pt_regs *regs, u32 insn, unsigned long real_pc)
257 * This function prepares to return from the post-single-step 260 * This function prepares to return from the post-single-step
258 * breakpoint trap. 261 * breakpoint trap.
259 */ 262 */
260static void resume_execution(struct kprobe *p, struct pt_regs *regs) 263static void __kprobes resume_execution(struct kprobe *p, struct pt_regs *regs)
261{ 264{
262 u32 insn = p->ainsn.insn[0]; 265 u32 insn = p->ainsn.insn[0];
263 266
@@ -315,8 +318,8 @@ static inline int kprobe_fault_handler(struct pt_regs *regs, int trapnr)
315/* 318/*
316 * Wrapper routine to for handling exceptions. 319 * Wrapper routine to for handling exceptions.
317 */ 320 */
318int kprobe_exceptions_notify(struct notifier_block *self, unsigned long val, 321int __kprobes kprobe_exceptions_notify(struct notifier_block *self,
319 void *data) 322 unsigned long val, void *data)
320{ 323{
321 struct die_args *args = (struct die_args *)data; 324 struct die_args *args = (struct die_args *)data;
322 switch (val) { 325 switch (val) {
@@ -344,7 +347,8 @@ int kprobe_exceptions_notify(struct notifier_block *self, unsigned long val,
344 return NOTIFY_DONE; 347 return NOTIFY_DONE;
345} 348}
346 349
347asmlinkage void kprobe_trap(unsigned long trap_level, struct pt_regs *regs) 350asmlinkage void __kprobes kprobe_trap(unsigned long trap_level,
351 struct pt_regs *regs)
348{ 352{
349 BUG_ON(trap_level != 0x170 && trap_level != 0x171); 353 BUG_ON(trap_level != 0x170 && trap_level != 0x171);
350 354
@@ -368,7 +372,7 @@ static struct pt_regs jprobe_saved_regs;
368static struct pt_regs *jprobe_saved_regs_location; 372static struct pt_regs *jprobe_saved_regs_location;
369static struct sparc_stackf jprobe_saved_stack; 373static struct sparc_stackf jprobe_saved_stack;
370 374
371int setjmp_pre_handler(struct kprobe *p, struct pt_regs *regs) 375int __kprobes setjmp_pre_handler(struct kprobe *p, struct pt_regs *regs)
372{ 376{
373 struct jprobe *jp = container_of(p, struct jprobe, kp); 377 struct jprobe *jp = container_of(p, struct jprobe, kp);
374 378
@@ -390,7 +394,7 @@ int setjmp_pre_handler(struct kprobe *p, struct pt_regs *regs)
390 return 1; 394 return 1;
391} 395}
392 396
393void jprobe_return(void) 397void __kprobes jprobe_return(void)
394{ 398{
395 preempt_enable_no_resched(); 399 preempt_enable_no_resched();
396 __asm__ __volatile__( 400 __asm__ __volatile__(
@@ -403,7 +407,7 @@ extern void jprobe_return_trap_instruction(void);
403 407
404extern void __show_regs(struct pt_regs * regs); 408extern void __show_regs(struct pt_regs * regs);
405 409
406int longjmp_break_handler(struct kprobe *p, struct pt_regs *regs) 410int __kprobes longjmp_break_handler(struct kprobe *p, struct pt_regs *regs)
407{ 411{
408 u32 *addr = (u32 *) regs->tpc; 412 u32 *addr = (u32 *) regs->tpc;
409 413
diff --git a/arch/sparc64/kernel/sunos_ioctl32.c b/arch/sparc64/kernel/sunos_ioctl32.c
index 87c1aeb02220..7654b8a7f03a 100644
--- a/arch/sparc64/kernel/sunos_ioctl32.c
+++ b/arch/sparc64/kernel/sunos_ioctl32.c
@@ -152,11 +152,12 @@ asmlinkage int sunos_ioctl (int fd, u32 cmd, u32 arg)
152 ret = compat_sys_ioctl(fd, SIOCGIFCONF, arg); 152 ret = compat_sys_ioctl(fd, SIOCGIFCONF, arg);
153 goto out; 153 goto out;
154 154
155 case _IOW('i', 21, struct ifreq): /* SIOCSIFMTU */ 155 case _IOW('i', 21, struct ifreq32):
156 ret = sys_ioctl(fd, SIOCSIFMTU, arg); 156 ret = compat_sys_ioctl(fd, SIOCSIFMTU, arg);
157 goto out; 157 goto out;
158 case _IOWR('i', 22, struct ifreq): /* SIOCGIFMTU */ 158
159 ret = sys_ioctl(fd, SIOCGIFMTU, arg); 159 case _IOWR('i', 22, struct ifreq32):
160 ret = compat_sys_ioctl(fd, SIOCGIFMTU, arg);
160 goto out; 161 goto out;
161 162
162 case _IOWR('i', 23, struct ifreq32): 163 case _IOWR('i', 23, struct ifreq32):
diff --git a/arch/sparc64/kernel/sys_sparc32.c b/arch/sparc64/kernel/sys_sparc32.c
index 1d3aa588df8a..7f6239ed2521 100644
--- a/arch/sparc64/kernel/sys_sparc32.c
+++ b/arch/sparc64/kernel/sys_sparc32.c
@@ -1002,29 +1002,7 @@ asmlinkage long sys32_adjtimex(struct timex32 __user *utp)
1002asmlinkage long sparc32_open(const char __user *filename, 1002asmlinkage long sparc32_open(const char __user *filename,
1003 int flags, int mode) 1003 int flags, int mode)
1004{ 1004{
1005 char * tmp; 1005 return do_sys_open(filename, flags, mode);
1006 int fd, error;
1007
1008 tmp = getname(filename);
1009 fd = PTR_ERR(tmp);
1010 if (!IS_ERR(tmp)) {
1011 fd = get_unused_fd();
1012 if (fd >= 0) {
1013 struct file * f = filp_open(tmp, flags, mode);
1014 error = PTR_ERR(f);
1015 if (IS_ERR(f))
1016 goto out_error;
1017 fd_install(fd, f);
1018 }
1019out:
1020 putname(tmp);
1021 }
1022 return fd;
1023
1024out_error:
1025 put_unused_fd(fd);
1026 fd = error;
1027 goto out;
1028} 1006}
1029 1007
1030extern unsigned long do_mremap(unsigned long addr, 1008extern unsigned long do_mremap(unsigned long addr,
diff --git a/arch/sparc64/kernel/time.c b/arch/sparc64/kernel/time.c
index 362b9c26871b..3f08a32f51a1 100644
--- a/arch/sparc64/kernel/time.c
+++ b/arch/sparc64/kernel/time.c
@@ -449,7 +449,7 @@ static inline void timer_check_rtc(void)
449 static long last_rtc_update; 449 static long last_rtc_update;
450 450
451 /* Determine when to update the Mostek clock. */ 451 /* Determine when to update the Mostek clock. */
452 if ((time_status & STA_UNSYNC) == 0 && 452 if (ntp_synced() &&
453 xtime.tv_sec > last_rtc_update + 660 && 453 xtime.tv_sec > last_rtc_update + 660 &&
454 (xtime.tv_nsec / 1000) >= 500000 - ((unsigned) TICK_SIZE) / 2 && 454 (xtime.tv_nsec / 1000) >= 500000 - ((unsigned) TICK_SIZE) / 2 &&
455 (xtime.tv_nsec / 1000) <= 500000 + ((unsigned) TICK_SIZE) / 2) { 455 (xtime.tv_nsec / 1000) <= 500000 + ((unsigned) TICK_SIZE) / 2) {
diff --git a/arch/sparc64/kernel/vmlinux.lds.S b/arch/sparc64/kernel/vmlinux.lds.S
index 950423da8a6a..f47d0be39378 100644
--- a/arch/sparc64/kernel/vmlinux.lds.S
+++ b/arch/sparc64/kernel/vmlinux.lds.S
@@ -17,6 +17,7 @@ SECTIONS
17 *(.text) 17 *(.text)
18 SCHED_TEXT 18 SCHED_TEXT
19 LOCK_TEXT 19 LOCK_TEXT
20 KPROBES_TEXT
20 *(.gnu.warning) 21 *(.gnu.warning)
21 } =0 22 } =0
22 _etext = .; 23 _etext = .;
diff --git a/arch/sparc64/mm/fault.c b/arch/sparc64/mm/fault.c
index 52e9375288a9..db1e3310e907 100644
--- a/arch/sparc64/mm/fault.c
+++ b/arch/sparc64/mm/fault.c
@@ -18,6 +18,7 @@
18#include <linux/smp_lock.h> 18#include <linux/smp_lock.h>
19#include <linux/init.h> 19#include <linux/init.h>
20#include <linux/interrupt.h> 20#include <linux/interrupt.h>
21#include <linux/kprobes.h>
21 22
22#include <asm/page.h> 23#include <asm/page.h>
23#include <asm/pgtable.h> 24#include <asm/pgtable.h>
@@ -117,8 +118,9 @@ unsigned long __init prom_probe_memory (void)
117 return tally; 118 return tally;
118} 119}
119 120
120static void unhandled_fault(unsigned long address, struct task_struct *tsk, 121static void __kprobes unhandled_fault(unsigned long address,
121 struct pt_regs *regs) 122 struct task_struct *tsk,
123 struct pt_regs *regs)
122{ 124{
123 if ((unsigned long) address < PAGE_SIZE) { 125 if ((unsigned long) address < PAGE_SIZE) {
124 printk(KERN_ALERT "Unable to handle kernel NULL " 126 printk(KERN_ALERT "Unable to handle kernel NULL "
@@ -304,7 +306,7 @@ cannot_handle:
304 unhandled_fault (address, current, regs); 306 unhandled_fault (address, current, regs);
305} 307}
306 308
307asmlinkage void do_sparc64_fault(struct pt_regs *regs) 309asmlinkage void __kprobes do_sparc64_fault(struct pt_regs *regs)
308{ 310{
309 struct mm_struct *mm = current->mm; 311 struct mm_struct *mm = current->mm;
310 struct vm_area_struct *vma; 312 struct vm_area_struct *vma;
diff --git a/arch/sparc64/mm/init.c b/arch/sparc64/mm/init.c
index 3fbaf342a452..fdb1ebb308c9 100644
--- a/arch/sparc64/mm/init.c
+++ b/arch/sparc64/mm/init.c
@@ -19,6 +19,7 @@
19#include <linux/pagemap.h> 19#include <linux/pagemap.h>
20#include <linux/fs.h> 20#include <linux/fs.h>
21#include <linux/seq_file.h> 21#include <linux/seq_file.h>
22#include <linux/kprobes.h>
22 23
23#include <asm/head.h> 24#include <asm/head.h>
24#include <asm/system.h> 25#include <asm/system.h>
@@ -250,7 +251,7 @@ out:
250 put_cpu(); 251 put_cpu();
251} 252}
252 253
253void flush_icache_range(unsigned long start, unsigned long end) 254void __kprobes flush_icache_range(unsigned long start, unsigned long end)
254{ 255{
255 /* Cheetah has coherent I-cache. */ 256 /* Cheetah has coherent I-cache. */
256 if (tlb_type == spitfire) { 257 if (tlb_type == spitfire) {
diff --git a/arch/sparc64/mm/ultra.S b/arch/sparc64/mm/ultra.S
index 8dfa825eca51..b2ee9b53227f 100644
--- a/arch/sparc64/mm/ultra.S
+++ b/arch/sparc64/mm/ultra.S
@@ -119,6 +119,7 @@ __spitfire_flush_tlb_mm_slow:
119#else 119#else
120#error unsupported PAGE_SIZE 120#error unsupported PAGE_SIZE
121#endif 121#endif
122 .section .kprobes.text, "ax"
122 .align 32 123 .align 32
123 .globl __flush_icache_page 124 .globl __flush_icache_page
124__flush_icache_page: /* %o0 = phys_page */ 125__flush_icache_page: /* %o0 = phys_page */
@@ -201,6 +202,7 @@ dflush4:stxa %g0, [%o4] ASI_DCACHE_TAG
201 nop 202 nop
202#endif /* DCACHE_ALIASING_POSSIBLE */ 203#endif /* DCACHE_ALIASING_POSSIBLE */
203 204
205 .previous .text
204 .align 32 206 .align 32
205__prefill_dtlb: 207__prefill_dtlb:
206 rdpr %pstate, %g7 208 rdpr %pstate, %g7
diff --git a/arch/um/Makefile-i386 b/arch/um/Makefile-i386
index a777e57dbf89..1ab431a53ac3 100644
--- a/arch/um/Makefile-i386
+++ b/arch/um/Makefile-i386
@@ -27,7 +27,7 @@ export LDFLAGS HOSTCFLAGS HOSTLDFLAGS UML_OBJCOPYFLAGS
27endif 27endif
28endif 28endif
29 29
30CFLAGS += -U__$(SUBARCH)__ -U$(SUBARCH) $(STUB_CFLAGS) 30CFLAGS += -U__$(SUBARCH)__ -U$(SUBARCH)
31 31
32ifneq ($(CONFIG_GPROF),y) 32ifneq ($(CONFIG_GPROF),y)
33ARCH_CFLAGS += -DUM_FASTCALL 33ARCH_CFLAGS += -DUM_FASTCALL
diff --git a/arch/um/include/common-offsets.h b/arch/um/include/common-offsets.h
index d705daa2d854..0aa620970adb 100644
--- a/arch/um/include/common-offsets.h
+++ b/arch/um/include/common-offsets.h
@@ -12,3 +12,4 @@ DEFINE_STR(UM_KERN_WARNING, KERN_WARNING);
12DEFINE_STR(UM_KERN_NOTICE, KERN_NOTICE); 12DEFINE_STR(UM_KERN_NOTICE, KERN_NOTICE);
13DEFINE_STR(UM_KERN_INFO, KERN_INFO); 13DEFINE_STR(UM_KERN_INFO, KERN_INFO);
14DEFINE_STR(UM_KERN_DEBUG, KERN_DEBUG); 14DEFINE_STR(UM_KERN_DEBUG, KERN_DEBUG);
15DEFINE(HOST_ELF_CLASS, ELF_CLASS);
diff --git a/arch/um/include/um_uaccess.h b/arch/um/include/um_uaccess.h
index 6e348cb6de24..84c0868cd561 100644
--- a/arch/um/include/um_uaccess.h
+++ b/arch/um/include/um_uaccess.h
@@ -20,13 +20,6 @@
20#define access_ok(type, addr, size) \ 20#define access_ok(type, addr, size) \
21 CHOOSE_MODE_PROC(access_ok_tt, access_ok_skas, type, addr, size) 21 CHOOSE_MODE_PROC(access_ok_tt, access_ok_skas, type, addr, size)
22 22
23/* this function will go away soon - use access_ok() instead */
24static inline int __deprecated verify_area(int type, const void __user *addr, unsigned long size)
25{
26 return (CHOOSE_MODE_PROC(verify_area_tt, verify_area_skas, type, addr,
27 size));
28}
29
30static inline int copy_from_user(void *to, const void __user *from, int n) 23static inline int copy_from_user(void *to, const void __user *from, int n)
31{ 24{
32 return(CHOOSE_MODE_PROC(copy_from_user_tt, copy_from_user_skas, to, 25 return(CHOOSE_MODE_PROC(copy_from_user_tt, copy_from_user_skas, to,
diff --git a/arch/um/kernel/mem.c b/arch/um/kernel/mem.c
index 5597bd39e6b5..64fa062cc119 100644
--- a/arch/um/kernel/mem.c
+++ b/arch/um/kernel/mem.c
@@ -196,7 +196,7 @@ static void init_highmem(void)
196 196
197static void __init fixaddr_user_init( void) 197static void __init fixaddr_user_init( void)
198{ 198{
199#if CONFIG_ARCH_REUSE_HOST_VSYSCALL_AREA 199#ifdef CONFIG_ARCH_REUSE_HOST_VSYSCALL_AREA
200 long size = FIXADDR_USER_END - FIXADDR_USER_START; 200 long size = FIXADDR_USER_END - FIXADDR_USER_START;
201 pgd_t *pgd; 201 pgd_t *pgd;
202 pud_t *pud; 202 pud_t *pud;
diff --git a/arch/um/os-Linux/Makefile b/arch/um/os-Linux/Makefile
index d3c1560e3ed8..7a1662419c0c 100644
--- a/arch/um/os-Linux/Makefile
+++ b/arch/um/os-Linux/Makefile
@@ -9,6 +9,9 @@ obj-y = aio.o elf_aux.o file.o process.o signal.o start_up.o time.o tt.o \
9USER_OBJS := aio.o elf_aux.o file.o process.o signal.o start_up.o time.o tt.o \ 9USER_OBJS := aio.o elf_aux.o file.o process.o signal.o start_up.o time.o tt.o \
10 tty.o 10 tty.o
11 11
12elf_aux.o: $(ARCH_DIR)/kernel-offsets.h
13CFLAGS_elf_aux.o += -I$(objtree)/arch/um
14
12CFLAGS_user_syms.o += -DSUBARCH_$(SUBARCH) 15CFLAGS_user_syms.o += -DSUBARCH_$(SUBARCH)
13 16
14HAVE_AIO_ABI := $(shell [ -r /usr/include/linux/aio_abi.h ] && \ 17HAVE_AIO_ABI := $(shell [ -r /usr/include/linux/aio_abi.h ] && \
diff --git a/arch/um/os-Linux/elf_aux.c b/arch/um/os-Linux/elf_aux.c
index 4cca3e9c23fe..1399520a8588 100644
--- a/arch/um/os-Linux/elf_aux.c
+++ b/arch/um/os-Linux/elf_aux.c
@@ -12,8 +12,9 @@
12#include "init.h" 12#include "init.h"
13#include "elf_user.h" 13#include "elf_user.h"
14#include "mem_user.h" 14#include "mem_user.h"
15#include <kernel-offsets.h>
15 16
16#if ELF_CLASS == ELFCLASS32 17#if HOST_ELF_CLASS == ELFCLASS32
17typedef Elf32_auxv_t elf_auxv_t; 18typedef Elf32_auxv_t elf_auxv_t;
18#else 19#else
19typedef Elf64_auxv_t elf_auxv_t; 20typedef Elf64_auxv_t elf_auxv_t;
diff --git a/arch/um/sys-i386/kernel-offsets.c b/arch/um/sys-i386/kernel-offsets.c
index 9f8ecd1fdd96..a1070af2bcd8 100644
--- a/arch/um/sys-i386/kernel-offsets.c
+++ b/arch/um/sys-i386/kernel-offsets.c
@@ -2,6 +2,7 @@
2#include <linux/stddef.h> 2#include <linux/stddef.h>
3#include <linux/sched.h> 3#include <linux/sched.h>
4#include <linux/time.h> 4#include <linux/time.h>
5#include <linux/elf.h>
5#include <asm/page.h> 6#include <asm/page.h>
6 7
7#define DEFINE(sym, val) \ 8#define DEFINE(sym, val) \
diff --git a/arch/um/sys-x86_64/kernel-offsets.c b/arch/um/sys-x86_64/kernel-offsets.c
index 220e875cbe29..998541eade41 100644
--- a/arch/um/sys-x86_64/kernel-offsets.c
+++ b/arch/um/sys-x86_64/kernel-offsets.c
@@ -2,6 +2,7 @@
2#include <linux/stddef.h> 2#include <linux/stddef.h>
3#include <linux/sched.h> 3#include <linux/sched.h>
4#include <linux/time.h> 4#include <linux/time.h>
5#include <linux/elf.h>
5#include <asm/page.h> 6#include <asm/page.h>
6 7
7#define DEFINE(sym, val) \ 8#define DEFINE(sym, val) \
diff --git a/arch/v850/kernel/time.c b/arch/v850/kernel/time.c
index f722a268238a..ea3fd8844ff0 100644
--- a/arch/v850/kernel/time.c
+++ b/arch/v850/kernel/time.c
@@ -66,7 +66,7 @@ static irqreturn_t timer_interrupt (int irq, void *dummy, struct pt_regs *regs)
66 * CMOS clock accordingly every ~11 minutes. Set_rtc_mmss() has to be 66 * CMOS clock accordingly every ~11 minutes. Set_rtc_mmss() has to be
67 * called as close as possible to 500 ms before the new second starts. 67 * called as close as possible to 500 ms before the new second starts.
68 */ 68 */
69 if ((time_status & STA_UNSYNC) == 0 && 69 if (ntp_synced() &&
70 xtime.tv_sec > last_rtc_update + 660 && 70 xtime.tv_sec > last_rtc_update + 660 &&
71 (xtime.tv_nsec / 1000) >= 500000 - ((unsigned) TICK_SIZE) / 2 && 71 (xtime.tv_nsec / 1000) >= 500000 - ((unsigned) TICK_SIZE) / 2 &&
72 (xtime.tv_nsec / 1000) <= 500000 + ((unsigned) TICK_SIZE) / 2) { 72 (xtime.tv_nsec / 1000) <= 500000 + ((unsigned) TICK_SIZE) / 2) {
@@ -169,10 +169,7 @@ int do_settimeofday(struct timespec *tv)
169 xtime.tv_sec = tv->tv_sec; 169 xtime.tv_sec = tv->tv_sec;
170 xtime.tv_nsec = tv->tv_nsec; 170 xtime.tv_nsec = tv->tv_nsec;
171 171
172 time_adjust = 0; /* stop active adjtime () */ 172 ntp_clear();
173 time_status |= STA_UNSYNC;
174 time_maxerror = NTP_PHASE_LIMIT;
175 time_esterror = NTP_PHASE_LIMIT;
176 173
177 write_sequnlock_irq (&xtime_lock); 174 write_sequnlock_irq (&xtime_lock);
178 clock_was_set(); 175 clock_was_set();
diff --git a/arch/x86_64/Kconfig b/arch/x86_64/Kconfig
index 75e52c57f19c..8f868b67ef0f 100644
--- a/arch/x86_64/Kconfig
+++ b/arch/x86_64/Kconfig
@@ -65,6 +65,10 @@ config GENERIC_IOMAP
65 bool 65 bool
66 default y 66 default y
67 67
68config ARCH_MAY_HAVE_PC_FDC
69 bool
70 default y
71
68source "init/Kconfig" 72source "init/Kconfig"
69 73
70 74
@@ -441,6 +445,11 @@ config ISA_DMA_API
441 bool 445 bool
442 default y 446 default y
443 447
448config GENERIC_PENDING_IRQ
449 bool
450 depends on GENERIC_HARDIRQS && SMP
451 default y
452
444menu "Power management options" 453menu "Power management options"
445 454
446source kernel/power/Kconfig 455source kernel/power/Kconfig
diff --git a/arch/x86_64/boot/setup.S b/arch/x86_64/boot/setup.S
index ff58b2832b75..12ea0b6c52e2 100644
--- a/arch/x86_64/boot/setup.S
+++ b/arch/x86_64/boot/setup.S
@@ -81,7 +81,7 @@ start:
81# This is the setup header, and it must start at %cs:2 (old 0x9020:2) 81# This is the setup header, and it must start at %cs:2 (old 0x9020:2)
82 82
83 .ascii "HdrS" # header signature 83 .ascii "HdrS" # header signature
84 .word 0x0203 # header version number (>= 0x0105) 84 .word 0x0204 # header version number (>= 0x0105)
85 # or else old loadlin-1.5 will fail) 85 # or else old loadlin-1.5 will fail)
86realmode_swtch: .word 0, 0 # default_switch, SETUPSEG 86realmode_swtch: .word 0, 0 # default_switch, SETUPSEG
87start_sys_seg: .word SYSSEG 87start_sys_seg: .word SYSSEG
diff --git a/arch/x86_64/boot/tools/build.c b/arch/x86_64/boot/tools/build.c
index 18b5bac1c428..c44f5e2ec100 100644
--- a/arch/x86_64/boot/tools/build.c
+++ b/arch/x86_64/boot/tools/build.c
@@ -178,7 +178,9 @@ int main(int argc, char ** argv)
178 die("Output: seek failed"); 178 die("Output: seek failed");
179 buf[0] = (sys_size & 0xff); 179 buf[0] = (sys_size & 0xff);
180 buf[1] = ((sys_size >> 8) & 0xff); 180 buf[1] = ((sys_size >> 8) & 0xff);
181 if (write(1, buf, 2) != 2) 181 buf[2] = ((sys_size >> 16) & 0xff);
182 buf[3] = ((sys_size >> 24) & 0xff);
183 if (write(1, buf, 4) != 4)
182 die("Write of image length failed"); 184 die("Write of image length failed");
183 185
184 return 0; /* Everything is OK */ 186 return 0; /* Everything is OK */
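
The setup.S and build.c hunks belong together: the boot protocol version is bumped from 0x0203 to 0x0204, and build.c now writes the size of the protected-mode image as a full 32-bit little-endian value instead of 16 bits. Since syssize is counted in 16-byte paragraphs, the old two-byte field capped the image at roughly 1 MB; the wider field lifts that limit. The byte-by-byte store in build.c is simply a little-endian encode of the paragraph count:

    /* Little-endian encode of the 32-bit paragraph count, as build.c does: */
    static void put_le32(unsigned char buf[4], unsigned long sys_size)
    {
            buf[0] = sys_size & 0xff;
            buf[1] = (sys_size >> 8)  & 0xff;
            buf[2] = (sys_size >> 16) & 0xff;
            buf[3] = (sys_size >> 24) & 0xff;
    }
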
diff --git a/arch/x86_64/ia32/ia32entry.S b/arch/x86_64/ia32/ia32entry.S
index c45d6a05b984..f174083d5567 100644
--- a/arch/x86_64/ia32/ia32entry.S
+++ b/arch/x86_64/ia32/ia32entry.S
@@ -307,7 +307,7 @@ ia32_sys_call_table:
307 .quad stub32_fork 307 .quad stub32_fork
308 .quad sys_read 308 .quad sys_read
309 .quad sys_write 309 .quad sys_write
310 .quad sys32_open /* 5 */ 310 .quad compat_sys_open /* 5 */
311 .quad sys_close 311 .quad sys_close
312 .quad sys32_waitpid 312 .quad sys32_waitpid
313 .quad sys_creat 313 .quad sys_creat
diff --git a/arch/x86_64/ia32/sys_ia32.c b/arch/x86_64/ia32/sys_ia32.c
index be996d1b691e..04d80406ce4f 100644
--- a/arch/x86_64/ia32/sys_ia32.c
+++ b/arch/x86_64/ia32/sys_ia32.c
@@ -969,32 +969,6 @@ long sys32_kill(int pid, int sig)
969 return sys_kill(pid, sig); 969 return sys_kill(pid, sig);
970} 970}
971 971
972asmlinkage long sys32_open(const char __user * filename, int flags, int mode)
973{
974 char * tmp;
975 int fd, error;
976
977 /* don't force O_LARGEFILE */
978 tmp = getname(filename);
979 fd = PTR_ERR(tmp);
980 if (!IS_ERR(tmp)) {
981 fd = get_unused_fd();
982 if (fd >= 0) {
983 struct file *f = filp_open(tmp, flags, mode);
984 error = PTR_ERR(f);
985 if (IS_ERR(f)) {
986 put_unused_fd(fd);
987 fd = error;
988 } else {
989 fsnotify_open(f->f_dentry);
990 fd_install(fd, f);
991 }
992 }
993 putname(tmp);
994 }
995 return fd;
996}
997
998extern asmlinkage long 972extern asmlinkage long
999sys_timer_create(clockid_t which_clock, 973sys_timer_create(clockid_t which_clock,
1000 struct sigevent __user *timer_event_spec, 974 struct sigevent __user *timer_event_spec,
diff --git a/arch/x86_64/kernel/e820.c b/arch/x86_64/kernel/e820.c
index b548dea4e5b9..116ac5f53dce 100644
--- a/arch/x86_64/kernel/e820.c
+++ b/arch/x86_64/kernel/e820.c
@@ -85,7 +85,7 @@ int __init e820_mapped(unsigned long start, unsigned long end, unsigned type)
85 struct e820entry *ei = &e820.map[i]; 85 struct e820entry *ei = &e820.map[i];
86 if (type && ei->type != type) 86 if (type && ei->type != type)
87 continue; 87 continue;
88 if (ei->addr >= end || ei->addr + ei->size < start) 88 if (ei->addr >= end || ei->addr + ei->size <= start)
89 continue; 89 continue;
90 return 1; 90 return 1;
91 } 91 }
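
The e820_mapped() change is a boundary fix: e820 ranges are half-open, so an entry that ends exactly at start does not overlap [start, end) and must be skipped; the old strict `<` let such an entry count as a hit. The loop's skip condition is just the negation of the usual half-open overlap test, sketched here for clarity:

    /* Two half-open ranges [a1, a2) and [b1, b2) overlap iff: */
    static int ranges_overlap(unsigned long a1, unsigned long a2,
                              unsigned long b1, unsigned long b2)
    {
            return a1 < b2 && b1 < a2;   /* strict comparison on both ends */
    }

An entry is skipped when this is false, i.e. when ei->addr >= end or ei->addr + ei->size <= start, which is exactly what the corrected line tests.
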
diff --git a/arch/x86_64/kernel/entry.S b/arch/x86_64/kernel/entry.S
index 096d470e280f..be51dbe1f75e 100644
--- a/arch/x86_64/kernel/entry.S
+++ b/arch/x86_64/kernel/entry.S
@@ -784,8 +784,9 @@ ENTRY(execve)
784 ret 784 ret
785 CFI_ENDPROC 785 CFI_ENDPROC
786 786
787ENTRY(page_fault) 787KPROBE_ENTRY(page_fault)
788 errorentry do_page_fault 788 errorentry do_page_fault
789 .previous .text
789 790
790ENTRY(coprocessor_error) 791ENTRY(coprocessor_error)
791 zeroentry do_coprocessor_error 792 zeroentry do_coprocessor_error
@@ -797,13 +798,14 @@ ENTRY(device_not_available)
797 zeroentry math_state_restore 798 zeroentry math_state_restore
798 799
799 /* runs on exception stack */ 800 /* runs on exception stack */
800ENTRY(debug) 801KPROBE_ENTRY(debug)
801 CFI_STARTPROC 802 CFI_STARTPROC
802 pushq $0 803 pushq $0
803 CFI_ADJUST_CFA_OFFSET 8 804 CFI_ADJUST_CFA_OFFSET 8
804 paranoidentry do_debug 805 paranoidentry do_debug
805 jmp paranoid_exit 806 jmp paranoid_exit
806 CFI_ENDPROC 807 CFI_ENDPROC
808 .previous .text
807 809
808 /* runs on exception stack */ 810 /* runs on exception stack */
809ENTRY(nmi) 811ENTRY(nmi)
@@ -854,8 +856,9 @@ paranoid_schedule:
854 jmp paranoid_userspace 856 jmp paranoid_userspace
855 CFI_ENDPROC 857 CFI_ENDPROC
856 858
857ENTRY(int3) 859KPROBE_ENTRY(int3)
858 zeroentry do_int3 860 zeroentry do_int3
861 .previous .text
859 862
860ENTRY(overflow) 863ENTRY(overflow)
861 zeroentry do_overflow 864 zeroentry do_overflow
@@ -892,8 +895,9 @@ ENTRY(stack_segment)
892 jmp paranoid_exit 895 jmp paranoid_exit
893 CFI_ENDPROC 896 CFI_ENDPROC
894 897
895ENTRY(general_protection) 898KPROBE_ENTRY(general_protection)
896 errorentry do_general_protection 899 errorentry do_general_protection
900 .previous .text
897 901
898ENTRY(alignment_check) 902ENTRY(alignment_check)
899 errorentry do_alignment_check 903 errorentry do_alignment_check
diff --git a/arch/x86_64/kernel/genapic.c b/arch/x86_64/kernel/genapic.c
index 30c843a5efdd..f062aa03bab7 100644
--- a/arch/x86_64/kernel/genapic.c
+++ b/arch/x86_64/kernel/genapic.c
@@ -25,7 +25,7 @@
25#endif 25#endif
26 26
27/* which logical CPU number maps to which CPU (physical APIC ID) */ 27/* which logical CPU number maps to which CPU (physical APIC ID) */
28u8 x86_cpu_to_apicid[NR_CPUS] = { [0 ... NR_CPUS-1] = BAD_APICID }; 28u8 x86_cpu_to_apicid[NR_CPUS] __read_mostly = { [0 ... NR_CPUS-1] = BAD_APICID };
29EXPORT_SYMBOL(x86_cpu_to_apicid); 29EXPORT_SYMBOL(x86_cpu_to_apicid);
30u8 x86_cpu_to_log_apicid[NR_CPUS] = { [0 ... NR_CPUS-1] = BAD_APICID }; 30u8 x86_cpu_to_log_apicid[NR_CPUS] = { [0 ... NR_CPUS-1] = BAD_APICID };
31 31
diff --git a/arch/x86_64/kernel/genapic_cluster.c b/arch/x86_64/kernel/genapic_cluster.c
index 9703da7202e3..f6523dd1bc09 100644
--- a/arch/x86_64/kernel/genapic_cluster.c
+++ b/arch/x86_64/kernel/genapic_cluster.c
@@ -72,10 +72,14 @@ static void cluster_send_IPI_mask(cpumask_t mask, int vector)
72static void cluster_send_IPI_allbutself(int vector) 72static void cluster_send_IPI_allbutself(int vector)
73{ 73{
74 cpumask_t mask = cpu_online_map; 74 cpumask_t mask = cpu_online_map;
75 cpu_clear(smp_processor_id(), mask); 75 int me = get_cpu(); /* Ensure we are not preempted when we clear */
76
77 cpu_clear(me, mask);
76 78
77 if (!cpus_empty(mask)) 79 if (!cpus_empty(mask))
78 cluster_send_IPI_mask(mask, vector); 80 cluster_send_IPI_mask(mask, vector);
81
82 put_cpu();
79} 83}
80 84
81static void cluster_send_IPI_all(int vector) 85static void cluster_send_IPI_all(int vector)
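
The cluster_send_IPI_allbutself() change is a preemption-safety fix: reading the current CPU number and then clearing it from the mask is only meaningful if the task cannot migrate in between, so the CPU id is now obtained with get_cpu(), which disables preemption until the matching put_cpu(). The general pattern, sketched (the helper below is illustrative, not kernel code):

    /* Pin the task to the current CPU while its id is in use,
     * then re-enable preemption.
     */
    void send_to_all_but_self(void (*send)(int cpu), int ncpus)
    {
            int me = get_cpu();              /* disables preemption */
            int cpu;

            for (cpu = 0; cpu < ncpus; cpu++)
                    if (cpu != me)
                            send(cpu);

            put_cpu();                       /* re-enables preemption */
    }
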
diff --git a/arch/x86_64/kernel/io_apic.c b/arch/x86_64/kernel/io_apic.c
index d206d7e49cf5..ba1a744e9bf0 100644
--- a/arch/x86_64/kernel/io_apic.c
+++ b/arch/x86_64/kernel/io_apic.c
@@ -70,7 +70,7 @@ static struct irq_pin_list {
70 short apic, pin, next; 70 short apic, pin, next;
71} irq_2_pin[PIN_MAP_SIZE]; 71} irq_2_pin[PIN_MAP_SIZE];
72 72
73int vector_irq[NR_VECTORS] = { [0 ... NR_VECTORS - 1] = -1}; 73int vector_irq[NR_VECTORS] __read_mostly = { [0 ... NR_VECTORS - 1] = -1};
74#ifdef CONFIG_PCI_MSI 74#ifdef CONFIG_PCI_MSI
75#define vector_to_irq(vector) \ 75#define vector_to_irq(vector) \
76 (platform_legacy_irq(vector) ? vector : vector_irq[vector]) 76 (platform_legacy_irq(vector) ? vector : vector_irq[vector])
@@ -78,6 +78,54 @@ int vector_irq[NR_VECTORS] = { [0 ... NR_VECTORS - 1] = -1};
78#define vector_to_irq(vector) (vector) 78#define vector_to_irq(vector) (vector)
79#endif 79#endif
80 80
81#define __DO_ACTION(R, ACTION, FINAL) \
82 \
83{ \
84 int pin; \
85 struct irq_pin_list *entry = irq_2_pin + irq; \
86 \
87 for (;;) { \
88 unsigned int reg; \
89 pin = entry->pin; \
90 if (pin == -1) \
91 break; \
92 reg = io_apic_read(entry->apic, 0x10 + R + pin*2); \
93 reg ACTION; \
94 io_apic_modify(entry->apic, reg); \
95 if (!entry->next) \
96 break; \
97 entry = irq_2_pin + entry->next; \
98 } \
99 FINAL; \
100}
101
102#ifdef CONFIG_SMP
103static void set_ioapic_affinity_irq(unsigned int irq, cpumask_t mask)
104{
105 unsigned long flags;
106 unsigned int dest;
107 cpumask_t tmp;
108
109 cpus_and(tmp, mask, cpu_online_map);
110 if (cpus_empty(tmp))
111 tmp = TARGET_CPUS;
112
113 cpus_and(mask, tmp, CPU_MASK_ALL);
114
115 dest = cpu_mask_to_apicid(mask);
116
117 /*
118 * Only the high 8 bits are valid.
119 */
120 dest = SET_APIC_LOGICAL_ID(dest);
121
122 spin_lock_irqsave(&ioapic_lock, flags);
123 __DO_ACTION(1, = dest, )
124 set_irq_info(irq, mask);
125 spin_unlock_irqrestore(&ioapic_lock, flags);
126}
127#endif
128
81/* 129/*
82 * The common case is 1:1 IRQ<->pin mappings. Sometimes there are 130 * The common case is 1:1 IRQ<->pin mappings. Sometimes there are
83 * shared ISA-space IRQs, so we have to support them. We are super 131 * shared ISA-space IRQs, so we have to support them. We are super
@@ -101,26 +149,6 @@ static void add_pin_to_irq(unsigned int irq, int apic, int pin)
101 entry->pin = pin; 149 entry->pin = pin;
102} 150}
103 151
104#define __DO_ACTION(R, ACTION, FINAL) \
105 \
106{ \
107 int pin; \
108 struct irq_pin_list *entry = irq_2_pin + irq; \
109 \
110 for (;;) { \
111 unsigned int reg; \
112 pin = entry->pin; \
113 if (pin == -1) \
114 break; \
115 reg = io_apic_read(entry->apic, 0x10 + R + pin*2); \
116 reg ACTION; \
117 io_apic_modify(entry->apic, reg); \
118 if (!entry->next) \
119 break; \
120 entry = irq_2_pin + entry->next; \
121 } \
122 FINAL; \
123}
124 152
125#define DO_ACTION(name,R,ACTION, FINAL) \ 153#define DO_ACTION(name,R,ACTION, FINAL) \
126 \ 154 \
@@ -655,7 +683,7 @@ static inline int IO_APIC_irq_trigger(int irq)
655} 683}
656 684
657/* irq_vectors is indexed by the sum of all RTEs in all I/O APICs. */ 685/* irq_vectors is indexed by the sum of all RTEs in all I/O APICs. */
658u8 irq_vector[NR_IRQ_VECTORS] = { FIRST_DEVICE_VECTOR , 0 }; 686u8 irq_vector[NR_IRQ_VECTORS] __read_mostly = { FIRST_DEVICE_VECTOR , 0 };
659 687
660int assign_irq_vector(int irq) 688int assign_irq_vector(int irq)
661{ 689{
@@ -767,6 +795,7 @@ static void __init setup_IO_APIC_irqs(void)
767 spin_lock_irqsave(&ioapic_lock, flags); 795 spin_lock_irqsave(&ioapic_lock, flags);
768 io_apic_write(apic, 0x11+2*pin, *(((int *)&entry)+1)); 796 io_apic_write(apic, 0x11+2*pin, *(((int *)&entry)+1));
769 io_apic_write(apic, 0x10+2*pin, *(((int *)&entry)+0)); 797 io_apic_write(apic, 0x10+2*pin, *(((int *)&entry)+0));
798 set_native_irq_info(irq, TARGET_CPUS);
770 spin_unlock_irqrestore(&ioapic_lock, flags); 799 spin_unlock_irqrestore(&ioapic_lock, flags);
771 } 800 }
772 } 801 }
@@ -1314,6 +1343,7 @@ static unsigned int startup_edge_ioapic_irq(unsigned int irq)
1314 */ 1343 */
1315static void ack_edge_ioapic_irq(unsigned int irq) 1344static void ack_edge_ioapic_irq(unsigned int irq)
1316{ 1345{
1346 move_irq(irq);
1317 if ((irq_desc[irq].status & (IRQ_PENDING | IRQ_DISABLED)) 1347 if ((irq_desc[irq].status & (IRQ_PENDING | IRQ_DISABLED))
1318 == (IRQ_PENDING | IRQ_DISABLED)) 1348 == (IRQ_PENDING | IRQ_DISABLED))
1319 mask_IO_APIC_irq(irq); 1349 mask_IO_APIC_irq(irq);
@@ -1343,26 +1373,10 @@ static unsigned int startup_level_ioapic_irq (unsigned int irq)
1343 1373
1344static void end_level_ioapic_irq (unsigned int irq) 1374static void end_level_ioapic_irq (unsigned int irq)
1345{ 1375{
1376 move_irq(irq);
1346 ack_APIC_irq(); 1377 ack_APIC_irq();
1347} 1378}
1348 1379
1349static void set_ioapic_affinity_irq(unsigned int irq, cpumask_t mask)
1350{
1351 unsigned long flags;
1352 unsigned int dest;
1353
1354 dest = cpu_mask_to_apicid(mask);
1355
1356 /*
1357 * Only the high 8 bits are valid.
1358 */
1359 dest = SET_APIC_LOGICAL_ID(dest);
1360
1361 spin_lock_irqsave(&ioapic_lock, flags);
1362 __DO_ACTION(1, = dest, )
1363 spin_unlock_irqrestore(&ioapic_lock, flags);
1364}
1365
1366#ifdef CONFIG_PCI_MSI 1380#ifdef CONFIG_PCI_MSI
1367static unsigned int startup_edge_ioapic_vector(unsigned int vector) 1381static unsigned int startup_edge_ioapic_vector(unsigned int vector)
1368{ 1382{
@@ -1375,6 +1389,7 @@ static void ack_edge_ioapic_vector(unsigned int vector)
1375{ 1389{
1376 int irq = vector_to_irq(vector); 1390 int irq = vector_to_irq(vector);
1377 1391
1392 move_native_irq(vector);
1378 ack_edge_ioapic_irq(irq); 1393 ack_edge_ioapic_irq(irq);
1379} 1394}
1380 1395
@@ -1389,6 +1404,7 @@ static void end_level_ioapic_vector (unsigned int vector)
1389{ 1404{
1390 int irq = vector_to_irq(vector); 1405 int irq = vector_to_irq(vector);
1391 1406
1407 move_native_irq(vector);
1392 end_level_ioapic_irq(irq); 1408 end_level_ioapic_irq(irq);
1393} 1409}
1394 1410
@@ -1406,14 +1422,17 @@ static void unmask_IO_APIC_vector (unsigned int vector)
1406 unmask_IO_APIC_irq(irq); 1422 unmask_IO_APIC_irq(irq);
1407} 1423}
1408 1424
1425#ifdef CONFIG_SMP
1409static void set_ioapic_affinity_vector (unsigned int vector, 1426static void set_ioapic_affinity_vector (unsigned int vector,
1410 cpumask_t cpu_mask) 1427 cpumask_t cpu_mask)
1411{ 1428{
1412 int irq = vector_to_irq(vector); 1429 int irq = vector_to_irq(vector);
1413 1430
1431 set_native_irq_info(vector, cpu_mask);
1414 set_ioapic_affinity_irq(irq, cpu_mask); 1432 set_ioapic_affinity_irq(irq, cpu_mask);
1415} 1433}
1416#endif 1434#endif // CONFIG_SMP
1435#endif // CONFIG_PCI_MSI
1417 1436
1418/* 1437/*
1419 * Level and edge triggered IO-APIC interrupts need different handling, 1438 * Level and edge triggered IO-APIC interrupts need different handling,
@@ -1424,7 +1443,7 @@ static void set_ioapic_affinity_vector (unsigned int vector,
1424 * races. 1443 * races.
1425 */ 1444 */
1426 1445
1427static struct hw_interrupt_type ioapic_edge_type = { 1446static struct hw_interrupt_type ioapic_edge_type __read_mostly = {
1428 .typename = "IO-APIC-edge", 1447 .typename = "IO-APIC-edge",
1429 .startup = startup_edge_ioapic, 1448 .startup = startup_edge_ioapic,
1430 .shutdown = shutdown_edge_ioapic, 1449 .shutdown = shutdown_edge_ioapic,
@@ -1432,10 +1451,12 @@ static struct hw_interrupt_type ioapic_edge_type = {
1432 .disable = disable_edge_ioapic, 1451 .disable = disable_edge_ioapic,
1433 .ack = ack_edge_ioapic, 1452 .ack = ack_edge_ioapic,
1434 .end = end_edge_ioapic, 1453 .end = end_edge_ioapic,
1454#ifdef CONFIG_SMP
1435 .set_affinity = set_ioapic_affinity, 1455 .set_affinity = set_ioapic_affinity,
1456#endif
1436}; 1457};
1437 1458
1438static struct hw_interrupt_type ioapic_level_type = { 1459static struct hw_interrupt_type ioapic_level_type __read_mostly = {
1439 .typename = "IO-APIC-level", 1460 .typename = "IO-APIC-level",
1440 .startup = startup_level_ioapic, 1461 .startup = startup_level_ioapic,
1441 .shutdown = shutdown_level_ioapic, 1462 .shutdown = shutdown_level_ioapic,
@@ -1443,7 +1464,9 @@ static struct hw_interrupt_type ioapic_level_type = {
1443 .disable = disable_level_ioapic, 1464 .disable = disable_level_ioapic,
1444 .ack = mask_and_ack_level_ioapic, 1465 .ack = mask_and_ack_level_ioapic,
1445 .end = end_level_ioapic, 1466 .end = end_level_ioapic,
1467#ifdef CONFIG_SMP
1446 .set_affinity = set_ioapic_affinity, 1468 .set_affinity = set_ioapic_affinity,
1469#endif
1447}; 1470};
1448 1471
1449static inline void init_IO_APIC_traps(void) 1472static inline void init_IO_APIC_traps(void)
@@ -1506,7 +1529,7 @@ static void ack_lapic_irq (unsigned int irq)
1506 1529
1507static void end_lapic_irq (unsigned int i) { /* nothing */ } 1530static void end_lapic_irq (unsigned int i) { /* nothing */ }
1508 1531
1509static struct hw_interrupt_type lapic_irq_type = { 1532static struct hw_interrupt_type lapic_irq_type __read_mostly = {
1510 .typename = "local-APIC-edge", 1533 .typename = "local-APIC-edge",
1511 .startup = NULL, /* startup_irq() not used for IRQ0 */ 1534 .startup = NULL, /* startup_irq() not used for IRQ0 */
1512 .shutdown = NULL, /* shutdown_irq() not used for IRQ0 */ 1535 .shutdown = NULL, /* shutdown_irq() not used for IRQ0 */
@@ -1918,6 +1941,7 @@ int io_apic_set_pci_routing (int ioapic, int pin, int irq, int edge_level, int a
1918 spin_lock_irqsave(&ioapic_lock, flags); 1941 spin_lock_irqsave(&ioapic_lock, flags);
1919 io_apic_write(ioapic, 0x11+2*pin, *(((int *)&entry)+1)); 1942 io_apic_write(ioapic, 0x11+2*pin, *(((int *)&entry)+1));
1920 io_apic_write(ioapic, 0x10+2*pin, *(((int *)&entry)+0)); 1943 io_apic_write(ioapic, 0x10+2*pin, *(((int *)&entry)+0));
1944 set_native_irq_info(use_pci_vector() ? entry.vector : irq, TARGET_CPUS);
1921 spin_unlock_irqrestore(&ioapic_lock, flags); 1945 spin_unlock_irqrestore(&ioapic_lock, flags);
1922 1946
1923 return 0; 1947 return 0;
@@ -1931,6 +1955,7 @@ int io_apic_set_pci_routing (int ioapic, int pin, int irq, int edge_level, int a
1931 * we need to reprogram the ioredtbls to cater for the cpus which have come online 1955 * we need to reprogram the ioredtbls to cater for the cpus which have come online
1932 * so mask in all cases should simply be TARGET_CPUS 1956 * so mask in all cases should simply be TARGET_CPUS
1933 */ 1957 */
1958#ifdef CONFIG_SMP
1934void __init setup_ioapic_dest(void) 1959void __init setup_ioapic_dest(void)
1935{ 1960{
1936 int pin, ioapic, irq, irq_entry; 1961 int pin, ioapic, irq, irq_entry;
@@ -1949,3 +1974,4 @@ void __init setup_ioapic_dest(void)
1949 1974
1950 } 1975 }
1951} 1976}
1977#endif
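
The io_apic.c changes wire the x86-64 IO-APIC code into the generic IRQ-migration machinery selected by the new GENERIC_PENDING_IRQ option: instead of rewriting a redirection entry at an arbitrary moment, an affinity request can be recorded and move_irq()/move_native_irq() applies it from the ack/end path, where the interrupt is known to be quiesced. A schematic of that deferred-move pattern follows; it is a standalone illustration, not the kernel's implementation, and the structure and names are assumptions:

    #define NR_IRQS_EXAMPLE 256

    struct pending_move {
            int          pending;
            unsigned int mask;      /* stand-in for a real cpumask_t */
    };

    static struct pending_move moves[NR_IRQS_EXAMPLE];

    /* Record the requested affinity without touching the hardware. */
    void request_affinity(unsigned int irq, unsigned int mask)
    {
            moves[irq].mask = mask;
            moves[irq].pending = 1;
    }

    /* Called from the interrupt's ack/end path, where reprogramming
     * the routing entry is safe; applies any deferred request.
     */
    void apply_pending_move(unsigned int irq,
                            void (*set_affinity)(unsigned int, unsigned int))
    {
            if (!moves[irq].pending)
                    return;
            moves[irq].pending = 0;
            set_affinity(irq, moves[irq].mask);
    }
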
diff --git a/arch/x86_64/kernel/kprobes.c b/arch/x86_64/kernel/kprobes.c
index 5c6dc7051482..df08c43276a0 100644
--- a/arch/x86_64/kernel/kprobes.c
+++ b/arch/x86_64/kernel/kprobes.c
@@ -74,7 +74,7 @@ static inline int is_IF_modifier(kprobe_opcode_t *insn)
74 return 0; 74 return 0;
75} 75}
76 76
77int arch_prepare_kprobe(struct kprobe *p) 77int __kprobes arch_prepare_kprobe(struct kprobe *p)
78{ 78{
79 /* insn: must be on special executable page on x86_64. */ 79 /* insn: must be on special executable page on x86_64. */
80 up(&kprobe_mutex); 80 up(&kprobe_mutex);
@@ -189,7 +189,7 @@ static inline s32 *is_riprel(u8 *insn)
189 return NULL; 189 return NULL;
190} 190}
191 191
192void arch_copy_kprobe(struct kprobe *p) 192void __kprobes arch_copy_kprobe(struct kprobe *p)
193{ 193{
194 s32 *ripdisp; 194 s32 *ripdisp;
195 memcpy(p->ainsn.insn, p->addr, MAX_INSN_SIZE); 195 memcpy(p->ainsn.insn, p->addr, MAX_INSN_SIZE);
@@ -215,21 +215,21 @@ void arch_copy_kprobe(struct kprobe *p)
215 p->opcode = *p->addr; 215 p->opcode = *p->addr;
216} 216}
217 217
218void arch_arm_kprobe(struct kprobe *p) 218void __kprobes arch_arm_kprobe(struct kprobe *p)
219{ 219{
220 *p->addr = BREAKPOINT_INSTRUCTION; 220 *p->addr = BREAKPOINT_INSTRUCTION;
221 flush_icache_range((unsigned long) p->addr, 221 flush_icache_range((unsigned long) p->addr,
222 (unsigned long) p->addr + sizeof(kprobe_opcode_t)); 222 (unsigned long) p->addr + sizeof(kprobe_opcode_t));
223} 223}
224 224
225void arch_disarm_kprobe(struct kprobe *p) 225void __kprobes arch_disarm_kprobe(struct kprobe *p)
226{ 226{
227 *p->addr = p->opcode; 227 *p->addr = p->opcode;
228 flush_icache_range((unsigned long) p->addr, 228 flush_icache_range((unsigned long) p->addr,
229 (unsigned long) p->addr + sizeof(kprobe_opcode_t)); 229 (unsigned long) p->addr + sizeof(kprobe_opcode_t));
230} 230}
231 231
232void arch_remove_kprobe(struct kprobe *p) 232void __kprobes arch_remove_kprobe(struct kprobe *p)
233{ 233{
234 up(&kprobe_mutex); 234 up(&kprobe_mutex);
235 free_insn_slot(p->ainsn.insn); 235 free_insn_slot(p->ainsn.insn);
@@ -261,7 +261,7 @@ static inline void set_current_kprobe(struct kprobe *p, struct pt_regs *regs)
261 kprobe_saved_rflags &= ~IF_MASK; 261 kprobe_saved_rflags &= ~IF_MASK;
262} 262}
263 263
264static void prepare_singlestep(struct kprobe *p, struct pt_regs *regs) 264static void __kprobes prepare_singlestep(struct kprobe *p, struct pt_regs *regs)
265{ 265{
266 regs->eflags |= TF_MASK; 266 regs->eflags |= TF_MASK;
267 regs->eflags &= ~IF_MASK; 267 regs->eflags &= ~IF_MASK;
@@ -272,7 +272,8 @@ static void prepare_singlestep(struct kprobe *p, struct pt_regs *regs)
272 regs->rip = (unsigned long)p->ainsn.insn; 272 regs->rip = (unsigned long)p->ainsn.insn;
273} 273}
274 274
275void arch_prepare_kretprobe(struct kretprobe *rp, struct pt_regs *regs) 275void __kprobes arch_prepare_kretprobe(struct kretprobe *rp,
276 struct pt_regs *regs)
276{ 277{
277 unsigned long *sara = (unsigned long *)regs->rsp; 278 unsigned long *sara = (unsigned long *)regs->rsp;
278 struct kretprobe_instance *ri; 279 struct kretprobe_instance *ri;
@@ -295,7 +296,7 @@ void arch_prepare_kretprobe(struct kretprobe *rp, struct pt_regs *regs)
295 * Interrupts are disabled on entry as trap3 is an interrupt gate and they 296 * Interrupts are disabled on entry as trap3 is an interrupt gate and they
296 * remain disabled thorough out this function. 297 * remain disabled thorough out this function.
297 */ 298 */
298int kprobe_handler(struct pt_regs *regs) 299int __kprobes kprobe_handler(struct pt_regs *regs)
299{ 300{
300 struct kprobe *p; 301 struct kprobe *p;
301 int ret = 0; 302 int ret = 0;
@@ -310,7 +311,8 @@ int kprobe_handler(struct pt_regs *regs)
310 Disarm the probe we just hit, and ignore it. */ 311 Disarm the probe we just hit, and ignore it. */
311 p = get_kprobe(addr); 312 p = get_kprobe(addr);
312 if (p) { 313 if (p) {
313 if (kprobe_status == KPROBE_HIT_SS) { 314 if (kprobe_status == KPROBE_HIT_SS &&
315 *p->ainsn.insn == BREAKPOINT_INSTRUCTION) {
314 regs->eflags &= ~TF_MASK; 316 regs->eflags &= ~TF_MASK;
315 regs->eflags |= kprobe_saved_rflags; 317 regs->eflags |= kprobe_saved_rflags;
316 unlock_kprobes(); 318 unlock_kprobes();
@@ -360,7 +362,10 @@ int kprobe_handler(struct pt_regs *regs)
360 * either a probepoint or a debugger breakpoint 362 * either a probepoint or a debugger breakpoint
361 * at this address. In either case, no further 363 * at this address. In either case, no further
362 * handling of this interrupt is appropriate. 364 * handling of this interrupt is appropriate.
365 * Back up over the (now missing) int3 and run
366 * the original instruction.
363 */ 367 */
368 regs->rip = (unsigned long)addr;
364 ret = 1; 369 ret = 1;
365 } 370 }
366 /* Not one of ours: let kernel handle it */ 371 /* Not one of ours: let kernel handle it */
@@ -399,7 +404,7 @@ no_kprobe:
399/* 404/*
400 * Called when we hit the probe point at kretprobe_trampoline 405 * Called when we hit the probe point at kretprobe_trampoline
401 */ 406 */
402int trampoline_probe_handler(struct kprobe *p, struct pt_regs *regs) 407int __kprobes trampoline_probe_handler(struct kprobe *p, struct pt_regs *regs)
403{ 408{
404 struct kretprobe_instance *ri = NULL; 409 struct kretprobe_instance *ri = NULL;
405 struct hlist_head *head; 410 struct hlist_head *head;
@@ -478,7 +483,7 @@ int trampoline_probe_handler(struct kprobe *p, struct pt_regs *regs)
478 * that is atop the stack is the address following the copied instruction. 483 * that is atop the stack is the address following the copied instruction.
479 * We need to make it the address following the original instruction. 484 * We need to make it the address following the original instruction.
480 */ 485 */
481static void resume_execution(struct kprobe *p, struct pt_regs *regs) 486static void __kprobes resume_execution(struct kprobe *p, struct pt_regs *regs)
482{ 487{
483 unsigned long *tos = (unsigned long *)regs->rsp; 488 unsigned long *tos = (unsigned long *)regs->rsp;
484 unsigned long next_rip = 0; 489 unsigned long next_rip = 0;
@@ -536,7 +541,7 @@ static void resume_execution(struct kprobe *p, struct pt_regs *regs)
536 * Interrupts are disabled on entry as trap1 is an interrupt gate and they 541 * Interrupts are disabled on entry as trap1 is an interrupt gate and they
537 * remain disabled thoroughout this function. And we hold kprobe lock. 542 * remain disabled thoroughout this function. And we hold kprobe lock.
538 */ 543 */
539int post_kprobe_handler(struct pt_regs *regs) 544int __kprobes post_kprobe_handler(struct pt_regs *regs)
540{ 545{
541 if (!kprobe_running()) 546 if (!kprobe_running())
542 return 0; 547 return 0;
@@ -571,7 +576,7 @@ out:
571} 576}
572 577
573/* Interrupts disabled, kprobe_lock held. */ 578/* Interrupts disabled, kprobe_lock held. */
574int kprobe_fault_handler(struct pt_regs *regs, int trapnr) 579int __kprobes kprobe_fault_handler(struct pt_regs *regs, int trapnr)
575{ 580{
576 if (current_kprobe->fault_handler 581 if (current_kprobe->fault_handler
577 && current_kprobe->fault_handler(current_kprobe, regs, trapnr)) 582 && current_kprobe->fault_handler(current_kprobe, regs, trapnr))
@@ -590,8 +595,8 @@ int kprobe_fault_handler(struct pt_regs *regs, int trapnr)
590/* 595/*
591 * Wrapper routine for handling exceptions. 596 * Wrapper routine for handling exceptions.
592 */ 597 */
593int kprobe_exceptions_notify(struct notifier_block *self, unsigned long val, 598int __kprobes kprobe_exceptions_notify(struct notifier_block *self,
594 void *data) 599 unsigned long val, void *data)
595{ 600{
596 struct die_args *args = (struct die_args *)data; 601 struct die_args *args = (struct die_args *)data;
597 switch (val) { 602 switch (val) {
@@ -619,7 +624,7 @@ int kprobe_exceptions_notify(struct notifier_block *self, unsigned long val,
619 return NOTIFY_DONE; 624 return NOTIFY_DONE;
620} 625}
621 626
622int setjmp_pre_handler(struct kprobe *p, struct pt_regs *regs) 627int __kprobes setjmp_pre_handler(struct kprobe *p, struct pt_regs *regs)
623{ 628{
624 struct jprobe *jp = container_of(p, struct jprobe, kp); 629 struct jprobe *jp = container_of(p, struct jprobe, kp);
625 unsigned long addr; 630 unsigned long addr;
@@ -640,7 +645,7 @@ int setjmp_pre_handler(struct kprobe *p, struct pt_regs *regs)
640 return 1; 645 return 1;
641} 646}
642 647
643void jprobe_return(void) 648void __kprobes jprobe_return(void)
644{ 649{
645 preempt_enable_no_resched(); 650 preempt_enable_no_resched();
646 asm volatile (" xchg %%rbx,%%rsp \n" 651 asm volatile (" xchg %%rbx,%%rsp \n"
@@ -651,7 +656,7 @@ void jprobe_return(void)
651 (jprobe_saved_rsp):"memory"); 656 (jprobe_saved_rsp):"memory");
652} 657}
653 658
654int longjmp_break_handler(struct kprobe *p, struct pt_regs *regs) 659int __kprobes longjmp_break_handler(struct kprobe *p, struct pt_regs *regs)
655{ 660{
656 u8 *addr = (u8 *) (regs->rip - 1); 661 u8 *addr = (u8 *) (regs->rip - 1);
657 unsigned long stack_addr = (unsigned long)jprobe_saved_rsp; 662 unsigned long stack_addr = (unsigned long)jprobe_saved_rsp;
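Every handler in this file picks up the __kprobes marker, which moves the function into a dedicated .kprobes.text section so the kprobes core never plants a probe inside its own machinery. A rough sketch of how that annotation is defined in kernels of this vintage (the exact macro lives in the kprobes header):

    /* place the function body in .kprobes.text instead of .text */
    #define __kprobes __attribute__((__section__(".kprobes.text")))

    int __kprobes kprobe_handler(struct pt_regs *regs);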
diff --git a/arch/x86_64/kernel/nmi.c b/arch/x86_64/kernel/nmi.c
index 64a8e05d5811..caf164959e19 100644
--- a/arch/x86_64/kernel/nmi.c
+++ b/arch/x86_64/kernel/nmi.c
@@ -463,6 +463,8 @@ void touch_nmi_watchdog (void)
463 */ 463 */
464 for (i = 0; i < NR_CPUS; i++) 464 for (i = 0; i < NR_CPUS; i++)
465 per_cpu(nmi_touch, i) = 1; 465 per_cpu(nmi_touch, i) = 1;
466
467 touch_softlockup_watchdog();
466} 468}
467 469
468void nmi_watchdog_tick (struct pt_regs * regs, unsigned reason) 470void nmi_watchdog_tick (struct pt_regs * regs, unsigned reason)
@@ -522,14 +524,14 @@ asmlinkage void do_nmi(struct pt_regs * regs, long error_code)
522 524
523 nmi_enter(); 525 nmi_enter();
524 add_pda(__nmi_count,1); 526 add_pda(__nmi_count,1);
525 if (!nmi_callback(regs, cpu)) 527 if (!rcu_dereference(nmi_callback)(regs, cpu))
526 default_do_nmi(regs); 528 default_do_nmi(regs);
527 nmi_exit(); 529 nmi_exit();
528} 530}
529 531
530void set_nmi_callback(nmi_callback_t callback) 532void set_nmi_callback(nmi_callback_t callback)
531{ 533{
532 nmi_callback = callback; 534 rcu_assign_pointer(nmi_callback, callback);
533} 535}
534 536
535void unset_nmi_callback(void) 537void unset_nmi_callback(void)
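Reads and writes of nmi_callback now go through the RCU pointer primitives: rcu_assign_pointer() publishes the new callback with the write barrier needed so an NMI on another CPU never sees a half-initialized handler, and rcu_dereference() orders the read on the consuming side. A hedged sketch of the tear-down sequence this enables for a caller (my_callback_data and the exact placement of the synchronize call are illustrative):

    /* restore the default handler, then wait for NMIs already running
     * the old callback to finish before freeing its data */
    unset_nmi_callback();
    synchronize_sched();            /* every in-flight NMI handler has returned */
    kfree(my_callback_data);        /* hypothetical per-callback state */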
diff --git a/arch/x86_64/kernel/process.c b/arch/x86_64/kernel/process.c
index 7577f9d7a75d..8661f82ac70b 100644
--- a/arch/x86_64/kernel/process.c
+++ b/arch/x86_64/kernel/process.c
@@ -310,6 +310,7 @@ void __show_regs(struct pt_regs * regs)
310 310
311void show_regs(struct pt_regs *regs) 311void show_regs(struct pt_regs *regs)
312{ 312{
313 printk("CPU %d:", smp_processor_id());
313 __show_regs(regs); 314 __show_regs(regs);
314 show_trace(&regs->rsp); 315 show_trace(&regs->rsp);
315} 316}
diff --git a/arch/x86_64/kernel/setup.c b/arch/x86_64/kernel/setup.c
index 116a491e2961..b356f8e6adfe 100644
--- a/arch/x86_64/kernel/setup.c
+++ b/arch/x86_64/kernel/setup.c
@@ -65,7 +65,7 @@
65 * Machine setup.. 65 * Machine setup..
66 */ 66 */
67 67
68struct cpuinfo_x86 boot_cpu_data; 68struct cpuinfo_x86 boot_cpu_data __read_mostly;
69 69
70unsigned long mmu_cr4_features; 70unsigned long mmu_cr4_features;
71 71
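boot_cpu_data, and several more globals below, gain __read_mostly: data that is read on hot paths but almost never written is grouped into its own section so it does not share cache lines with frequently modified variables. Roughly, the annotation is just a section attribute (a sketch, not the exact header text):

    #define __read_mostly __attribute__((__section__(".data.read_mostly")))

    static int my_tunable __read_mostly = 1;   /* hypothetical example */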
diff --git a/arch/x86_64/kernel/setup64.c b/arch/x86_64/kernel/setup64.c
index 34082c1cc41e..e3ffcacc8c90 100644
--- a/arch/x86_64/kernel/setup64.c
+++ b/arch/x86_64/kernel/setup64.c
@@ -36,7 +36,7 @@ struct desc_ptr idt_descr = { 256 * 16, (unsigned long) idt_table };
36 36
37char boot_cpu_stack[IRQSTACKSIZE] __attribute__((section(".bss.page_aligned"))); 37char boot_cpu_stack[IRQSTACKSIZE] __attribute__((section(".bss.page_aligned")));
38 38
39unsigned long __supported_pte_mask = ~0UL; 39unsigned long __supported_pte_mask __read_mostly = ~0UL;
40static int do_not_nx __initdata = 0; 40static int do_not_nx __initdata = 0;
41 41
42/* noexec=on|off 42/* noexec=on|off
diff --git a/arch/x86_64/kernel/smpboot.c b/arch/x86_64/kernel/smpboot.c
index fa25e39fe54d..90aeccd15190 100644
--- a/arch/x86_64/kernel/smpboot.c
+++ b/arch/x86_64/kernel/smpboot.c
@@ -62,13 +62,13 @@
62/* Number of siblings per CPU package */ 62/* Number of siblings per CPU package */
63int smp_num_siblings = 1; 63int smp_num_siblings = 1;
64/* Package ID of each logical CPU */ 64/* Package ID of each logical CPU */
65u8 phys_proc_id[NR_CPUS] = { [0 ... NR_CPUS-1] = BAD_APICID }; 65u8 phys_proc_id[NR_CPUS] __read_mostly = { [0 ... NR_CPUS-1] = BAD_APICID };
66u8 cpu_core_id[NR_CPUS] = { [0 ... NR_CPUS-1] = BAD_APICID }; 66u8 cpu_core_id[NR_CPUS] __read_mostly = { [0 ... NR_CPUS-1] = BAD_APICID };
67EXPORT_SYMBOL(phys_proc_id); 67EXPORT_SYMBOL(phys_proc_id);
68EXPORT_SYMBOL(cpu_core_id); 68EXPORT_SYMBOL(cpu_core_id);
69 69
70/* Bitmask of currently online CPUs */ 70/* Bitmask of currently online CPUs */
71cpumask_t cpu_online_map; 71cpumask_t cpu_online_map __read_mostly;
72 72
73EXPORT_SYMBOL(cpu_online_map); 73EXPORT_SYMBOL(cpu_online_map);
74 74
@@ -88,8 +88,8 @@ struct cpuinfo_x86 cpu_data[NR_CPUS] __cacheline_aligned;
88/* Set when the idlers are all forked */ 88/* Set when the idlers are all forked */
89int smp_threads_ready; 89int smp_threads_ready;
90 90
91cpumask_t cpu_sibling_map[NR_CPUS] __cacheline_aligned; 91cpumask_t cpu_sibling_map[NR_CPUS] __read_mostly;
92cpumask_t cpu_core_map[NR_CPUS] __cacheline_aligned; 92cpumask_t cpu_core_map[NR_CPUS] __read_mostly;
93EXPORT_SYMBOL(cpu_core_map); 93EXPORT_SYMBOL(cpu_core_map);
94 94
95/* 95/*
diff --git a/arch/x86_64/kernel/time.c b/arch/x86_64/kernel/time.c
index 66bf6ddeb0c3..7b6abe058253 100644
--- a/arch/x86_64/kernel/time.c
+++ b/arch/x86_64/kernel/time.c
@@ -176,10 +176,7 @@ int do_settimeofday(struct timespec *tv)
176 set_normalized_timespec(&xtime, sec, nsec); 176 set_normalized_timespec(&xtime, sec, nsec);
177 set_normalized_timespec(&wall_to_monotonic, wtm_sec, wtm_nsec); 177 set_normalized_timespec(&wall_to_monotonic, wtm_sec, wtm_nsec);
178 178
179 time_adjust = 0; /* stop active adjtime() */ 179 ntp_clear();
180 time_status |= STA_UNSYNC;
181 time_maxerror = NTP_PHASE_LIMIT;
182 time_esterror = NTP_PHASE_LIMIT;
183 180
184 write_sequnlock_irq(&xtime_lock); 181 write_sequnlock_irq(&xtime_lock);
185 clock_was_set(); 182 clock_was_set();
@@ -471,7 +468,7 @@ static irqreturn_t timer_interrupt(int irq, void *dev_id, struct pt_regs *regs)
471 * off) isn't likely to go away much sooner anyway. 468 * off) isn't likely to go away much sooner anyway.
472 */ 469 */
473 470
474 if ((~time_status & STA_UNSYNC) && xtime.tv_sec > rtc_update && 471 if (ntp_synced() && xtime.tv_sec > rtc_update &&
475 abs(xtime.tv_nsec - 500000000) <= tick_nsec / 2) { 472 abs(xtime.tv_nsec - 500000000) <= tick_nsec / 2) {
476 set_rtc_mmss(xtime.tv_sec); 473 set_rtc_mmss(xtime.tv_sec);
477 rtc_update = xtime.tv_sec + 660; 474 rtc_update = xtime.tv_sec + 660;
@@ -1041,6 +1038,7 @@ static int timer_resume(struct sys_device *dev)
1041 write_sequnlock_irqrestore(&xtime_lock,flags); 1038 write_sequnlock_irqrestore(&xtime_lock,flags);
1042 jiffies += sleep_length; 1039 jiffies += sleep_length;
1043 wall_jiffies += sleep_length; 1040 wall_jiffies += sleep_length;
1041 touch_softlockup_watchdog();
1044 return 0; 1042 return 0;
1045} 1043}
1046 1044
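do_settimeofday() and the RTC sync check now use the common NTP helpers instead of poking the timekeeping variables directly. Their approximate effect, as a sketch (the real definitions live in the shared timekeeping code):

    static inline void ntp_clear(void)
    {
        time_adjust = 0;                /* stop any active adjtime() */
        time_status |= STA_UNSYNC;
        time_maxerror = NTP_PHASE_LIMIT;
        time_esterror = NTP_PHASE_LIMIT;
    }

    static inline int ntp_synced(void)
    {
        return !(time_status & STA_UNSYNC);
    }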
diff --git a/arch/x86_64/kernel/traps.c b/arch/x86_64/kernel/traps.c
index 6ead433a3885..f238d6078a5a 100644
--- a/arch/x86_64/kernel/traps.c
+++ b/arch/x86_64/kernel/traps.c
@@ -29,6 +29,7 @@
29#include <linux/module.h> 29#include <linux/module.h>
30#include <linux/moduleparam.h> 30#include <linux/moduleparam.h>
31#include <linux/nmi.h> 31#include <linux/nmi.h>
32#include <linux/kprobes.h>
32 33
33#include <asm/system.h> 34#include <asm/system.h>
34#include <asm/uaccess.h> 35#include <asm/uaccess.h>
@@ -419,8 +420,9 @@ void die_nmi(char *str, struct pt_regs *regs)
419 do_exit(SIGSEGV); 420 do_exit(SIGSEGV);
420} 421}
421 422
422static void do_trap(int trapnr, int signr, char *str, 423static void __kprobes do_trap(int trapnr, int signr, char *str,
423 struct pt_regs * regs, long error_code, siginfo_t *info) 424 struct pt_regs * regs, long error_code,
425 siginfo_t *info)
424{ 426{
425 conditional_sti(regs); 427 conditional_sti(regs);
426 428
@@ -504,7 +506,8 @@ DO_ERROR(18, SIGSEGV, "reserved", reserved)
504DO_ERROR(12, SIGBUS, "stack segment", stack_segment) 506DO_ERROR(12, SIGBUS, "stack segment", stack_segment)
505DO_ERROR( 8, SIGSEGV, "double fault", double_fault) 507DO_ERROR( 8, SIGSEGV, "double fault", double_fault)
506 508
507asmlinkage void do_general_protection(struct pt_regs * regs, long error_code) 509asmlinkage void __kprobes do_general_protection(struct pt_regs * regs,
510 long error_code)
508{ 511{
509 conditional_sti(regs); 512 conditional_sti(regs);
510 513
@@ -622,7 +625,7 @@ asmlinkage void default_do_nmi(struct pt_regs *regs)
622 io_check_error(reason, regs); 625 io_check_error(reason, regs);
623} 626}
624 627
625asmlinkage void do_int3(struct pt_regs * regs, long error_code) 628asmlinkage void __kprobes do_int3(struct pt_regs * regs, long error_code)
626{ 629{
627 if (notify_die(DIE_INT3, "int3", regs, error_code, 3, SIGTRAP) == NOTIFY_STOP) { 630 if (notify_die(DIE_INT3, "int3", regs, error_code, 3, SIGTRAP) == NOTIFY_STOP) {
628 return; 631 return;
@@ -653,7 +656,8 @@ asmlinkage struct pt_regs *sync_regs(struct pt_regs *eregs)
653} 656}
654 657
655/* runs on IST stack. */ 658/* runs on IST stack. */
656asmlinkage void do_debug(struct pt_regs * regs, unsigned long error_code) 659asmlinkage void __kprobes do_debug(struct pt_regs * regs,
660 unsigned long error_code)
657{ 661{
658 unsigned long condition; 662 unsigned long condition;
659 struct task_struct *tsk = current; 663 struct task_struct *tsk = current;
diff --git a/arch/x86_64/kernel/vmlinux.lds.S b/arch/x86_64/kernel/vmlinux.lds.S
index 2a94f9b60b2d..d4abb07af52d 100644
--- a/arch/x86_64/kernel/vmlinux.lds.S
+++ b/arch/x86_64/kernel/vmlinux.lds.S
@@ -21,6 +21,7 @@ SECTIONS
21 *(.text) 21 *(.text)
22 SCHED_TEXT 22 SCHED_TEXT
23 LOCK_TEXT 23 LOCK_TEXT
24 KPROBES_TEXT
24 *(.fixup) 25 *(.fixup)
25 *(.gnu.warning) 26 *(.gnu.warning)
26 } = 0x9090 27 } = 0x9090
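KPROBES_TEXT gathers the .kprobes.text section produced by the __kprobes annotation and brackets it with start/end symbols, so the kprobes core can refuse a probe whose address falls inside its own code. A rough sketch of the macro from the generic linker-script header of this era (alignment details omitted):

    #define KPROBES_TEXT                                    \
            VMLINUX_SYMBOL(__kprobes_text_start) = .;       \
            *(.kprobes.text)                                \
            VMLINUX_SYMBOL(__kprobes_text_end) = .;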
diff --git a/arch/x86_64/mm/fault.c b/arch/x86_64/mm/fault.c
index ca914c3bd49c..816732d8858c 100644
--- a/arch/x86_64/mm/fault.c
+++ b/arch/x86_64/mm/fault.c
@@ -23,6 +23,7 @@
23#include <linux/vt_kern.h> /* For unblank_screen() */ 23#include <linux/vt_kern.h> /* For unblank_screen() */
24#include <linux/compiler.h> 24#include <linux/compiler.h>
25#include <linux/module.h> 25#include <linux/module.h>
26#include <linux/kprobes.h>
26 27
27#include <asm/system.h> 28#include <asm/system.h>
28#include <asm/uaccess.h> 29#include <asm/uaccess.h>
@@ -294,7 +295,8 @@ int exception_trace = 1;
294 * bit 2 == 0 means kernel, 1 means user-mode 295 * bit 2 == 0 means kernel, 1 means user-mode
295 * bit 3 == 1 means fault was an instruction fetch 296 * bit 3 == 1 means fault was an instruction fetch
296 */ 297 */
297asmlinkage void do_page_fault(struct pt_regs *regs, unsigned long error_code) 298asmlinkage void __kprobes do_page_fault(struct pt_regs *regs,
299 unsigned long error_code)
298{ 300{
299 struct task_struct *tsk; 301 struct task_struct *tsk;
300 struct mm_struct *mm; 302 struct mm_struct *mm;
diff --git a/arch/x86_64/mm/numa.c b/arch/x86_64/mm/numa.c
index 6a156f5692ae..04f7a33e144c 100644
--- a/arch/x86_64/mm/numa.c
+++ b/arch/x86_64/mm/numa.c
@@ -22,14 +22,14 @@
22#define Dprintk(x...) 22#define Dprintk(x...)
23#endif 23#endif
24 24
25struct pglist_data *node_data[MAX_NUMNODES]; 25struct pglist_data *node_data[MAX_NUMNODES] __read_mostly;
26bootmem_data_t plat_node_bdata[MAX_NUMNODES]; 26bootmem_data_t plat_node_bdata[MAX_NUMNODES];
27 27
28int memnode_shift; 28int memnode_shift;
29u8 memnodemap[NODEMAPSIZE]; 29u8 memnodemap[NODEMAPSIZE];
30 30
31unsigned char cpu_to_node[NR_CPUS] = { [0 ... NR_CPUS-1] = NUMA_NO_NODE }; 31unsigned char cpu_to_node[NR_CPUS] __read_mostly = { [0 ... NR_CPUS-1] = NUMA_NO_NODE };
32cpumask_t node_to_cpumask[MAX_NUMNODES]; 32cpumask_t node_to_cpumask[MAX_NUMNODES] __read_mostly;
33 33
34int numa_off __initdata; 34int numa_off __initdata;
35 35
diff --git a/arch/xtensa/kernel/time.c b/arch/xtensa/kernel/time.c
index e07287db5a40..1ac7d5ce7456 100644
--- a/arch/xtensa/kernel/time.c
+++ b/arch/xtensa/kernel/time.c
@@ -122,10 +122,7 @@ int do_settimeofday(struct timespec *tv)
122 set_normalized_timespec(&xtime, sec, nsec); 122 set_normalized_timespec(&xtime, sec, nsec);
123 set_normalized_timespec(&wall_to_monotonic, wtm_sec, wtm_nsec); 123 set_normalized_timespec(&wall_to_monotonic, wtm_sec, wtm_nsec);
124 124
125 time_adjust = 0; /* stop active adjtime() */ 125 ntp_clear();
126 time_status |= STA_UNSYNC;
127 time_maxerror = NTP_PHASE_LIMIT;
128 time_esterror = NTP_PHASE_LIMIT;
129 write_sequnlock_irq(&xtime_lock); 126 write_sequnlock_irq(&xtime_lock);
130 return 0; 127 return 0;
131} 128}
@@ -184,7 +181,7 @@ again:
184 next += CCOUNT_PER_JIFFY; 181 next += CCOUNT_PER_JIFFY;
185 do_timer (regs); /* Linux handler in kernel/timer.c */ 182 do_timer (regs); /* Linux handler in kernel/timer.c */
186 183
187 if ((time_status & STA_UNSYNC) == 0 && 184 if (ntp_synced() &&
188 xtime.tv_sec - last_rtc_update >= 659 && 185 xtime.tv_sec - last_rtc_update >= 659 &&
189 abs((xtime.tv_nsec/1000)-(1000000-1000000/HZ))<5000000/HZ && 186 abs((xtime.tv_nsec/1000)-(1000000-1000000/HZ))<5000000/HZ &&
190 jiffies - wall_jiffies == 1) { 187 jiffies - wall_jiffies == 1) {
diff --git a/crypto/cipher.c b/crypto/cipher.c
index 3df47f93c9db..dfd4bcfc5975 100644
--- a/crypto/cipher.c
+++ b/crypto/cipher.c
@@ -191,6 +191,8 @@ static unsigned int cbc_process_encrypt(const struct cipher_desc *desc,
191 u8 *iv = desc->info; 191 u8 *iv = desc->info;
192 unsigned int done = 0; 192 unsigned int done = 0;
193 193
194 nbytes -= bsize;
195
194 do { 196 do {
195 xor(iv, src); 197 xor(iv, src);
196 fn(crypto_tfm_ctx(tfm), dst, iv); 198 fn(crypto_tfm_ctx(tfm), dst, iv);
@@ -198,7 +200,7 @@ static unsigned int cbc_process_encrypt(const struct cipher_desc *desc,
198 200
199 src += bsize; 201 src += bsize;
200 dst += bsize; 202 dst += bsize;
201 } while ((done += bsize) < nbytes); 203 } while ((done += bsize) <= nbytes);
202 204
203 return done; 205 return done;
204} 206}
@@ -219,6 +221,8 @@ static unsigned int cbc_process_decrypt(const struct cipher_desc *desc,
219 u8 *iv = desc->info; 221 u8 *iv = desc->info;
220 unsigned int done = 0; 222 unsigned int done = 0;
221 223
224 nbytes -= bsize;
225
222 do { 226 do {
223 u8 *tmp_dst = *dst_p; 227 u8 *tmp_dst = *dst_p;
224 228
@@ -230,7 +234,7 @@ static unsigned int cbc_process_decrypt(const struct cipher_desc *desc,
230 234
231 src += bsize; 235 src += bsize;
232 dst += bsize; 236 dst += bsize;
233 } while ((done += bsize) < nbytes); 237 } while ((done += bsize) <= nbytes);
234 238
235 return done; 239 return done;
236} 240}
@@ -243,12 +247,14 @@ static unsigned int ecb_process(const struct cipher_desc *desc, u8 *dst,
243 void (*fn)(void *, u8 *, const u8 *) = desc->crfn; 247 void (*fn)(void *, u8 *, const u8 *) = desc->crfn;
244 unsigned int done = 0; 248 unsigned int done = 0;
245 249
250 nbytes -= bsize;
251
246 do { 252 do {
247 fn(crypto_tfm_ctx(tfm), dst, src); 253 fn(crypto_tfm_ctx(tfm), dst, src);
248 254
249 src += bsize; 255 src += bsize;
250 dst += bsize; 256 dst += bsize;
251 } while ((done += bsize) < nbytes); 257 } while ((done += bsize) <= nbytes);
252 258
253 return done; 259 return done;
254} 260}
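All three cipher walkers now pre-subtract one block from nbytes and loop while done <= nbytes. The effect is that only complete blocks are processed: with the old "< nbytes" bound, a length that was not a multiple of the block size would have run one block past the end of the buffer. A worked example with a hypothetical 8-byte block size:

    /* bsize = 8, nbytes = 20 (not a multiple of bsize)
     * new form: nbytes -= 8 leaves 12
     *   done = 8   -> 8 <= 12, process the second block
     *   done = 16  -> 16 > 12, stop; two full blocks handled,
     *                 the 4 trailing bytes are left untouched
     * old form:  8 < 20, 16 < 20, so a third block is processed
     *            before 24 stops the loop, past the 20-byte end */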
diff --git a/drivers/base/firmware_class.c b/drivers/base/firmware_class.c
index 652281402c92..5bfa2e9a7c26 100644
--- a/drivers/base/firmware_class.c
+++ b/drivers/base/firmware_class.c
@@ -28,6 +28,7 @@ enum {
28 FW_STATUS_DONE, 28 FW_STATUS_DONE,
29 FW_STATUS_ABORT, 29 FW_STATUS_ABORT,
30 FW_STATUS_READY, 30 FW_STATUS_READY,
31 FW_STATUS_READY_NOHOTPLUG,
31}; 32};
32 33
33static int loading_timeout = 10; /* In seconds */ 34static int loading_timeout = 10; /* In seconds */
@@ -344,7 +345,7 @@ error_kfree:
344 345
345static int 346static int
346fw_setup_class_device(struct firmware *fw, struct class_device **class_dev_p, 347fw_setup_class_device(struct firmware *fw, struct class_device **class_dev_p,
347 const char *fw_name, struct device *device) 348 const char *fw_name, struct device *device, int hotplug)
348{ 349{
349 struct class_device *class_dev; 350 struct class_device *class_dev;
350 struct firmware_priv *fw_priv; 351 struct firmware_priv *fw_priv;
@@ -376,7 +377,10 @@ fw_setup_class_device(struct firmware *fw, struct class_device **class_dev_p,
376 goto error_unreg; 377 goto error_unreg;
377 } 378 }
378 379
379 set_bit(FW_STATUS_READY, &fw_priv->status); 380 if (hotplug)
381 set_bit(FW_STATUS_READY, &fw_priv->status);
382 else
383 set_bit(FW_STATUS_READY_NOHOTPLUG, &fw_priv->status);
380 *class_dev_p = class_dev; 384 *class_dev_p = class_dev;
381 goto out; 385 goto out;
382 386
@@ -386,21 +390,9 @@ out:
386 return retval; 390 return retval;
387} 391}
388 392
389/** 393static int
390 * request_firmware: - request firmware to hotplug and wait for it 394_request_firmware(const struct firmware **firmware_p, const char *name,
391 * Description: 395 struct device *device, int hotplug)
392 * @firmware will be used to return a firmware image by the name
393 * of @name for device @device.
394 *
395 * Should be called from user context where sleeping is allowed.
396 *
397 * @name will be use as $FIRMWARE in the hotplug environment and
398 * should be distinctive enough not to be confused with any other
399 * firmware image for this or any other device.
400 **/
401int
402request_firmware(const struct firmware **firmware_p, const char *name,
403 struct device *device)
404{ 396{
405 struct class_device *class_dev; 397 struct class_device *class_dev;
406 struct firmware_priv *fw_priv; 398 struct firmware_priv *fw_priv;
@@ -419,22 +411,25 @@ request_firmware(const struct firmware **firmware_p, const char *name,
419 } 411 }
420 memset(firmware, 0, sizeof (*firmware)); 412 memset(firmware, 0, sizeof (*firmware));
421 413
422 retval = fw_setup_class_device(firmware, &class_dev, name, device); 414 retval = fw_setup_class_device(firmware, &class_dev, name, device,
415 hotplug);
423 if (retval) 416 if (retval)
424 goto error_kfree_fw; 417 goto error_kfree_fw;
425 418
426 fw_priv = class_get_devdata(class_dev); 419 fw_priv = class_get_devdata(class_dev);
427 420
428 if (loading_timeout > 0) { 421 if (hotplug) {
429 fw_priv->timeout.expires = jiffies + loading_timeout * HZ; 422 if (loading_timeout > 0) {
430 add_timer(&fw_priv->timeout); 423 fw_priv->timeout.expires = jiffies + loading_timeout * HZ;
431 } 424 add_timer(&fw_priv->timeout);
432 425 }
433 kobject_hotplug(&class_dev->kobj, KOBJ_ADD);
434 wait_for_completion(&fw_priv->completion);
435 set_bit(FW_STATUS_DONE, &fw_priv->status);
436 426
437 del_timer_sync(&fw_priv->timeout); 427 kobject_hotplug(&class_dev->kobj, KOBJ_ADD);
428 wait_for_completion(&fw_priv->completion);
429 set_bit(FW_STATUS_DONE, &fw_priv->status);
430 del_timer_sync(&fw_priv->timeout);
431 } else
432 wait_for_completion(&fw_priv->completion);
438 433
439 down(&fw_lock); 434 down(&fw_lock);
440 if (!fw_priv->fw->size || test_bit(FW_STATUS_ABORT, &fw_priv->status)) { 435 if (!fw_priv->fw->size || test_bit(FW_STATUS_ABORT, &fw_priv->status)) {
@@ -455,6 +450,26 @@ out:
455} 450}
456 451
457/** 452/**
453 * request_firmware: - request firmware to hotplug and wait for it
454 * Description:
455 * @firmware will be used to return a firmware image by the name
456 * of @name for device @device.
457 *
458 * Should be called from user context where sleeping is allowed.
459 *
460 * @name will be use as $FIRMWARE in the hotplug environment and
461 * should be distinctive enough not to be confused with any other
462 * firmware image for this or any other device.
463 **/
464int
465request_firmware(const struct firmware **firmware_p, const char *name,
466 struct device *device)
467{
468 int hotplug = 1;
469 return _request_firmware(firmware_p, name, device, hotplug);
470}
471
472/**
458 * release_firmware: - release the resource associated with a firmware image 473 * release_firmware: - release the resource associated with a firmware image
459 **/ 474 **/
460void 475void
@@ -491,6 +506,7 @@ struct firmware_work {
491 struct device *device; 506 struct device *device;
492 void *context; 507 void *context;
493 void (*cont)(const struct firmware *fw, void *context); 508 void (*cont)(const struct firmware *fw, void *context);
509 int hotplug;
494}; 510};
495 511
496static int 512static int
@@ -503,7 +519,8 @@ request_firmware_work_func(void *arg)
503 return 0; 519 return 0;
504 } 520 }
505 daemonize("%s/%s", "firmware", fw_work->name); 521 daemonize("%s/%s", "firmware", fw_work->name);
506 request_firmware(&fw, fw_work->name, fw_work->device); 522 _request_firmware(&fw, fw_work->name, fw_work->device,
523 fw_work->hotplug);
507 fw_work->cont(fw, fw_work->context); 524 fw_work->cont(fw, fw_work->context);
508 release_firmware(fw); 525 release_firmware(fw);
509 module_put(fw_work->module); 526 module_put(fw_work->module);
@@ -518,6 +535,9 @@ request_firmware_work_func(void *arg)
518 * Asynchronous variant of request_firmware() for contexts where 535 * Asynchronous variant of request_firmware() for contexts where
519 * it is not possible to sleep. 536 * it is not possible to sleep.
520 * 537 *
538 * @hotplug invokes hotplug event to copy the firmware image if this flag
539 * is non-zero else the firmware copy must be done manually.
540 *
521 * @cont will be called asynchronously when the firmware request is over. 541 * @cont will be called asynchronously when the firmware request is over.
522 * 542 *
523 * @context will be passed over to @cont. 543 * @context will be passed over to @cont.
@@ -527,7 +547,7 @@ request_firmware_work_func(void *arg)
527 **/ 547 **/
528int 548int
529request_firmware_nowait( 549request_firmware_nowait(
530 struct module *module, 550 struct module *module, int hotplug,
531 const char *name, struct device *device, void *context, 551 const char *name, struct device *device, void *context,
532 void (*cont)(const struct firmware *fw, void *context)) 552 void (*cont)(const struct firmware *fw, void *context))
533{ 553{
@@ -548,6 +568,7 @@ request_firmware_nowait(
548 .device = device, 568 .device = device,
549 .context = context, 569 .context = context,
550 .cont = cont, 570 .cont = cont,
571 .hotplug = hotplug,
551 }; 572 };
552 573
553 ret = kernel_thread(request_firmware_work_func, fw_work, 574 ret = kernel_thread(request_firmware_work_func, fw_work,
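The common body moves into _request_firmware(), keyed on a hotplug flag: the existing request_firmware() keeps firing the hotplug event and arming the load timeout, while a hotplug=0 caller simply waits for user space to push the image through sysfs by hand. A sketch of what a non-hotplug entry point built on this helper could look like (the name below is illustrative, not part of the patch):

    int request_firmware_nohotplug(const struct firmware **firmware_p,
                                   const char *name, struct device *device)
    {
        return _request_firmware(firmware_p, name, device, 0);
    }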
diff --git a/drivers/block/Kconfig b/drivers/block/Kconfig
index 6b736364cc5b..51b0af1cebee 100644
--- a/drivers/block/Kconfig
+++ b/drivers/block/Kconfig
@@ -6,7 +6,7 @@ menu "Block devices"
6 6
7config BLK_DEV_FD 7config BLK_DEV_FD
8 tristate "Normal floppy disk support" 8 tristate "Normal floppy disk support"
9 depends on (!ARCH_S390 && !M68K && !IA64 && !UML && !ARM) || Q40 || (SUN3X && BROKEN) || ARCH_RPC || ARCH_EBSA285 9 depends on ARCH_MAY_HAVE_PC_FDC
10 ---help--- 10 ---help---
11 If you want to use the floppy disk drive(s) of your PC under Linux, 11 If you want to use the floppy disk drive(s) of your PC under Linux,
12 say Y. Information about this driver, especially important for IBM 12 say Y. Information about this driver, especially important for IBM
diff --git a/drivers/block/aoe/aoedev.c b/drivers/block/aoe/aoedev.c
index 6e231c5a1199..ded33ba31acc 100644
--- a/drivers/block/aoe/aoedev.c
+++ b/drivers/block/aoe/aoedev.c
@@ -35,7 +35,7 @@ aoedev_newdev(ulong nframes)
35 struct aoedev *d; 35 struct aoedev *d;
36 struct frame *f, *e; 36 struct frame *f, *e;
37 37
38 d = kcalloc(1, sizeof *d, GFP_ATOMIC); 38 d = kzalloc(sizeof *d, GFP_ATOMIC);
39 if (d == NULL) 39 if (d == NULL)
40 return NULL; 40 return NULL;
41 f = kcalloc(nframes, sizeof *f, GFP_ATOMIC); 41 f = kcalloc(nframes, sizeof *f, GFP_ATOMIC);
diff --git a/drivers/block/cfq-iosched.c b/drivers/block/cfq-iosched.c
index cd056e7e64ec..30c0903c7cdd 100644
--- a/drivers/block/cfq-iosched.c
+++ b/drivers/block/cfq-iosched.c
@@ -2260,8 +2260,6 @@ static void cfq_put_cfqd(struct cfq_data *cfqd)
2260 if (!atomic_dec_and_test(&cfqd->ref)) 2260 if (!atomic_dec_and_test(&cfqd->ref))
2261 return; 2261 return;
2262 2262
2263 blk_put_queue(q);
2264
2265 cfq_shutdown_timer_wq(cfqd); 2263 cfq_shutdown_timer_wq(cfqd);
2266 q->elevator->elevator_data = NULL; 2264 q->elevator->elevator_data = NULL;
2267 2265
@@ -2318,7 +2316,6 @@ static int cfq_init_queue(request_queue_t *q, elevator_t *e)
2318 e->elevator_data = cfqd; 2316 e->elevator_data = cfqd;
2319 2317
2320 cfqd->queue = q; 2318 cfqd->queue = q;
2321 atomic_inc(&q->refcnt);
2322 2319
2323 cfqd->max_queued = q->nr_requests / 4; 2320 cfqd->max_queued = q->nr_requests / 4;
2324 q->nr_batching = cfq_queued; 2321 q->nr_batching = cfq_queued;
diff --git a/drivers/block/deadline-iosched.c b/drivers/block/deadline-iosched.c
index ff5201e02153..24594c57c323 100644
--- a/drivers/block/deadline-iosched.c
+++ b/drivers/block/deadline-iosched.c
@@ -507,18 +507,12 @@ static int deadline_dispatch_requests(struct deadline_data *dd)
507 const int reads = !list_empty(&dd->fifo_list[READ]); 507 const int reads = !list_empty(&dd->fifo_list[READ]);
508 const int writes = !list_empty(&dd->fifo_list[WRITE]); 508 const int writes = !list_empty(&dd->fifo_list[WRITE]);
509 struct deadline_rq *drq; 509 struct deadline_rq *drq;
510 int data_dir, other_dir; 510 int data_dir;
511 511
512 /* 512 /*
513 * batches are currently reads XOR writes 513 * batches are currently reads XOR writes
514 */ 514 */
515 drq = NULL; 515 drq = dd->next_drq[WRITE] ? : dd->next_drq[READ];
516
517 if (dd->next_drq[READ])
518 drq = dd->next_drq[READ];
519
520 if (dd->next_drq[WRITE])
521 drq = dd->next_drq[WRITE];
522 516
523 if (drq) { 517 if (drq) {
524 /* we have a "next request" */ 518 /* we have a "next request" */
@@ -544,7 +538,6 @@ static int deadline_dispatch_requests(struct deadline_data *dd)
544 goto dispatch_writes; 538 goto dispatch_writes;
545 539
546 data_dir = READ; 540 data_dir = READ;
547 other_dir = WRITE;
548 541
549 goto dispatch_find_request; 542 goto dispatch_find_request;
550 } 543 }
@@ -560,7 +553,6 @@ dispatch_writes:
560 dd->starved = 0; 553 dd->starved = 0;
561 554
562 data_dir = WRITE; 555 data_dir = WRITE;
563 other_dir = READ;
564 556
565 goto dispatch_find_request; 557 goto dispatch_find_request;
566 } 558 }
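The collapsed assignment uses GCC's two-operand conditional, a ? : b, which yields a when it is non-NULL and b otherwise, so the write queue still takes precedence exactly as the removed if/if chain did. Spelled out:

    drq = dd->next_drq[WRITE] ? dd->next_drq[WRITE] : dd->next_drq[READ];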
diff --git a/drivers/block/genhd.c b/drivers/block/genhd.c
index 47fd3659a061..d42840cc0d1d 100644
--- a/drivers/block/genhd.c
+++ b/drivers/block/genhd.c
@@ -45,7 +45,7 @@ int get_blkdev_list(char *p, int used)
45 struct blk_major_name *n; 45 struct blk_major_name *n;
46 int i, len; 46 int i, len;
47 47
48 len = sprintf(p, "\nBlock devices:\n"); 48 len = snprintf(p, (PAGE_SIZE-used), "\nBlock devices:\n");
49 49
50 down(&block_subsys_sem); 50 down(&block_subsys_sem);
51 for (i = 0; i < ARRAY_SIZE(major_names); i++) { 51 for (i = 0; i < ARRAY_SIZE(major_names); i++) {
diff --git a/drivers/block/ll_rw_blk.c b/drivers/block/ll_rw_blk.c
index 3c818544475e..b4b17958d101 100644
--- a/drivers/block/ll_rw_blk.c
+++ b/drivers/block/ll_rw_blk.c
@@ -235,8 +235,8 @@ void blk_queue_make_request(request_queue_t * q, make_request_fn * mfn)
235 * set defaults 235 * set defaults
236 */ 236 */
237 q->nr_requests = BLKDEV_MAX_RQ; 237 q->nr_requests = BLKDEV_MAX_RQ;
238 q->max_phys_segments = MAX_PHYS_SEGMENTS; 238 blk_queue_max_phys_segments(q, MAX_PHYS_SEGMENTS);
239 q->max_hw_segments = MAX_HW_SEGMENTS; 239 blk_queue_max_hw_segments(q, MAX_HW_SEGMENTS);
240 q->make_request_fn = mfn; 240 q->make_request_fn = mfn;
241 q->backing_dev_info.ra_pages = (VM_MAX_READAHEAD * 1024) / PAGE_CACHE_SIZE; 241 q->backing_dev_info.ra_pages = (VM_MAX_READAHEAD * 1024) / PAGE_CACHE_SIZE;
242 q->backing_dev_info.state = 0; 242 q->backing_dev_info.state = 0;
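Assigning through blk_queue_max_phys_segments()/blk_queue_max_hw_segments() instead of writing the fields lets the setters sanity-check the value. Roughly what the phys-segments setter of this era does (a sketch of its effect, not the verbatim source):

    void blk_queue_max_phys_segments(request_queue_t *q, unsigned short max)
    {
        if (!max) {
            max = 1;        /* clamp a nonsense zero and complain */
            printk("%s: set to minimum %d\n", __FUNCTION__, max);
        }
        q->max_phys_segments = max;
    }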
diff --git a/drivers/char/Kconfig b/drivers/char/Kconfig
index a1de06d76de6..2bc9d64db106 100644
--- a/drivers/char/Kconfig
+++ b/drivers/char/Kconfig
@@ -138,7 +138,7 @@ config CYZ_INTR
138 138
139config DIGIEPCA 139config DIGIEPCA
140 tristate "Digiboard Intelligent Async Support" 140 tristate "Digiboard Intelligent Async Support"
141 depends on SERIAL_NONSTANDARD && BROKEN_ON_SMP && (!64BIT || BROKEN) 141 depends on SERIAL_NONSTANDARD
142 ---help--- 142 ---help---
143 This is a driver for Digi International's Xx, Xeve, and Xem series 143 This is a driver for Digi International's Xx, Xeve, and Xem series
144 of cards which provide multiple serial ports. You would need 144 of cards which provide multiple serial ports. You would need
diff --git a/drivers/char/digi1.h b/drivers/char/digi1.h
index 184378d23f8c..94d4eab5d3ca 100644
--- a/drivers/char/digi1.h
+++ b/drivers/char/digi1.h
@@ -1,46 +1,46 @@
1/* Definitions for DigiBoard ditty(1) command. */ 1/* Definitions for DigiBoard ditty(1) command. */
2 2
3#if !defined(TIOCMODG) 3#if !defined(TIOCMODG)
4#define TIOCMODG ('d'<<8) | 250 /* get modem ctrl state */ 4#define TIOCMODG (('d'<<8) | 250) /* get modem ctrl state */
5#define TIOCMODS ('d'<<8) | 251 /* set modem ctrl state */ 5#define TIOCMODS (('d'<<8) | 251) /* set modem ctrl state */
6#endif 6#endif
7 7
8#if !defined(TIOCMSET) 8#if !defined(TIOCMSET)
9#define TIOCMSET ('d'<<8) | 252 /* set modem ctrl state */ 9#define TIOCMSET (('d'<<8) | 252) /* set modem ctrl state */
10#define TIOCMGET ('d'<<8) | 253 /* set modem ctrl state */ 10#define TIOCMGET (('d'<<8) | 253) /* set modem ctrl state */
11#endif 11#endif
12 12
13#if !defined(TIOCMBIC) 13#if !defined(TIOCMBIC)
14#define TIOCMBIC ('d'<<8) | 254 /* set modem ctrl state */ 14#define TIOCMBIC (('d'<<8) | 254) /* set modem ctrl state */
15#define TIOCMBIS ('d'<<8) | 255 /* set modem ctrl state */ 15#define TIOCMBIS (('d'<<8) | 255) /* set modem ctrl state */
16#endif 16#endif
17 17
18#if !defined(TIOCSDTR) 18#if !defined(TIOCSDTR)
19#define TIOCSDTR ('e'<<8) | 0 /* set DTR */ 19#define TIOCSDTR (('e'<<8) | 0) /* set DTR */
20#define TIOCCDTR ('e'<<8) | 1 /* clear DTR */ 20#define TIOCCDTR (('e'<<8) | 1) /* clear DTR */
21#endif 21#endif
22 22
23/************************************************************************ 23/************************************************************************
24 * Ioctl command arguments for DIGI parameters. 24 * Ioctl command arguments for DIGI parameters.
25 ************************************************************************/ 25 ************************************************************************/
26#define DIGI_GETA ('e'<<8) | 94 /* Read params */ 26#define DIGI_GETA (('e'<<8) | 94) /* Read params */
27 27
28#define DIGI_SETA ('e'<<8) | 95 /* Set params */ 28#define DIGI_SETA (('e'<<8) | 95) /* Set params */
29#define DIGI_SETAW ('e'<<8) | 96 /* Drain & set params */ 29#define DIGI_SETAW (('e'<<8) | 96) /* Drain & set params */
30#define DIGI_SETAF ('e'<<8) | 97 /* Drain, flush & set params */ 30#define DIGI_SETAF (('e'<<8) | 97) /* Drain, flush & set params */
31 31
32#define DIGI_GETFLOW ('e'<<8) | 99 /* Get startc/stopc flow */ 32#define DIGI_GETFLOW (('e'<<8) | 99) /* Get startc/stopc flow */
33 /* control characters */ 33 /* control characters */
34#define DIGI_SETFLOW ('e'<<8) | 100 /* Set startc/stopc flow */ 34#define DIGI_SETFLOW (('e'<<8) | 100) /* Set startc/stopc flow */
35 /* control characters */ 35 /* control characters */
36#define DIGI_GETAFLOW ('e'<<8) | 101 /* Get Aux. startc/stopc */ 36#define DIGI_GETAFLOW (('e'<<8) | 101) /* Get Aux. startc/stopc */
37 /* flow control chars */ 37 /* flow control chars */
38#define DIGI_SETAFLOW ('e'<<8) | 102 /* Set Aux. startc/stopc */ 38#define DIGI_SETAFLOW (('e'<<8) | 102) /* Set Aux. startc/stopc */
39 /* flow control chars */ 39 /* flow control chars */
40 40
41#define DIGI_GETINFO ('e'<<8) | 103 /* Fill in digi_info */ 41#define DIGI_GETINFO (('e'<<8) | 103) /* Fill in digi_info */
42#define DIGI_POLLER ('e'<<8) | 104 /* Turn on/off poller */ 42#define DIGI_POLLER (('e'<<8) | 104) /* Turn on/off poller */
43#define DIGI_INIT ('e'<<8) | 105 /* Allow things to run. */ 43#define DIGI_INIT (('e'<<8) | 105) /* Allow things to run. */
44 44
45struct digiflow_struct 45struct digiflow_struct
46{ 46{
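Wrapping each ioctl value in an outer pair of parentheses matters because == binds more tightly than |; without them a comparison against the macro silently misparses. For example:

    #define OLD_DIGI_GETA   ('e'<<8) | 94       /* unparenthesized */
    #define NEW_DIGI_GETA   (('e'<<8) | 94)

    if (cmd == OLD_DIGI_GETA) { /* parses as (cmd == ('e'<<8)) | 94 */ }
    if (cmd == NEW_DIGI_GETA) { /* compares cmd against the whole value */ }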
diff --git a/drivers/char/digiFep1.h b/drivers/char/digiFep1.h
index c47d7fcb8400..3c1f1922c798 100644
--- a/drivers/char/digiFep1.h
+++ b/drivers/char/digiFep1.h
@@ -13,88 +13,88 @@
13 13
14struct global_data 14struct global_data
15{ 15{
16 volatile ushort cin; 16 u16 cin;
17 volatile ushort cout; 17 u16 cout;
18 volatile ushort cstart; 18 u16 cstart;
19 volatile ushort cmax; 19 u16 cmax;
20 volatile ushort ein; 20 u16 ein;
21 volatile ushort eout; 21 u16 eout;
22 volatile ushort istart; 22 u16 istart;
23 volatile ushort imax; 23 u16 imax;
24}; 24};
25 25
26 26
27struct board_chan 27struct board_chan
28{ 28{
29 int filler1; 29 u32 filler1;
30 int filler2; 30 u32 filler2;
31 volatile ushort tseg; 31 u16 tseg;
32 volatile ushort tin; 32 u16 tin;
33 volatile ushort tout; 33 u16 tout;
34 volatile ushort tmax; 34 u16 tmax;
35 35
36 volatile ushort rseg; 36 u16 rseg;
37 volatile ushort rin; 37 u16 rin;
38 volatile ushort rout; 38 u16 rout;
39 volatile ushort rmax; 39 u16 rmax;
40 40
41 volatile ushort tlow; 41 u16 tlow;
42 volatile ushort rlow; 42 u16 rlow;
43 volatile ushort rhigh; 43 u16 rhigh;
44 volatile ushort incr; 44 u16 incr;
45 45
46 volatile ushort etime; 46 u16 etime;
47 volatile ushort edelay; 47 u16 edelay;
48 volatile unchar *dev; 48 unchar *dev;
49 49
50 volatile ushort iflag; 50 u16 iflag;
51 volatile ushort oflag; 51 u16 oflag;
52 volatile ushort cflag; 52 u16 cflag;
53 volatile ushort gmask; 53 u16 gmask;
54 54
55 volatile ushort col; 55 u16 col;
56 volatile ushort delay; 56 u16 delay;
57 volatile ushort imask; 57 u16 imask;
58 volatile ushort tflush; 58 u16 tflush;
59 59
60 int filler3; 60 u32 filler3;
61 int filler4; 61 u32 filler4;
62 int filler5; 62 u32 filler5;
63 int filler6; 63 u32 filler6;
64 64
65 volatile unchar num; 65 u8 num;
66 volatile unchar ract; 66 u8 ract;
67 volatile unchar bstat; 67 u8 bstat;
68 volatile unchar tbusy; 68 u8 tbusy;
69 volatile unchar iempty; 69 u8 iempty;
70 volatile unchar ilow; 70 u8 ilow;
71 volatile unchar idata; 71 u8 idata;
72 volatile unchar eflag; 72 u8 eflag;
73 73
74 volatile unchar tflag; 74 u8 tflag;
75 volatile unchar rflag; 75 u8 rflag;
76 volatile unchar xmask; 76 u8 xmask;
77 volatile unchar xval; 77 u8 xval;
78 volatile unchar mstat; 78 u8 mstat;
79 volatile unchar mchange; 79 u8 mchange;
80 volatile unchar mint; 80 u8 mint;
81 volatile unchar lstat; 81 u8 lstat;
82 82
83 volatile unchar mtran; 83 u8 mtran;
84 volatile unchar orun; 84 u8 orun;
85 volatile unchar startca; 85 u8 startca;
86 volatile unchar stopca; 86 u8 stopca;
87 volatile unchar startc; 87 u8 startc;
88 volatile unchar stopc; 88 u8 stopc;
89 volatile unchar vnext; 89 u8 vnext;
90 volatile unchar hflow; 90 u8 hflow;
91 91
92 volatile unchar fillc; 92 u8 fillc;
93 volatile unchar ochar; 93 u8 ochar;
94 volatile unchar omask; 94 u8 omask;
95 95
96 unchar filler7; 96 u8 filler7;
97 unchar filler8[28]; 97 u8 filler8[28];
98}; 98};
99 99
100 100
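Dropping volatile from the global_data/board_chan fields fits the rest of this series' move toward explicit accessors on the card's window-mapped memory, where ordering is expressed at the access site rather than in the type. A hedged sketch of the access style this implies, with bc standing for an ioremap()ed struct board_chan pointer (illustrative, not lifted from the driver):

    u16 tail = readw(&bc->tin);     /* fetch the producer index from the card */
    writew(tail, &bc->tout);        /* acknowledge everything up to that point */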
diff --git a/drivers/char/epca.c b/drivers/char/epca.c
index 6025e1866c7e..58d3738a2b7f 100644
--- a/drivers/char/epca.c
+++ b/drivers/char/epca.c
@@ -6,6 +6,8 @@
6 For technical support please email digiLinux@dgii.com or 6 For technical support please email digiLinux@dgii.com or
7 call Digi tech support at (612) 912-3456 7 call Digi tech support at (612) 912-3456
8 8
9 ** This driver is no longer supported by Digi **
10
9 Much of this design and code came from epca.c which was 11 Much of this design and code came from epca.c which was
10 copyright (C) 1994, 1995 Troy De Jongh, and subsquently 12 copyright (C) 1994, 1995 Troy De Jongh, and subsquently
11 modified by David Nugent, Christoph Lameter, Mike McLagan. 13 modified by David Nugent, Christoph Lameter, Mike McLagan.
@@ -43,31 +45,19 @@
43#include <linux/interrupt.h> 45#include <linux/interrupt.h>
44#include <asm/uaccess.h> 46#include <asm/uaccess.h>
45#include <asm/io.h> 47#include <asm/io.h>
46 48#include <linux/spinlock.h>
47#ifdef CONFIG_PCI
48#define ENABLE_PCI
49#endif /* CONFIG_PCI */
50
51#define putUser(arg1, arg2) put_user(arg1, (unsigned long __user *)arg2)
52#define getUser(arg1, arg2) get_user(arg1, (unsigned __user *)arg2)
53
54#ifdef ENABLE_PCI
55#include <linux/pci.h> 49#include <linux/pci.h>
56#include "digiPCI.h" 50#include "digiPCI.h"
57#endif /* ENABLE_PCI */ 51
58 52
59#include "digi1.h" 53#include "digi1.h"
60#include "digiFep1.h" 54#include "digiFep1.h"
61#include "epca.h" 55#include "epca.h"
62#include "epcaconfig.h" 56#include "epcaconfig.h"
63 57
64#if BITS_PER_LONG != 32
65# error FIXME: this driver only works on 32-bit platforms
66#endif
67
68/* ---------------------- Begin defines ------------------------ */ 58/* ---------------------- Begin defines ------------------------ */
69 59
70#define VERSION "1.3.0.1-LK" 60#define VERSION "1.3.0.1-LK2.6"
71 61
72/* This major needs to be submitted to Linux to join the majors list */ 62/* This major needs to be submitted to Linux to join the majors list */
73 63
@@ -81,13 +71,17 @@
81 71
82/* ----------------- Begin global definitions ------------------- */ 72/* ----------------- Begin global definitions ------------------- */
83 73
84static char mesg[100];
85static int nbdevs, num_cards, liloconfig; 74static int nbdevs, num_cards, liloconfig;
86static int digi_poller_inhibited = 1 ; 75static int digi_poller_inhibited = 1 ;
87 76
88static int setup_error_code; 77static int setup_error_code;
89static int invalid_lilo_config; 78static int invalid_lilo_config;
90 79
80/* The ISA boards do window flipping into the same spaces so its only sane
81 with a single lock. It's still pretty efficient */
82
83static spinlock_t epca_lock = SPIN_LOCK_UNLOCKED;
84
91/* ----------------------------------------------------------------------- 85/* -----------------------------------------------------------------------
92 MAXBOARDS is typically 12, but ISA and EISA cards are restricted to 86 MAXBOARDS is typically 12, but ISA and EISA cards are restricted to
93 7 below. 87 7 below.
@@ -129,58 +123,58 @@ static struct timer_list epca_timer;
129 configured. 123 configured.
130----------------------------------------------------------------------- */ 124----------------------------------------------------------------------- */
131 125
132static inline void memwinon(struct board_info *b, unsigned int win); 126static void memwinon(struct board_info *b, unsigned int win);
133static inline void memwinoff(struct board_info *b, unsigned int win); 127static void memwinoff(struct board_info *b, unsigned int win);
134static inline void globalwinon(struct channel *ch); 128static void globalwinon(struct channel *ch);
135static inline void rxwinon(struct channel *ch); 129static void rxwinon(struct channel *ch);
136static inline void txwinon(struct channel *ch); 130static void txwinon(struct channel *ch);
137static inline void memoff(struct channel *ch); 131static void memoff(struct channel *ch);
138static inline void assertgwinon(struct channel *ch); 132static void assertgwinon(struct channel *ch);
139static inline void assertmemoff(struct channel *ch); 133static void assertmemoff(struct channel *ch);
140 134
141/* ---- Begin more 'specific' memory functions for cx_like products --- */ 135/* ---- Begin more 'specific' memory functions for cx_like products --- */
142 136
143static inline void pcxem_memwinon(struct board_info *b, unsigned int win); 137static void pcxem_memwinon(struct board_info *b, unsigned int win);
144static inline void pcxem_memwinoff(struct board_info *b, unsigned int win); 138static void pcxem_memwinoff(struct board_info *b, unsigned int win);
145static inline void pcxem_globalwinon(struct channel *ch); 139static void pcxem_globalwinon(struct channel *ch);
146static inline void pcxem_rxwinon(struct channel *ch); 140static void pcxem_rxwinon(struct channel *ch);
147static inline void pcxem_txwinon(struct channel *ch); 141static void pcxem_txwinon(struct channel *ch);
148static inline void pcxem_memoff(struct channel *ch); 142static void pcxem_memoff(struct channel *ch);
149 143
150/* ------ Begin more 'specific' memory functions for the pcxe ------- */ 144/* ------ Begin more 'specific' memory functions for the pcxe ------- */
151 145
152static inline void pcxe_memwinon(struct board_info *b, unsigned int win); 146static void pcxe_memwinon(struct board_info *b, unsigned int win);
153static inline void pcxe_memwinoff(struct board_info *b, unsigned int win); 147static void pcxe_memwinoff(struct board_info *b, unsigned int win);
154static inline void pcxe_globalwinon(struct channel *ch); 148static void pcxe_globalwinon(struct channel *ch);
155static inline void pcxe_rxwinon(struct channel *ch); 149static void pcxe_rxwinon(struct channel *ch);
156static inline void pcxe_txwinon(struct channel *ch); 150static void pcxe_txwinon(struct channel *ch);
157static inline void pcxe_memoff(struct channel *ch); 151static void pcxe_memoff(struct channel *ch);
158 152
159/* ---- Begin more 'specific' memory functions for the pc64xe and pcxi ---- */ 153/* ---- Begin more 'specific' memory functions for the pc64xe and pcxi ---- */
160/* Note : pc64xe and pcxi share the same windowing routines */ 154/* Note : pc64xe and pcxi share the same windowing routines */
161 155
162static inline void pcxi_memwinon(struct board_info *b, unsigned int win); 156static void pcxi_memwinon(struct board_info *b, unsigned int win);
163static inline void pcxi_memwinoff(struct board_info *b, unsigned int win); 157static void pcxi_memwinoff(struct board_info *b, unsigned int win);
164static inline void pcxi_globalwinon(struct channel *ch); 158static void pcxi_globalwinon(struct channel *ch);
165static inline void pcxi_rxwinon(struct channel *ch); 159static void pcxi_rxwinon(struct channel *ch);
166static inline void pcxi_txwinon(struct channel *ch); 160static void pcxi_txwinon(struct channel *ch);
167static inline void pcxi_memoff(struct channel *ch); 161static void pcxi_memoff(struct channel *ch);
168 162
169/* - Begin 'specific' do nothing memory functions needed for some cards - */ 163/* - Begin 'specific' do nothing memory functions needed for some cards - */
170 164
171static inline void dummy_memwinon(struct board_info *b, unsigned int win); 165static void dummy_memwinon(struct board_info *b, unsigned int win);
172static inline void dummy_memwinoff(struct board_info *b, unsigned int win); 166static void dummy_memwinoff(struct board_info *b, unsigned int win);
173static inline void dummy_globalwinon(struct channel *ch); 167static void dummy_globalwinon(struct channel *ch);
174static inline void dummy_rxwinon(struct channel *ch); 168static void dummy_rxwinon(struct channel *ch);
175static inline void dummy_txwinon(struct channel *ch); 169static void dummy_txwinon(struct channel *ch);
176static inline void dummy_memoff(struct channel *ch); 170static void dummy_memoff(struct channel *ch);
177static inline void dummy_assertgwinon(struct channel *ch); 171static void dummy_assertgwinon(struct channel *ch);
178static inline void dummy_assertmemoff(struct channel *ch); 172static void dummy_assertmemoff(struct channel *ch);
179 173
180/* ------------------- Begin declare functions ----------------------- */ 174/* ------------------- Begin declare functions ----------------------- */
181 175
182static inline struct channel *verifyChannel(register struct tty_struct *); 176static struct channel *verifyChannel(struct tty_struct *);
183static inline void pc_sched_event(struct channel *, int); 177static void pc_sched_event(struct channel *, int);
184static void epca_error(int, char *); 178static void epca_error(int, char *);
185static void pc_close(struct tty_struct *, struct file *); 179static void pc_close(struct tty_struct *, struct file *);
186static void shutdown(struct channel *); 180static void shutdown(struct channel *);
@@ -215,15 +209,11 @@ static void pc_unthrottle(struct tty_struct *tty);
215static void digi_send_break(struct channel *ch, int msec); 209static void digi_send_break(struct channel *ch, int msec);
216static void setup_empty_event(struct tty_struct *tty, struct channel *ch); 210static void setup_empty_event(struct tty_struct *tty, struct channel *ch);
217void epca_setup(char *, int *); 211void epca_setup(char *, int *);
218void console_print(const char *);
219 212
220static int get_termio(struct tty_struct *, struct termio __user *); 213static int get_termio(struct tty_struct *, struct termio __user *);
221static int pc_write(struct tty_struct *, const unsigned char *, int); 214static int pc_write(struct tty_struct *, const unsigned char *, int);
222int pc_init(void); 215static int pc_init(void);
223
224#ifdef ENABLE_PCI
225static int init_PCI(void); 216static int init_PCI(void);
226#endif /* ENABLE_PCI */
227 217
228 218
229/* ------------------------------------------------------------------ 219/* ------------------------------------------------------------------
@@ -237,41 +227,41 @@ static int init_PCI(void);
237 making direct calls deserves what they get. 227 making direct calls deserves what they get.
238-------------------------------------------------------------------- */ 228-------------------------------------------------------------------- */
239 229
240static inline void memwinon(struct board_info *b, unsigned int win) 230static void memwinon(struct board_info *b, unsigned int win)
241{ 231{
242 (b->memwinon)(b, win); 232 (b->memwinon)(b, win);
243} 233}
244 234
245static inline void memwinoff(struct board_info *b, unsigned int win) 235static void memwinoff(struct board_info *b, unsigned int win)
246{ 236{
247 (b->memwinoff)(b, win); 237 (b->memwinoff)(b, win);
248} 238}
249 239
250static inline void globalwinon(struct channel *ch) 240static void globalwinon(struct channel *ch)
251{ 241{
252 (ch->board->globalwinon)(ch); 242 (ch->board->globalwinon)(ch);
253} 243}
254 244
255static inline void rxwinon(struct channel *ch) 245static void rxwinon(struct channel *ch)
256{ 246{
257 (ch->board->rxwinon)(ch); 247 (ch->board->rxwinon)(ch);
258} 248}
259 249
260static inline void txwinon(struct channel *ch) 250static void txwinon(struct channel *ch)
261{ 251{
262 (ch->board->txwinon)(ch); 252 (ch->board->txwinon)(ch);
263} 253}
264 254
265static inline void memoff(struct channel *ch) 255static void memoff(struct channel *ch)
266{ 256{
267 (ch->board->memoff)(ch); 257 (ch->board->memoff)(ch);
268} 258}
269static inline void assertgwinon(struct channel *ch) 259static void assertgwinon(struct channel *ch)
270{ 260{
271 (ch->board->assertgwinon)(ch); 261 (ch->board->assertgwinon)(ch);
272} 262}
273 263
274static inline void assertmemoff(struct channel *ch) 264static void assertmemoff(struct channel *ch)
275{ 265{
276 (ch->board->assertmemoff)(ch); 266 (ch->board->assertmemoff)(ch);
277} 267}
@@ -281,66 +271,66 @@ static inline void assertmemoff(struct channel *ch)
281 and CX series cards. 271 and CX series cards.
282------------------------------------------------------------ */ 272------------------------------------------------------------ */
283 273
284static inline void pcxem_memwinon(struct board_info *b, unsigned int win) 274static void pcxem_memwinon(struct board_info *b, unsigned int win)
285{ 275{
286 outb_p(FEPWIN|win, (int)b->port + 1); 276 outb_p(FEPWIN|win, b->port + 1);
287} 277}
288 278
289static inline void pcxem_memwinoff(struct board_info *b, unsigned int win) 279static void pcxem_memwinoff(struct board_info *b, unsigned int win)
290{ 280{
291 outb_p(0, (int)b->port + 1); 281 outb_p(0, b->port + 1);
292} 282}
293 283
294static inline void pcxem_globalwinon(struct channel *ch) 284static void pcxem_globalwinon(struct channel *ch)
295{ 285{
296 outb_p( FEPWIN, (int)ch->board->port + 1); 286 outb_p( FEPWIN, (int)ch->board->port + 1);
297} 287}
298 288
299static inline void pcxem_rxwinon(struct channel *ch) 289static void pcxem_rxwinon(struct channel *ch)
300{ 290{
301 outb_p(ch->rxwin, (int)ch->board->port + 1); 291 outb_p(ch->rxwin, (int)ch->board->port + 1);
302} 292}
303 293
304static inline void pcxem_txwinon(struct channel *ch) 294static void pcxem_txwinon(struct channel *ch)
305{ 295{
306 outb_p(ch->txwin, (int)ch->board->port + 1); 296 outb_p(ch->txwin, (int)ch->board->port + 1);
307} 297}
308 298
309static inline void pcxem_memoff(struct channel *ch) 299static void pcxem_memoff(struct channel *ch)
310{ 300{
311 outb_p(0, (int)ch->board->port + 1); 301 outb_p(0, (int)ch->board->port + 1);
312} 302}
313 303
314/* ----------------- Begin pcxe memory window stuff ------------------ */ 304/* ----------------- Begin pcxe memory window stuff ------------------ */
315 305
316static inline void pcxe_memwinon(struct board_info *b, unsigned int win) 306static void pcxe_memwinon(struct board_info *b, unsigned int win)
317{ 307{
318 outb_p(FEPWIN | win, (int)b->port + 1); 308 outb_p(FEPWIN | win, b->port + 1);
319} 309}
320 310
321static inline void pcxe_memwinoff(struct board_info *b, unsigned int win) 311static void pcxe_memwinoff(struct board_info *b, unsigned int win)
322{ 312{
323 outb_p(inb((int)b->port) & ~FEPMEM, 313 outb_p(inb(b->port) & ~FEPMEM,
324 (int)b->port + 1); 314 b->port + 1);
325 outb_p(0, (int)b->port + 1); 315 outb_p(0, b->port + 1);
326} 316}
327 317
328static inline void pcxe_globalwinon(struct channel *ch) 318static void pcxe_globalwinon(struct channel *ch)
329{ 319{
330 outb_p( FEPWIN, (int)ch->board->port + 1); 320 outb_p( FEPWIN, (int)ch->board->port + 1);
331} 321}
332 322
333static inline void pcxe_rxwinon(struct channel *ch) 323static void pcxe_rxwinon(struct channel *ch)
334{ 324{
335 outb_p(ch->rxwin, (int)ch->board->port + 1); 325 outb_p(ch->rxwin, (int)ch->board->port + 1);
336} 326}
337 327
338static inline void pcxe_txwinon(struct channel *ch) 328static void pcxe_txwinon(struct channel *ch)
339{ 329{
340 outb_p(ch->txwin, (int)ch->board->port + 1); 330 outb_p(ch->txwin, (int)ch->board->port + 1);
341} 331}
342 332
343static inline void pcxe_memoff(struct channel *ch) 333static void pcxe_memoff(struct channel *ch)
344{ 334{
345 outb_p(0, (int)ch->board->port); 335 outb_p(0, (int)ch->board->port);
346 outb_p(0, (int)ch->board->port + 1); 336 outb_p(0, (int)ch->board->port + 1);
@@ -348,44 +338,44 @@ static inline void pcxe_memoff(struct channel *ch)
 
 /* ------------- Begin pc64xe and pcxi memory window stuff -------------- */
 
-static inline void pcxi_memwinon(struct board_info *b, unsigned int win)
+static void pcxi_memwinon(struct board_info *b, unsigned int win)
 {
-	outb_p(inb((int)b->port) | FEPMEM, (int)b->port);
+	outb_p(inb(b->port) | FEPMEM, b->port);
 }
 
-static inline void pcxi_memwinoff(struct board_info *b, unsigned int win)
+static void pcxi_memwinoff(struct board_info *b, unsigned int win)
 {
-	outb_p(inb((int)b->port) & ~FEPMEM, (int)b->port);
+	outb_p(inb(b->port) & ~FEPMEM, b->port);
 }
 
-static inline void pcxi_globalwinon(struct channel *ch)
+static void pcxi_globalwinon(struct channel *ch)
 {
-	outb_p(FEPMEM, (int)ch->board->port);
+	outb_p(FEPMEM, ch->board->port);
 }
 
-static inline void pcxi_rxwinon(struct channel *ch)
+static void pcxi_rxwinon(struct channel *ch)
 {
-	outb_p(FEPMEM, (int)ch->board->port);
+	outb_p(FEPMEM, ch->board->port);
 }
 
-static inline void pcxi_txwinon(struct channel *ch)
+static void pcxi_txwinon(struct channel *ch)
 {
-	outb_p(FEPMEM, (int)ch->board->port);
+	outb_p(FEPMEM, ch->board->port);
 }
 
-static inline void pcxi_memoff(struct channel *ch)
+static void pcxi_memoff(struct channel *ch)
 {
-	outb_p(0, (int)ch->board->port);
+	outb_p(0, ch->board->port);
 }
 
-static inline void pcxi_assertgwinon(struct channel *ch)
+static void pcxi_assertgwinon(struct channel *ch)
 {
-	epcaassert(inb((int)ch->board->port) & FEPMEM, "Global memory off");
+	epcaassert(inb(ch->board->port) & FEPMEM, "Global memory off");
 }
 
-static inline void pcxi_assertmemoff(struct channel *ch)
+static void pcxi_assertmemoff(struct channel *ch)
 {
-	epcaassert(!(inb((int)ch->board->port) & FEPMEM), "Memory on");
+	epcaassert(!(inb(ch->board->port) & FEPMEM), "Memory on");
 }
 
 
@@ -398,185 +388,143 @@ static inline void pcxi_assertmemoff(struct channel *ch)
 	may or may not do anything.
 ---------------------------------------------------------------------------*/
 
-static inline void dummy_memwinon(struct board_info *b, unsigned int win)
+static void dummy_memwinon(struct board_info *b, unsigned int win)
 {
 }
 
-static inline void dummy_memwinoff(struct board_info *b, unsigned int win)
+static void dummy_memwinoff(struct board_info *b, unsigned int win)
 {
 }
 
-static inline void dummy_globalwinon(struct channel *ch)
+static void dummy_globalwinon(struct channel *ch)
 {
 }
 
-static inline void dummy_rxwinon(struct channel *ch)
+static void dummy_rxwinon(struct channel *ch)
 {
 }
 
-static inline void dummy_txwinon(struct channel *ch)
+static void dummy_txwinon(struct channel *ch)
 {
 }
 
-static inline void dummy_memoff(struct channel *ch)
+static void dummy_memoff(struct channel *ch)
 {
 }
 
-static inline void dummy_assertgwinon(struct channel *ch)
+static void dummy_assertgwinon(struct channel *ch)
 {
 }
 
-static inline void dummy_assertmemoff(struct channel *ch)
+static void dummy_assertmemoff(struct channel *ch)
 {
 }
 
 /* ----------------- Begin verifyChannel function ----------------------- */
-static inline struct channel *verifyChannel(register struct tty_struct *tty)
+static struct channel *verifyChannel(struct tty_struct *tty)
 { /* Begin verifyChannel */
-
 	/* --------------------------------------------------------------------
 	This routine basically provides a sanity check. It insures that
 	the channel returned is within the proper range of addresses as
 	well as properly initialized. If some bogus info gets passed in
 	through tty->driver_data this should catch it.
 	--------------------------------------------------------------------- */
-
-	if (tty)
-	{ /* Begin if tty */
-
-		register struct channel *ch = (struct channel *)tty->driver_data;
-
-		if ((ch >= &digi_channels[0]) && (ch < &digi_channels[nbdevs]))
-		{
+	if (tty) {
+		struct channel *ch = (struct channel *)tty->driver_data;
+		if ((ch >= &digi_channels[0]) && (ch < &digi_channels[nbdevs])) {
 			if (ch->magic == EPCA_MAGIC)
 				return ch;
 		}
-
-	} /* End if tty */
-
-	/* Else return a NULL for invalid */
+	}
 	return NULL;
 
 } /* End verifyChannel */
 
 /* ------------------ Begin pc_sched_event ------------------------- */
 
-static inline void pc_sched_event(struct channel *ch, int event)
-{ /* Begin pc_sched_event */
-
-
+static void pc_sched_event(struct channel *ch, int event)
+{
 	/* ----------------------------------------------------------------------
 	We call this to schedule interrupt processing on some event. The
 	kernel sees our request and calls the related routine in OUR driver.
 	-------------------------------------------------------------------------*/
-
 	ch->event |= 1 << event;
 	schedule_work(&ch->tqueue);
-
-
 } /* End pc_sched_event */
 
 /* ------------------ Begin epca_error ------------------------- */
 
 static void epca_error(int line, char *msg)
-{ /* Begin epca_error */
-
+{
 	printk(KERN_ERR "epca_error (Digi): line = %d %s\n",line,msg);
-	return;
-
-} /* End epca_error */
+}
 
 /* ------------------ Begin pc_close ------------------------- */
 static void pc_close(struct tty_struct * tty, struct file * filp)
-{ /* Begin pc_close */
-
+{
 	struct channel *ch;
 	unsigned long flags;
-
 	/* ---------------------------------------------------------
 	verifyChannel returns the channel from the tty struct
 	if it is valid. This serves as a sanity check.
 	------------------------------------------------------------- */
-
-	if ((ch = verifyChannel(tty)) != NULL)
-	{ /* Begin if ch != NULL */
-
-		save_flags(flags);
-		cli();
-
-		if (tty_hung_up_p(filp))
-		{
-			restore_flags(flags);
+	if ((ch = verifyChannel(tty)) != NULL) { /* Begin if ch != NULL */
+		spin_lock_irqsave(&epca_lock, flags);
+		if (tty_hung_up_p(filp)) {
+			spin_unlock_irqrestore(&epca_lock, flags);
 			return;
 		}
-
 		/* Check to see if the channel is open more than once */
-		if (ch->count-- > 1)
-		{ /* Begin channel is open more than once */
-
+		if (ch->count-- > 1) {
+			/* Begin channel is open more than once */
 			/* -------------------------------------------------------------
 			Return without doing anything. Someone might still be using
 			the channel.
 			---------------------------------------------------------------- */
-
-			restore_flags(flags);
+			spin_unlock_irqrestore(&epca_lock, flags);
 			return;
 		} /* End channel is open more than once */
 
 		/* Port open only once go ahead with shutdown & reset */
-
-		if (ch->count < 0)
-		{
-			ch->count = 0;
-		}
+		if (ch->count < 0)
+			BUG();
 
 		/* ---------------------------------------------------------------
 		Let the rest of the driver know the channel is being closed.
 		This becomes important if an open is attempted before close
 		is finished.
 		------------------------------------------------------------------ */
-
 		ch->asyncflags |= ASYNC_CLOSING;
-
 		tty->closing = 1;
 
-		if (ch->asyncflags & ASYNC_INITIALIZED)
-		{
+		spin_unlock_irqrestore(&epca_lock, flags);
+
+		if (ch->asyncflags & ASYNC_INITIALIZED) {
 			/* Setup an event to indicate when the transmit buffer empties */
 			setup_empty_event(tty, ch);
 			tty_wait_until_sent(tty, 3000); /* 30 seconds timeout */
 		}
-
 		if (tty->driver->flush_buffer)
 			tty->driver->flush_buffer(tty);
 
 		tty_ldisc_flush(tty);
 		shutdown(ch);
+
+		spin_lock_irqsave(&epca_lock, flags);
 		tty->closing = 0;
 		ch->event = 0;
 		ch->tty = NULL;
+		spin_unlock_irqrestore(&epca_lock, flags);
 
-		if (ch->blocked_open)
-		{ /* Begin if blocked_open */
-
+		if (ch->blocked_open) { /* Begin if blocked_open */
 			if (ch->close_delay)
-			{
 				msleep_interruptible(jiffies_to_msecs(ch->close_delay));
-			}
-
 			wake_up_interruptible(&ch->open_wait);
-
 		} /* End if blocked_open */
-
 		ch->asyncflags &= ~(ASYNC_NORMAL_ACTIVE | ASYNC_INITIALIZED |
 				ASYNC_CLOSING);
 		wake_up_interruptible(&ch->close_wait);
-
-
-		restore_flags(flags);
-
 	} /* End if ch != NULL */
-
 } /* End pc_close */
 
 /* ------------------ Begin shutdown ------------------------- */
@@ -586,15 +534,14 @@ static void shutdown(struct channel *ch)
 
 	unsigned long flags;
 	struct tty_struct *tty;
-	volatile struct board_chan *bc;
+	struct board_chan *bc;
 
 	if (!(ch->asyncflags & ASYNC_INITIALIZED))
 		return;
 
-	save_flags(flags);
-	cli();
-	globalwinon(ch);
+	spin_lock_irqsave(&epca_lock, flags);
 
+	globalwinon(ch);
 	bc = ch->brdchan;
 
 	/* ------------------------------------------------------------------
@@ -604,20 +551,17 @@ static void shutdown(struct channel *ch)
 	--------------------------------------------------------------------- */
 
 	if (bc)
-		bc->idata = 0;
-
+		writeb(0, &bc->idata);
 	tty = ch->tty;
 
 	/* ----------------------------------------------------------------
 	If we're a modem control device and HUPCL is on, drop RTS & DTR.
 	------------------------------------------------------------------ */
 
-	if (tty->termios->c_cflag & HUPCL)
-	{
+	if (tty->termios->c_cflag & HUPCL) {
 		ch->omodem &= ~(ch->m_rts | ch->m_dtr);
 		fepcmd(ch, SETMODEM, 0, ch->m_dtr | ch->m_rts, 10, 1);
 	}
-
 	memoff(ch);
 
 	/* ------------------------------------------------------------------
@@ -628,7 +572,7 @@ static void shutdown(struct channel *ch)
 	/* Prevent future Digi programmed interrupts from coming active */
 
 	ch->asyncflags &= ~ASYNC_INITIALIZED;
-	restore_flags(flags);
+	spin_unlock_irqrestore(&epca_lock, flags);
 
 } /* End shutdown */
 
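The hunks above replace the old save_flags()/cli()/restore_flags() sequences with one driver-wide spinlock taken with interrupts disabled. A minimal standalone sketch of that pattern follows; it is not part of the patch, and the lock, structure and field names are only illustrative:

#include <linux/spinlock.h>

static DEFINE_SPINLOCK(example_lock);

static void example_set_flag(struct channel *ch, int flag)
{
	unsigned long flags;

	/* was: save_flags(flags); cli(); */
	spin_lock_irqsave(&example_lock, flags);
	ch->statusflags |= flag;	/* state shared with the poll timer */
	/* was: restore_flags(flags); */
	spin_unlock_irqrestore(&example_lock, flags);
}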
@@ -636,7 +580,6 @@ static void shutdown(struct channel *ch)
 
 static void pc_hangup(struct tty_struct *tty)
 { /* Begin pc_hangup */
-
 	struct channel *ch;
 
 	/* ---------------------------------------------------------
@@ -644,25 +587,21 @@ static void pc_hangup(struct tty_struct *tty)
 	if it is valid. This serves as a sanity check.
 	------------------------------------------------------------- */
 
-	if ((ch = verifyChannel(tty)) != NULL)
-	{ /* Begin if ch != NULL */
-
+	if ((ch = verifyChannel(tty)) != NULL) { /* Begin if ch != NULL */
 		unsigned long flags;
 
-		save_flags(flags);
-		cli();
 		if (tty->driver->flush_buffer)
 			tty->driver->flush_buffer(tty);
 		tty_ldisc_flush(tty);
 		shutdown(ch);
 
+		spin_lock_irqsave(&epca_lock, flags);
 		ch->tty = NULL;
 		ch->event = 0;
 		ch->count = 0;
-		restore_flags(flags);
 		ch->asyncflags &= ~(ASYNC_NORMAL_ACTIVE | ASYNC_INITIALIZED);
+		spin_unlock_irqrestore(&epca_lock, flags);
 		wake_up_interruptible(&ch->open_wait);
-
 	} /* End if ch != NULL */
 
 } /* End pc_hangup */
@@ -672,18 +611,14 @@ static void pc_hangup(struct tty_struct *tty)
672static int pc_write(struct tty_struct * tty, 611static int pc_write(struct tty_struct * tty,
673 const unsigned char *buf, int bytesAvailable) 612 const unsigned char *buf, int bytesAvailable)
674{ /* Begin pc_write */ 613{ /* Begin pc_write */
675 614 unsigned int head, tail;
676 register unsigned int head, tail; 615 int dataLen;
677 register int dataLen; 616 int size;
678 register int size; 617 int amountCopied;
679 register int amountCopied;
680
681
682 struct channel *ch; 618 struct channel *ch;
683 unsigned long flags; 619 unsigned long flags;
684 int remain; 620 int remain;
685 volatile struct board_chan *bc; 621 struct board_chan *bc;
686
687 622
688 /* ---------------------------------------------------------------- 623 /* ----------------------------------------------------------------
689 pc_write is primarily called directly by the kernel routine 624 pc_write is primarily called directly by the kernel routine
@@ -706,24 +641,20 @@ static int pc_write(struct tty_struct * tty,
706 641
707 bc = ch->brdchan; 642 bc = ch->brdchan;
708 size = ch->txbufsize; 643 size = ch->txbufsize;
709
710 amountCopied = 0; 644 amountCopied = 0;
711 save_flags(flags);
712 cli();
713 645
646 spin_lock_irqsave(&epca_lock, flags);
714 globalwinon(ch); 647 globalwinon(ch);
715 648
716 head = bc->tin & (size - 1); 649 head = readw(&bc->tin) & (size - 1);
717 tail = bc->tout; 650 tail = readw(&bc->tout);
718 651
719 if (tail != bc->tout) 652 if (tail != readw(&bc->tout))
720 tail = bc->tout; 653 tail = readw(&bc->tout);
721 tail &= (size - 1); 654 tail &= (size - 1);
722 655
723 /* If head >= tail, head has not wrapped around. */ 656 /* If head >= tail, head has not wrapped around. */
724 if (head >= tail) 657 if (head >= tail) { /* Begin head has not wrapped */
725 { /* Begin head has not wrapped */
726
727 /* --------------------------------------------------------------- 658 /* ---------------------------------------------------------------
728 remain (much like dataLen above) represents the total amount of 659 remain (much like dataLen above) represents the total amount of
729 space available on the card for data. Here dataLen represents 660 space available on the card for data. Here dataLen represents
@@ -731,26 +662,19 @@ static int pc_write(struct tty_struct * tty,
731 buffer. This is important because a memcpy cannot be told to 662 buffer. This is important because a memcpy cannot be told to
732 automatically wrap around when it hits the buffer end. 663 automatically wrap around when it hits the buffer end.
733 ------------------------------------------------------------------ */ 664 ------------------------------------------------------------------ */
734
735 dataLen = size - head; 665 dataLen = size - head;
736 remain = size - (head - tail) - 1; 666 remain = size - (head - tail) - 1;
737 667 } else { /* Begin head has wrapped around */
738 } /* End head has not wrapped */
739 else
740 { /* Begin head has wrapped around */
741 668
742 remain = tail - head - 1; 669 remain = tail - head - 1;
743 dataLen = remain; 670 dataLen = remain;
744 671
745 } /* End head has wrapped around */ 672 } /* End head has wrapped around */
746
747 /* ------------------------------------------------------------------- 673 /* -------------------------------------------------------------------
748 Check the space on the card. If we have more data than 674 Check the space on the card. If we have more data than
749 space; reduce the amount of data to fit the space. 675 space; reduce the amount of data to fit the space.
750 ---------------------------------------------------------------------- */ 676 ---------------------------------------------------------------------- */
751
752 bytesAvailable = min(remain, bytesAvailable); 677 bytesAvailable = min(remain, bytesAvailable);
753
754 txwinon(ch); 678 txwinon(ch);
755 while (bytesAvailable > 0) 679 while (bytesAvailable > 0)
756 { /* Begin while there is data to copy onto card */ 680 { /* Begin while there is data to copy onto card */
@@ -767,26 +691,21 @@ static int pc_write(struct tty_struct * tty,
767 amountCopied += dataLen; 691 amountCopied += dataLen;
768 bytesAvailable -= dataLen; 692 bytesAvailable -= dataLen;
769 693
770 if (head >= size) 694 if (head >= size) {
771 {
772 head = 0; 695 head = 0;
773 dataLen = tail; 696 dataLen = tail;
774 } 697 }
775
776 } /* End while there is data to copy onto card */ 698 } /* End while there is data to copy onto card */
777
778 ch->statusflags |= TXBUSY; 699 ch->statusflags |= TXBUSY;
779 globalwinon(ch); 700 globalwinon(ch);
780 bc->tin = head; 701 writew(head, &bc->tin);
781 702
782 if ((ch->statusflags & LOWWAIT) == 0) 703 if ((ch->statusflags & LOWWAIT) == 0) {
783 {
784 ch->statusflags |= LOWWAIT; 704 ch->statusflags |= LOWWAIT;
785 bc->ilow = 1; 705 writeb(1, &bc->ilow);
786 } 706 }
787 memoff(ch); 707 memoff(ch);
788 restore_flags(flags); 708 spin_unlock_irqrestore(&epca_lock, flags);
789
790 return(amountCopied); 709 return(amountCopied);
791 710
792} /* End pc_write */ 711} /* End pc_write */
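pc_write() above works out how much free space the card's transmit ring has from the head and tail indices, and how large a contiguous chunk can be copied before the write pointer wraps. A standalone sketch of that arithmetic, assuming a power-of-two buffer size; it is not part of the patch and the names are illustrative:

static void tx_space(unsigned int head, unsigned int tail, unsigned int size,
		     unsigned int *remain, unsigned int *chunk)
{
	head &= size - 1;
	tail &= size - 1;

	if (head >= tail) {		/* writer has not wrapped past the reader */
		*chunk = size - head;	/* contiguous bytes up to the buffer end */
		*remain = size - (head - tail) - 1;
	} else {			/* writer wrapped; free space is one run */
		*remain = tail - head - 1;
		*chunk = *remain;
	}
}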
@@ -795,11 +714,7 @@ static int pc_write(struct tty_struct * tty,
795 714
796static void pc_put_char(struct tty_struct *tty, unsigned char c) 715static void pc_put_char(struct tty_struct *tty, unsigned char c)
797{ /* Begin pc_put_char */ 716{ /* Begin pc_put_char */
798
799
800 pc_write(tty, &c, 1); 717 pc_write(tty, &c, 1);
801 return;
802
803} /* End pc_put_char */ 718} /* End pc_put_char */
804 719
805/* ------------------ Begin pc_write_room ------------------------- */ 720/* ------------------ Begin pc_write_room ------------------------- */
@@ -811,7 +726,7 @@ static int pc_write_room(struct tty_struct *tty)
811 struct channel *ch; 726 struct channel *ch;
812 unsigned long flags; 727 unsigned long flags;
813 unsigned int head, tail; 728 unsigned int head, tail;
814 volatile struct board_chan *bc; 729 struct board_chan *bc;
815 730
816 remain = 0; 731 remain = 0;
817 732
@@ -820,33 +735,29 @@ static int pc_write_room(struct tty_struct *tty)
820 if it is valid. This serves as a sanity check. 735 if it is valid. This serves as a sanity check.
821 ------------------------------------------------------------- */ 736 ------------------------------------------------------------- */
822 737
823 if ((ch = verifyChannel(tty)) != NULL) 738 if ((ch = verifyChannel(tty)) != NULL) {
824 { 739 spin_lock_irqsave(&epca_lock, flags);
825 save_flags(flags);
826 cli();
827 globalwinon(ch); 740 globalwinon(ch);
828 741
829 bc = ch->brdchan; 742 bc = ch->brdchan;
830 head = bc->tin & (ch->txbufsize - 1); 743 head = readw(&bc->tin) & (ch->txbufsize - 1);
831 tail = bc->tout; 744 tail = readw(&bc->tout);
832 745
833 if (tail != bc->tout) 746 if (tail != readw(&bc->tout))
834 tail = bc->tout; 747 tail = readw(&bc->tout);
835 /* Wrap tail if necessary */ 748 /* Wrap tail if necessary */
836 tail &= (ch->txbufsize - 1); 749 tail &= (ch->txbufsize - 1);
837 750
838 if ((remain = tail - head - 1) < 0 ) 751 if ((remain = tail - head - 1) < 0 )
839 remain += ch->txbufsize; 752 remain += ch->txbufsize;
840 753
841 if (remain && (ch->statusflags & LOWWAIT) == 0) 754 if (remain && (ch->statusflags & LOWWAIT) == 0) {
842 {
843 ch->statusflags |= LOWWAIT; 755 ch->statusflags |= LOWWAIT;
844 bc->ilow = 1; 756 writeb(1, &bc->ilow);
845 } 757 }
846 memoff(ch); 758 memoff(ch);
847 restore_flags(flags); 759 spin_unlock_irqrestore(&epca_lock, flags);
848 } 760 }
849
850 /* Return how much room is left on card */ 761 /* Return how much room is left on card */
851 return remain; 762 return remain;
852 763
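The tin/tout/ilow fields touched above live in ioremap()ed card memory, which is why the patch converts the old volatile dereferences to readw()/writeb() accessors. A rough standalone sketch of the same pattern, assuming the driver's struct board_chan layout; not part of the patch:

#include <asm/io.h>

static int tx_room(struct board_chan __iomem *bc, unsigned int size)
{
	unsigned int head = readw(&bc->tin) & (size - 1);	/* was: bc->tin */
	unsigned int tail = readw(&bc->tout) & (size - 1);	/* was: bc->tout */
	int room = tail - head - 1;

	if (room < 0)
		room += size;
	if (room)
		writeb(1, &bc->ilow);	/* was: bc->ilow = 1; request a low-water event */
	return room;
}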
@@ -862,8 +773,7 @@ static int pc_chars_in_buffer(struct tty_struct *tty)
862 int remain; 773 int remain;
863 unsigned long flags; 774 unsigned long flags;
864 struct channel *ch; 775 struct channel *ch;
865 volatile struct board_chan *bc; 776 struct board_chan *bc;
866
867 777
868 /* --------------------------------------------------------- 778 /* ---------------------------------------------------------
869 verifyChannel returns the channel from the tty struct 779 verifyChannel returns the channel from the tty struct
@@ -873,34 +783,27 @@ static int pc_chars_in_buffer(struct tty_struct *tty)
873 if ((ch = verifyChannel(tty)) == NULL) 783 if ((ch = verifyChannel(tty)) == NULL)
874 return(0); 784 return(0);
875 785
876 save_flags(flags); 786 spin_lock_irqsave(&epca_lock, flags);
877 cli();
878 globalwinon(ch); 787 globalwinon(ch);
879 788
880 bc = ch->brdchan; 789 bc = ch->brdchan;
881 tail = bc->tout; 790 tail = readw(&bc->tout);
882 head = bc->tin; 791 head = readw(&bc->tin);
883 ctail = ch->mailbox->cout; 792 ctail = readw(&ch->mailbox->cout);
884 793
885 if (tail == head && ch->mailbox->cin == ctail && bc->tbusy == 0) 794 if (tail == head && readw(&ch->mailbox->cin) == ctail && readb(&bc->tbusy) == 0)
886 chars = 0; 795 chars = 0;
887 else 796 else { /* Begin if some space on the card has been used */
888 { /* Begin if some space on the card has been used */ 797 head = readw(&bc->tin) & (ch->txbufsize - 1);
889
890 head = bc->tin & (ch->txbufsize - 1);
891 tail &= (ch->txbufsize - 1); 798 tail &= (ch->txbufsize - 1);
892
893 /* -------------------------------------------------------------- 799 /* --------------------------------------------------------------
894 The logic here is basically opposite of the above pc_write_room 800 The logic here is basically opposite of the above pc_write_room
895 here we are finding the amount of bytes in the buffer filled. 801 here we are finding the amount of bytes in the buffer filled.
896 Not the amount of bytes empty. 802 Not the amount of bytes empty.
897 ------------------------------------------------------------------- */ 803 ------------------------------------------------------------------- */
898
899 if ((remain = tail - head - 1) < 0 ) 804 if ((remain = tail - head - 1) < 0 )
900 remain += ch->txbufsize; 805 remain += ch->txbufsize;
901
902 chars = (int)(ch->txbufsize - remain); 806 chars = (int)(ch->txbufsize - remain);
903
904 /* ------------------------------------------------------------- 807 /* -------------------------------------------------------------
905 Make it possible to wakeup anything waiting for output 808 Make it possible to wakeup anything waiting for output
906 in tty_ioctl.c, etc. 809 in tty_ioctl.c, etc.
@@ -908,15 +811,12 @@ static int pc_chars_in_buffer(struct tty_struct *tty)
908 If not already set. Setup an event to indicate when the 811 If not already set. Setup an event to indicate when the
909 transmit buffer empties 812 transmit buffer empties
910 ----------------------------------------------------------------- */ 813 ----------------------------------------------------------------- */
911
912 if (!(ch->statusflags & EMPTYWAIT)) 814 if (!(ch->statusflags & EMPTYWAIT))
913 setup_empty_event(tty,ch); 815 setup_empty_event(tty,ch);
914 816
915 } /* End if some space on the card has been used */ 817 } /* End if some space on the card has been used */
916
917 memoff(ch); 818 memoff(ch);
918 restore_flags(flags); 819 spin_unlock_irqrestore(&epca_lock, flags);
919
920 /* Return number of characters residing on card. */ 820 /* Return number of characters residing on card. */
921 return(chars); 821 return(chars);
922 822
@@ -930,67 +830,46 @@ static void pc_flush_buffer(struct tty_struct *tty)
930 unsigned int tail; 830 unsigned int tail;
931 unsigned long flags; 831 unsigned long flags;
932 struct channel *ch; 832 struct channel *ch;
933 volatile struct board_chan *bc; 833 struct board_chan *bc;
934
935
936 /* --------------------------------------------------------- 834 /* ---------------------------------------------------------
937 verifyChannel returns the channel from the tty struct 835 verifyChannel returns the channel from the tty struct
938 if it is valid. This serves as a sanity check. 836 if it is valid. This serves as a sanity check.
939 ------------------------------------------------------------- */ 837 ------------------------------------------------------------- */
940
941 if ((ch = verifyChannel(tty)) == NULL) 838 if ((ch = verifyChannel(tty)) == NULL)
942 return; 839 return;
943 840
944 save_flags(flags); 841 spin_lock_irqsave(&epca_lock, flags);
945 cli();
946
947 globalwinon(ch); 842 globalwinon(ch);
948
949 bc = ch->brdchan; 843 bc = ch->brdchan;
950 tail = bc->tout; 844 tail = readw(&bc->tout);
951
952 /* Have FEP move tout pointer; effectively flushing transmit buffer */ 845 /* Have FEP move tout pointer; effectively flushing transmit buffer */
953
954 fepcmd(ch, STOUT, (unsigned) tail, 0, 0, 0); 846 fepcmd(ch, STOUT, (unsigned) tail, 0, 0, 0);
955
956 memoff(ch); 847 memoff(ch);
957 restore_flags(flags); 848 spin_unlock_irqrestore(&epca_lock, flags);
958
959 wake_up_interruptible(&tty->write_wait); 849 wake_up_interruptible(&tty->write_wait);
960 tty_wakeup(tty); 850 tty_wakeup(tty);
961
962} /* End pc_flush_buffer */ 851} /* End pc_flush_buffer */
963 852
964/* ------------------ Begin pc_flush_chars ---------------------- */ 853/* ------------------ Begin pc_flush_chars ---------------------- */
965 854
966static void pc_flush_chars(struct tty_struct *tty) 855static void pc_flush_chars(struct tty_struct *tty)
967{ /* Begin pc_flush_chars */ 856{ /* Begin pc_flush_chars */
968
969 struct channel * ch; 857 struct channel * ch;
970
971 /* --------------------------------------------------------- 858 /* ---------------------------------------------------------
972 verifyChannel returns the channel from the tty struct 859 verifyChannel returns the channel from the tty struct
973 if it is valid. This serves as a sanity check. 860 if it is valid. This serves as a sanity check.
974 ------------------------------------------------------------- */ 861 ------------------------------------------------------------- */
975 862 if ((ch = verifyChannel(tty)) != NULL) {
976 if ((ch = verifyChannel(tty)) != NULL)
977 {
978 unsigned long flags; 863 unsigned long flags;
979 864 spin_lock_irqsave(&epca_lock, flags);
980 save_flags(flags);
981 cli();
982
983 /* ---------------------------------------------------------------- 865 /* ----------------------------------------------------------------
984 If not already set and the transmitter is busy setup an event 866 If not already set and the transmitter is busy setup an event
985 to indicate when the transmit empties. 867 to indicate when the transmit empties.
986 ------------------------------------------------------------------- */ 868 ------------------------------------------------------------------- */
987
988 if ((ch->statusflags & TXBUSY) && !(ch->statusflags & EMPTYWAIT)) 869 if ((ch->statusflags & TXBUSY) && !(ch->statusflags & EMPTYWAIT))
989 setup_empty_event(tty,ch); 870 setup_empty_event(tty,ch);
990 871 spin_unlock_irqrestore(&epca_lock, flags);
991 restore_flags(flags);
992 } 872 }
993
994} /* End pc_flush_chars */ 873} /* End pc_flush_chars */
995 874
996/* ------------------ Begin block_til_ready ---------------------- */ 875/* ------------------ Begin block_til_ready ---------------------- */
@@ -998,14 +877,11 @@ static void pc_flush_chars(struct tty_struct *tty)
998static int block_til_ready(struct tty_struct *tty, 877static int block_til_ready(struct tty_struct *tty,
999 struct file *filp, struct channel *ch) 878 struct file *filp, struct channel *ch)
1000{ /* Begin block_til_ready */ 879{ /* Begin block_til_ready */
1001
1002 DECLARE_WAITQUEUE(wait,current); 880 DECLARE_WAITQUEUE(wait,current);
1003 int retval, do_clocal = 0; 881 int retval, do_clocal = 0;
1004 unsigned long flags; 882 unsigned long flags;
1005 883
1006 884 if (tty_hung_up_p(filp)) {
1007 if (tty_hung_up_p(filp))
1008 {
1009 if (ch->asyncflags & ASYNC_HUP_NOTIFY) 885 if (ch->asyncflags & ASYNC_HUP_NOTIFY)
1010 retval = -EAGAIN; 886 retval = -EAGAIN;
1011 else 887 else
@@ -1017,8 +893,7 @@ static int block_til_ready(struct tty_struct *tty,
1017 If the device is in the middle of being closed, then block 893 If the device is in the middle of being closed, then block
1018 until it's done, and then try again. 894 until it's done, and then try again.
1019 -------------------------------------------------------------------- */ 895 -------------------------------------------------------------------- */
1020 if (ch->asyncflags & ASYNC_CLOSING) 896 if (ch->asyncflags & ASYNC_CLOSING) {
1021 {
1022 interruptible_sleep_on(&ch->close_wait); 897 interruptible_sleep_on(&ch->close_wait);
1023 898
1024 if (ch->asyncflags & ASYNC_HUP_NOTIFY) 899 if (ch->asyncflags & ASYNC_HUP_NOTIFY)
@@ -1027,43 +902,29 @@ static int block_til_ready(struct tty_struct *tty,
1027 return -ERESTARTSYS; 902 return -ERESTARTSYS;
1028 } 903 }
1029 904
1030 if (filp->f_flags & O_NONBLOCK) 905 if (filp->f_flags & O_NONBLOCK) {
1031 {
1032 /* ----------------------------------------------------------------- 906 /* -----------------------------------------------------------------
1033 If non-blocking mode is set, then make the check up front 907 If non-blocking mode is set, then make the check up front
1034 and then exit. 908 and then exit.
1035 -------------------------------------------------------------------- */ 909 -------------------------------------------------------------------- */
1036
1037 ch->asyncflags |= ASYNC_NORMAL_ACTIVE; 910 ch->asyncflags |= ASYNC_NORMAL_ACTIVE;
1038
1039 return 0; 911 return 0;
1040 } 912 }
1041
1042
1043 if (tty->termios->c_cflag & CLOCAL) 913 if (tty->termios->c_cflag & CLOCAL)
1044 do_clocal = 1; 914 do_clocal = 1;
1045 915 /* Block waiting for the carrier detect and the line to become free */
1046 /* Block waiting for the carrier detect and the line to become free */
1047 916
1048 retval = 0; 917 retval = 0;
1049 add_wait_queue(&ch->open_wait, &wait); 918 add_wait_queue(&ch->open_wait, &wait);
1050 save_flags(flags);
1051 cli();
1052
1053 919
920 spin_lock_irqsave(&epca_lock, flags);
1054 /* We dec count so that pc_close will know when to free things */ 921 /* We dec count so that pc_close will know when to free things */
1055 if (!tty_hung_up_p(filp)) 922 if (!tty_hung_up_p(filp))
1056 ch->count--; 923 ch->count--;
1057
1058 restore_flags(flags);
1059
1060 ch->blocked_open++; 924 ch->blocked_open++;
1061
1062 while(1) 925 while(1)
1063 { /* Begin forever while */ 926 { /* Begin forever while */
1064
1065 set_current_state(TASK_INTERRUPTIBLE); 927 set_current_state(TASK_INTERRUPTIBLE);
1066
1067 if (tty_hung_up_p(filp) || 928 if (tty_hung_up_p(filp) ||
1068 !(ch->asyncflags & ASYNC_INITIALIZED)) 929 !(ch->asyncflags & ASYNC_INITIALIZED))
1069 { 930 {
@@ -1073,17 +934,14 @@ static int block_til_ready(struct tty_struct *tty,
1073 retval = -ERESTARTSYS; 934 retval = -ERESTARTSYS;
1074 break; 935 break;
1075 } 936 }
1076
1077 if (!(ch->asyncflags & ASYNC_CLOSING) && 937 if (!(ch->asyncflags & ASYNC_CLOSING) &&
1078 (do_clocal || (ch->imodem & ch->dcd))) 938 (do_clocal || (ch->imodem & ch->dcd)))
1079 break; 939 break;
1080 940 if (signal_pending(current)) {
1081 if (signal_pending(current))
1082 {
1083 retval = -ERESTARTSYS; 941 retval = -ERESTARTSYS;
1084 break; 942 break;
1085 } 943 }
1086 944 spin_unlock_irqrestore(&epca_lock, flags);
1087 /* --------------------------------------------------------------- 945 /* ---------------------------------------------------------------
1088 Allow someone else to be scheduled. We will occasionally go 946 Allow someone else to be scheduled. We will occasionally go
1089 through this loop until one of the above conditions change. 947 through this loop until one of the above conditions change.
@@ -1091,25 +949,23 @@ static int block_til_ready(struct tty_struct *tty,
1091 prevent this loop from hogging the cpu. 949 prevent this loop from hogging the cpu.
1092 ------------------------------------------------------------------ */ 950 ------------------------------------------------------------------ */
1093 schedule(); 951 schedule();
952 spin_lock_irqsave(&epca_lock, flags);
1094 953
1095 } /* End forever while */ 954 } /* End forever while */
1096 955
1097 current->state = TASK_RUNNING; 956 current->state = TASK_RUNNING;
1098 remove_wait_queue(&ch->open_wait, &wait); 957 remove_wait_queue(&ch->open_wait, &wait);
1099 cli();
1100 if (!tty_hung_up_p(filp)) 958 if (!tty_hung_up_p(filp))
1101 ch->count++; 959 ch->count++;
1102 restore_flags(flags);
1103
1104 ch->blocked_open--; 960 ch->blocked_open--;
1105 961
962 spin_unlock_irqrestore(&epca_lock, flags);
963
1106 if (retval) 964 if (retval)
1107 return retval; 965 return retval;
1108 966
1109 ch->asyncflags |= ASYNC_NORMAL_ACTIVE; 967 ch->asyncflags |= ASYNC_NORMAL_ACTIVE;
1110
1111 return 0; 968 return 0;
1112
1113} /* End block_til_ready */ 969} /* End block_til_ready */
1114 970
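block_til_ready() above keeps its wait-queue loop but now drops epca_lock around schedule() instead of relying on cli(). A simplified standalone sketch of that open-blocking pattern (carrier-only check, illustrative names; not part of the patch):

static int wait_for_carrier(struct channel *ch, spinlock_t *lock)
{
	DECLARE_WAITQUEUE(wait, current);
	unsigned long flags;
	int retval = 0;

	add_wait_queue(&ch->open_wait, &wait);
	spin_lock_irqsave(lock, flags);
	while (1) {
		set_current_state(TASK_INTERRUPTIBLE);
		if (ch->imodem & ch->dcd)	/* carrier is up: open may proceed */
			break;
		if (signal_pending(current)) {
			retval = -ERESTARTSYS;
			break;
		}
		spin_unlock_irqrestore(lock, flags);
		schedule();			/* let the poll/interrupt path run */
		spin_lock_irqsave(lock, flags);
	}
	set_current_state(TASK_RUNNING);
	spin_unlock_irqrestore(lock, flags);
	remove_wait_queue(&ch->open_wait, &wait);
	return retval;
}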
1115/* ------------------ Begin pc_open ---------------------- */ 971/* ------------------ Begin pc_open ---------------------- */
@@ -1120,17 +976,12 @@ static int pc_open(struct tty_struct *tty, struct file * filp)
1120 struct channel *ch; 976 struct channel *ch;
1121 unsigned long flags; 977 unsigned long flags;
1122 int line, retval, boardnum; 978 int line, retval, boardnum;
1123 volatile struct board_chan *bc; 979 struct board_chan *bc;
1124 volatile unsigned int head; 980 unsigned int head;
1125 981
1126 line = tty->index; 982 line = tty->index;
1127 if (line < 0 || line >= nbdevs) 983 if (line < 0 || line >= nbdevs)
1128 { 984 return -ENODEV;
1129 printk(KERN_ERR "<Error> - pc_open : line out of range in pc_open\n");
1130 tty->driver_data = NULL;
1131 return(-ENODEV);
1132 }
1133
1134 985
1135 ch = &digi_channels[line]; 986 ch = &digi_channels[line];
1136 boardnum = ch->boardnum; 987 boardnum = ch->boardnum;
@@ -1143,66 +994,49 @@ static int pc_open(struct tty_struct *tty, struct file * filp)
1143 goes here. 994 goes here.
1144 ---------------------------------------------------------------------- */ 995 ---------------------------------------------------------------------- */
1145 996
1146 if (invalid_lilo_config) 997 if (invalid_lilo_config) {
1147 {
1148 if (setup_error_code & INVALID_BOARD_TYPE) 998 if (setup_error_code & INVALID_BOARD_TYPE)
1149 printk(KERN_ERR "<Error> - pc_open: Invalid board type specified in LILO command\n"); 999 printk(KERN_ERR "epca: pc_open: Invalid board type specified in kernel options.\n");
1150
1151 if (setup_error_code & INVALID_NUM_PORTS) 1000 if (setup_error_code & INVALID_NUM_PORTS)
1152 printk(KERN_ERR "<Error> - pc_open: Invalid number of ports specified in LILO command\n"); 1001 printk(KERN_ERR "epca: pc_open: Invalid number of ports specified in kernel options.\n");
1153
1154 if (setup_error_code & INVALID_MEM_BASE) 1002 if (setup_error_code & INVALID_MEM_BASE)
1155 printk(KERN_ERR "<Error> - pc_open: Invalid board memory address specified in LILO command\n"); 1003 printk(KERN_ERR "epca: pc_open: Invalid board memory address specified in kernel options.\n");
1156
1157 if (setup_error_code & INVALID_PORT_BASE) 1004 if (setup_error_code & INVALID_PORT_BASE)
1158 printk(KERN_ERR "<Error> - pc_open: Invalid board port address specified in LILO command\n"); 1005 printk(KERN_ERR "epca; pc_open: Invalid board port address specified in kernel options.\n");
1159
1160 if (setup_error_code & INVALID_BOARD_STATUS) 1006 if (setup_error_code & INVALID_BOARD_STATUS)
1161 printk(KERN_ERR "<Error> - pc_open: Invalid board status specified in LILO command\n"); 1007 printk(KERN_ERR "epca: pc_open: Invalid board status specified in kernel options.\n");
1162
1163 if (setup_error_code & INVALID_ALTPIN) 1008 if (setup_error_code & INVALID_ALTPIN)
1164 printk(KERN_ERR "<Error> - pc_open: Invalid board altpin specified in LILO command\n"); 1009 printk(KERN_ERR "epca: pc_open: Invalid board altpin specified in kernel options;\n");
1165
1166 tty->driver_data = NULL; /* Mark this device as 'down' */ 1010 tty->driver_data = NULL; /* Mark this device as 'down' */
1167 return(-ENODEV); 1011 return -ENODEV;
1168 } 1012 }
1169 1013 if (boardnum >= num_cards || boards[boardnum].status == DISABLED) {
1170 if ((boardnum >= num_cards) || (boards[boardnum].status == DISABLED))
1171 {
1172 tty->driver_data = NULL; /* Mark this device as 'down' */ 1014 tty->driver_data = NULL; /* Mark this device as 'down' */
1173 return(-ENODEV); 1015 return(-ENODEV);
1174 } 1016 }
1175 1017
1176 if (( bc = ch->brdchan) == 0) 1018 if ((bc = ch->brdchan) == 0) {
1177 {
1178 tty->driver_data = NULL; 1019 tty->driver_data = NULL;
1179 return(-ENODEV); 1020 return -ENODEV;
1180 } 1021 }
1181 1022
1023 spin_lock_irqsave(&epca_lock, flags);
1182 /* ------------------------------------------------------------------ 1024 /* ------------------------------------------------------------------
1183 Every time a channel is opened, increment a counter. This is 1025 Every time a channel is opened, increment a counter. This is
1184 necessary because we do not wish to flush and shutdown the channel 1026 necessary because we do not wish to flush and shutdown the channel
1185 until the last app holding the channel open, closes it. 1027 until the last app holding the channel open, closes it.
1186 --------------------------------------------------------------------- */ 1028 --------------------------------------------------------------------- */
1187
1188 ch->count++; 1029 ch->count++;
1189
1190 /* ---------------------------------------------------------------- 1030 /* ----------------------------------------------------------------
1191 Set a kernel structures pointer to our local channel 1031 Set a kernel structures pointer to our local channel
1192 structure. This way we can get to it when passed only 1032 structure. This way we can get to it when passed only
1193 a tty struct. 1033 a tty struct.
1194 ------------------------------------------------------------------ */ 1034 ------------------------------------------------------------------ */
1195
1196 tty->driver_data = ch; 1035 tty->driver_data = ch;
1197
1198 /* ---------------------------------------------------------------- 1036 /* ----------------------------------------------------------------
1199 If this is the first time the channel has been opened, initialize 1037 If this is the first time the channel has been opened, initialize
1200 the tty->termios struct otherwise let pc_close handle it. 1038 the tty->termios struct otherwise let pc_close handle it.
1201 -------------------------------------------------------------------- */ 1039 -------------------------------------------------------------------- */
1202
1203 save_flags(flags);
1204 cli();
1205
1206 globalwinon(ch); 1040 globalwinon(ch);
1207 ch->statusflags = 0; 1041 ch->statusflags = 0;
1208 1042
@@ -1213,8 +1047,8 @@ static int pc_open(struct tty_struct *tty, struct file * filp)
1213 Set receive head and tail ptrs to each other. This indicates 1047 Set receive head and tail ptrs to each other. This indicates
1214 no data available to read. 1048 no data available to read.
1215 ----------------------------------------------------------------- */ 1049 ----------------------------------------------------------------- */
1216 head = bc->rin; 1050 head = readw(&bc->rin);
1217 bc->rout = head; 1051 writew(head, &bc->rout);
1218 1052
1219 /* Set the channels associated tty structure */ 1053 /* Set the channels associated tty structure */
1220 ch->tty = tty; 1054 ch->tty = tty;
@@ -1224,122 +1058,74 @@ static int pc_open(struct tty_struct *tty, struct file * filp)
1224 issues, etc.... It effect both control flags and input flags. 1058 issues, etc.... It effect both control flags and input flags.
1225 -------------------------------------------------------------------- */ 1059 -------------------------------------------------------------------- */
1226 epcaparam(tty,ch); 1060 epcaparam(tty,ch);
1227
1228 ch->asyncflags |= ASYNC_INITIALIZED; 1061 ch->asyncflags |= ASYNC_INITIALIZED;
1229 memoff(ch); 1062 memoff(ch);
1230 1063 spin_unlock_irqrestore(&epca_lock, flags);
1231 restore_flags(flags);
1232 1064
1233 retval = block_til_ready(tty, filp, ch); 1065 retval = block_til_ready(tty, filp, ch);
1234 if (retval) 1066 if (retval)
1235 {
1236 return retval; 1067 return retval;
1237 }
1238
1239 /* ------------------------------------------------------------- 1068 /* -------------------------------------------------------------
1240 Set this again in case a hangup set it to zero while this 1069 Set this again in case a hangup set it to zero while this
1241 open() was waiting for the line... 1070 open() was waiting for the line...
1242 --------------------------------------------------------------- */ 1071 --------------------------------------------------------------- */
1072 spin_lock_irqsave(&epca_lock, flags);
1243 ch->tty = tty; 1073 ch->tty = tty;
1244
1245 save_flags(flags);
1246 cli();
1247 globalwinon(ch); 1074 globalwinon(ch);
1248
1249 /* Enable Digi Data events */ 1075 /* Enable Digi Data events */
1250 bc->idata = 1; 1076 writeb(1, &bc->idata);
1251
1252 memoff(ch); 1077 memoff(ch);
1253 restore_flags(flags); 1078 spin_unlock_irqrestore(&epca_lock, flags);
1254
1255 return 0; 1079 return 0;
1256
1257} /* End pc_open */ 1080} /* End pc_open */
1258 1081
1259#ifdef MODULE
1260static int __init epca_module_init(void) 1082static int __init epca_module_init(void)
1261{ /* Begin init_module */ 1083{ /* Begin init_module */
1262 1084 return pc_init();
1263 unsigned long flags;
1264
1265 save_flags(flags);
1266 cli();
1267
1268 pc_init();
1269
1270 restore_flags(flags);
1271
1272 return(0);
1273} 1085}
1274 1086
1275module_init(epca_module_init); 1087module_init(epca_module_init);
1276#endif
1277 1088
1278#ifdef ENABLE_PCI
1279static struct pci_driver epca_driver; 1089static struct pci_driver epca_driver;
1280#endif
1281
1282#ifdef MODULE
1283/* -------------------- Begin cleanup_module ---------------------- */
1284 1090
1285static void __exit epca_module_exit(void) 1091static void __exit epca_module_exit(void)
1286{ 1092{
1287
1288 int count, crd; 1093 int count, crd;
1289 struct board_info *bd; 1094 struct board_info *bd;
1290 struct channel *ch; 1095 struct channel *ch;
1291 unsigned long flags;
1292 1096
1293 del_timer_sync(&epca_timer); 1097 del_timer_sync(&epca_timer);
1294 1098
1295 save_flags(flags);
1296 cli();
1297
1298 if ((tty_unregister_driver(pc_driver)) || 1099 if ((tty_unregister_driver(pc_driver)) ||
1299 (tty_unregister_driver(pc_info))) 1100 (tty_unregister_driver(pc_info)))
1300 { 1101 {
1301 printk(KERN_WARNING "<Error> - DIGI : cleanup_module failed to un-register tty driver\n"); 1102 printk(KERN_WARNING "epca: cleanup_module failed to un-register tty driver\n");
1302 restore_flags(flags);
1303 return; 1103 return;
1304 } 1104 }
1305 put_tty_driver(pc_driver); 1105 put_tty_driver(pc_driver);
1306 put_tty_driver(pc_info); 1106 put_tty_driver(pc_info);
1307 1107
1308 for (crd = 0; crd < num_cards; crd++) 1108 for (crd = 0; crd < num_cards; crd++) { /* Begin for each card */
1309 { /* Begin for each card */
1310
1311 bd = &boards[crd]; 1109 bd = &boards[crd];
1312
1313 if (!bd) 1110 if (!bd)
1314 { /* Begin sanity check */ 1111 { /* Begin sanity check */
1315 printk(KERN_ERR "<Error> - Digi : cleanup_module failed\n"); 1112 printk(KERN_ERR "<Error> - Digi : cleanup_module failed\n");
1316 return; 1113 return;
1317 } /* End sanity check */ 1114 } /* End sanity check */
1318 1115 ch = card_ptr[crd];
1319 ch = card_ptr[crd];
1320
1321 for (count = 0; count < bd->numports; count++, ch++) 1116 for (count = 0; count < bd->numports; count++, ch++)
1322 { /* Begin for each port */ 1117 { /* Begin for each port */
1323 1118 if (ch) {
1324 if (ch)
1325 {
1326 if (ch->tty) 1119 if (ch->tty)
1327 tty_hangup(ch->tty); 1120 tty_hangup(ch->tty);
1328 kfree(ch->tmp_buf); 1121 kfree(ch->tmp_buf);
1329 } 1122 }
1330
1331 } /* End for each port */ 1123 } /* End for each port */
1332 } /* End for each card */ 1124 } /* End for each card */
1333
1334#ifdef ENABLE_PCI
1335 pci_unregister_driver (&epca_driver); 1125 pci_unregister_driver (&epca_driver);
1336#endif
1337
1338 restore_flags(flags);
1339
1340} 1126}
1127
1341module_exit(epca_module_exit); 1128module_exit(epca_module_exit);
1342#endif /* MODULE */
1343 1129
1344static struct tty_operations pc_ops = { 1130static struct tty_operations pc_ops = {
1345 .open = pc_open, 1131 .open = pc_open,
@@ -1371,34 +1157,15 @@ static struct tty_operations info_ops = {
1371 1157
1372/* ------------------ Begin pc_init ---------------------- */ 1158/* ------------------ Begin pc_init ---------------------- */
1373 1159
1374int __init pc_init(void) 1160static int __init pc_init(void)
1375{ /* Begin pc_init */ 1161{ /* Begin pc_init */
1376
1377 /* ----------------------------------------------------------------
1378 pc_init is called by the operating system during boot up prior to
1379 any open calls being made. In the older versions of Linux (Prior
1380 to 2.0.0) an entry is made into tty_io.c. A pointer to the last
1381 memory location (from kernel space) used (kmem_start) is passed
1382 to pc_init. It is pc_inits responsibility to modify this value
1383 for any memory that the Digi driver might need and then return
1384 this value to the operating system. For example if the driver
1385 wishes to allocate 1K of kernel memory, pc_init would return
1386 (kmem_start + 1024). This memory (Between kmem_start and kmem_start
1387 + 1024) would then be available for use exclusively by the driver.
1388 In this case our driver does not allocate any of this kernel
1389 memory.
1390 ------------------------------------------------------------------*/
1391
1392 ulong flags;
1393 int crd; 1162 int crd;
1394 struct board_info *bd; 1163 struct board_info *bd;
1395 unsigned char board_id = 0; 1164 unsigned char board_id = 0;
1396 1165
1397#ifdef ENABLE_PCI
1398 int pci_boards_found, pci_count; 1166 int pci_boards_found, pci_count;
1399 1167
1400 pci_count = 0; 1168 pci_count = 0;
1401#endif /* ENABLE_PCI */
1402 1169
1403 pc_driver = alloc_tty_driver(MAX_ALLOC); 1170 pc_driver = alloc_tty_driver(MAX_ALLOC);
1404 if (!pc_driver) 1171 if (!pc_driver)
@@ -1416,8 +1183,7 @@ int __init pc_init(void)
1416 Note : If LILO has ran epca_setup then epca_setup will handle defining 1183 Note : If LILO has ran epca_setup then epca_setup will handle defining
1417 num_cards as well as copying the data into the board structure. 1184 num_cards as well as copying the data into the board structure.
1418 -------------------------------------------------------------------------- */ 1185 -------------------------------------------------------------------------- */
1419 if (!liloconfig) 1186 if (!liloconfig) { /* Begin driver has been configured via. epcaconfig */
1420 { /* Begin driver has been configured via. epcaconfig */
1421 1187
1422 nbdevs = NBDEVS; 1188 nbdevs = NBDEVS;
1423 num_cards = NUMCARDS; 1189 num_cards = NUMCARDS;
@@ -1440,8 +1206,6 @@ int __init pc_init(void)
1440 1206
1441 printk(KERN_INFO "DIGI epca driver version %s loaded.\n",VERSION); 1207 printk(KERN_INFO "DIGI epca driver version %s loaded.\n",VERSION);
1442 1208
1443#ifdef ENABLE_PCI
1444
1445 /* ------------------------------------------------------------------ 1209 /* ------------------------------------------------------------------
1446 NOTE : This code assumes that the number of ports found in 1210 NOTE : This code assumes that the number of ports found in
1447 the boards array is correct. This could be wrong if 1211 the boards array is correct. This could be wrong if
@@ -1467,8 +1231,6 @@ int __init pc_init(void)
1467 pci_boards_found += init_PCI(); 1231 pci_boards_found += init_PCI();
1468 num_cards += pci_boards_found; 1232 num_cards += pci_boards_found;
1469 1233
1470#endif /* ENABLE_PCI */
1471
1472 pc_driver->owner = THIS_MODULE; 1234 pc_driver->owner = THIS_MODULE;
1473 pc_driver->name = "ttyD"; 1235 pc_driver->name = "ttyD";
1474 pc_driver->devfs_name = "tts/D"; 1236 pc_driver->devfs_name = "tts/D";
@@ -1499,9 +1261,6 @@ int __init pc_init(void)
1499 tty_set_operations(pc_info, &info_ops); 1261 tty_set_operations(pc_info, &info_ops);
1500 1262
1501 1263
1502 save_flags(flags);
1503 cli();
1504
1505 for (crd = 0; crd < num_cards; crd++) 1264 for (crd = 0; crd < num_cards; crd++)
1506 { /* Begin for each card */ 1265 { /* Begin for each card */
1507 1266
@@ -1610,11 +1369,7 @@ int __init pc_init(void)
1610 if ((board_id & 0x30) == 0x30) 1369 if ((board_id & 0x30) == 0x30)
1611 bd->memory_seg = 0x8000; 1370 bd->memory_seg = 0x8000;
1612 1371
1613 } /* End it is an XI card */ 1372 } else printk(KERN_ERR "epca: Board at 0x%x doesn't appear to be an XI\n",(int)bd->port);
1614 else
1615 {
1616 printk(KERN_ERR "<Error> - Board at 0x%x doesn't appear to be an XI\n",(int)bd->port);
1617 }
1618 break; 1373 break;
1619 1374
1620 } /* End switch on bd->type */ 1375 } /* End switch on bd->type */
@@ -1634,9 +1389,6 @@ int __init pc_init(void)
1634 init_timer(&epca_timer); 1389 init_timer(&epca_timer);
1635 epca_timer.function = epcapoll; 1390 epca_timer.function = epcapoll;
1636 mod_timer(&epca_timer, jiffies + HZ/25); 1391 mod_timer(&epca_timer, jiffies + HZ/25);
1637
1638 restore_flags(flags);
1639
1640 return 0; 1392 return 0;
1641 1393
1642} /* End pc_init */ 1394} /* End pc_init */
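pc_init() above now arms epca_timer without the old global cli() section; the timer simply re-arms itself from its own callback and scans the boards for events. A standalone sketch of that 2.6-era polling setup; not part of the patch, and the callback and names are illustrative:

#include <linux/timer.h>
#include <linux/jiffies.h>

static struct timer_list poll_timer;

static void poll_cards(unsigned long data)
{
	/* ... scan each board's mailbox for input/output events ... */
	mod_timer(&poll_timer, jiffies + HZ / 25);	/* re-arm for the next scan */
}

static void start_polling(void)
{
	init_timer(&poll_timer);
	poll_timer.function = poll_cards;
	mod_timer(&poll_timer, jiffies + HZ / 25);	/* first poll in roughly 40 ms */
}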
@@ -1647,10 +1399,10 @@ static void post_fep_init(unsigned int crd)
1647{ /* Begin post_fep_init */ 1399{ /* Begin post_fep_init */
1648 1400
1649 int i; 1401 int i;
1650 unchar *memaddr; 1402 unsigned char *memaddr;
1651 volatile struct global_data *gd; 1403 struct global_data *gd;
1652 struct board_info *bd; 1404 struct board_info *bd;
1653 volatile struct board_chan *bc; 1405 struct board_chan *bc;
1654 struct channel *ch; 1406 struct channel *ch;
1655 int shrinkmem = 0, lowwater ; 1407 int shrinkmem = 0, lowwater ;
1656 1408
@@ -1669,9 +1421,7 @@ static void post_fep_init(unsigned int crd)
1669 after DIGI_INIT has been called will return the proper values. 1421 after DIGI_INIT has been called will return the proper values.
1670 ------------------------------------------------------------------- */ 1422 ------------------------------------------------------------------- */
1671 1423
1672 if (bd->type >= PCIXEM) /* If the board in question is PCI */ 1424 if (bd->type >= PCIXEM) { /* Begin get PCI number of ports */
1673 { /* Begin get PCI number of ports */
1674
1675 /* -------------------------------------------------------------------- 1425 /* --------------------------------------------------------------------
1676 Below we use XEMPORTS as a memory offset regardless of which PCI 1426 Below we use XEMPORTS as a memory offset regardless of which PCI
1677 card it is. This is because all of the supported PCI cards have 1427 card it is. This is because all of the supported PCI cards have
@@ -1685,15 +1435,15 @@ static void post_fep_init(unsigned int crd)
1685 (FYI - The id should be located at 0x1ac (And may use up to 4 bytes 1435 (FYI - The id should be located at 0x1ac (And may use up to 4 bytes
1686 if the box in question is a XEM or CX)). 1436 if the box in question is a XEM or CX)).
1687 ------------------------------------------------------------------------ */ 1437 ------------------------------------------------------------------------ */
1688 1438 /* PCI cards are already remapped at this point ISA are not */
1689 bd->numports = (unsigned short)*(unsigned char *)bus_to_virt((unsigned long) 1439 bd->numports = readw(bd->re_map_membase + XEMPORTS);
1690 (bd->re_map_membase + XEMPORTS));
1691
1692
1693 epcaassert(bd->numports <= 64,"PCI returned a invalid number of ports"); 1440 epcaassert(bd->numports <= 64,"PCI returned a invalid number of ports");
1694 nbdevs += (bd->numports); 1441 nbdevs += (bd->numports);
1695 1442 } else {
1696 } /* End get PCI number of ports */ 1443 /* Fix up the mappings for ISA/EISA etc */
1444 /* FIXME: 64K - can we be smarter ? */
1445 bd->re_map_membase = ioremap(bd->membase, 0x10000);
1446 }
1697 1447
1698 if (crd != 0) 1448 if (crd != 0)
1699 card_ptr[crd] = card_ptr[crd-1] + boards[crd-1].numports; 1449 card_ptr[crd] = card_ptr[crd-1] + boards[crd-1].numports;
@@ -1701,19 +1451,9 @@ static void post_fep_init(unsigned int crd)
1701 card_ptr[crd] = &digi_channels[crd]; /* <- For card 0 only */ 1451 card_ptr[crd] = &digi_channels[crd]; /* <- For card 0 only */
1702 1452
1703 ch = card_ptr[crd]; 1453 ch = card_ptr[crd];
1704
1705
1706 epcaassert(ch <= &digi_channels[nbdevs - 1], "ch out of range"); 1454 epcaassert(ch <= &digi_channels[nbdevs - 1], "ch out of range");
1707 1455
1708 memaddr = (unchar *)bd->re_map_membase; 1456 memaddr = bd->re_map_membase;
1709
1710 /*
1711 The below command is necessary because newer kernels (2.1.x and
1712 up) do not have a 1:1 virtual to physical mapping. The below
1713 call adjust for that.
1714 */
1715
1716 memaddr = (unsigned char *)bus_to_virt((unsigned long)memaddr);
1717 1457
1718 /* ----------------------------------------------------------------- 1458 /* -----------------------------------------------------------------
1719 The below assignment will set bc to point at the BEGINING of 1459 The below assignment will set bc to point at the BEGINING of
@@ -1721,7 +1461,7 @@ static void post_fep_init(unsigned int crd)
1721 8 and 64 of these structures. 1461 8 and 64 of these structures.
1722 -------------------------------------------------------------------- */ 1462 -------------------------------------------------------------------- */
1723 1463
1724 bc = (volatile struct board_chan *)((ulong)memaddr + CHANSTRUCT); 1464 bc = (struct board_chan *)(memaddr + CHANSTRUCT);
1725 1465
1726 /* ------------------------------------------------------------------- 1466 /* -------------------------------------------------------------------
1727 The below assignment will set gd to point at the BEGINING of 1467 The below assignment will set gd to point at the BEGINING of
@@ -1730,20 +1470,18 @@ static void post_fep_init(unsigned int crd)
1730 pointer begins at 0xd10. 1470 pointer begins at 0xd10.
1731 ---------------------------------------------------------------------- */ 1471 ---------------------------------------------------------------------- */
1732 1472
1733 gd = (volatile struct global_data *)((ulong)memaddr + GLOBAL); 1473 gd = (struct global_data *)(memaddr + GLOBAL);
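
Both pointers are now plain offsets into the remapped window rather than bus_to_virt() translations. A sketch of the layout being relied on (names and offsets are the ones visible in this hunk; the indexing loop is illustrative only):

    /* board_chan entries start at CHANSTRUCT and are laid out back to
     * back, one per port; the shared global_data block sits at GLOBAL. */
    struct board_chan  *bc = (struct board_chan *)(memaddr + CHANSTRUCT);
    struct global_data *gd = (struct global_data *)(memaddr + GLOBAL);

    for (i = 0; i < bd->numports; i++, ch++)
            ch->brdchan = bc + i;        /* per-port channel structure */
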
1734 1474
1735 /* -------------------------------------------------------------------- 1475 /* --------------------------------------------------------------------
1736 XEPORTS (address 0xc22) points at the number of channels the 1476 XEPORTS (address 0xc22) points at the number of channels the
1737 card supports. (For 64XE, XI, XEM, and XR use 0xc02) 1477 card supports. (For 64XE, XI, XEM, and XR use 0xc02)
1738 ----------------------------------------------------------------------- */ 1478 ----------------------------------------------------------------------- */
1739 1479
1740 if (((bd->type == PCXEVE) | (bd->type == PCXE)) && 1480 if ((bd->type == PCXEVE || bd->type == PCXE) && (readw(memaddr + XEPORTS) < 3))
1741 (*(ushort *)((ulong)memaddr + XEPORTS) < 3))
1742 shrinkmem = 1; 1481 shrinkmem = 1;
1743 if (bd->type < PCIXEM) 1482 if (bd->type < PCIXEM)
1744 if (!request_region((int)bd->port, 4, board_desc[bd->type])) 1483 if (!request_region((int)bd->port, 4, board_desc[bd->type]))
1745 return; 1484 return;
1746
1747 memwinon(bd, 0); 1485 memwinon(bd, 0);
1748 1486
1749 /* -------------------------------------------------------------------- 1487 /* --------------------------------------------------------------------
@@ -1753,17 +1491,16 @@ static void post_fep_init(unsigned int crd)
1753 1491
1754 /* For every port on the card do ..... */ 1492 /* For every port on the card do ..... */
1755 1493
1756 for (i = 0; i < bd->numports; i++, ch++, bc++) 1494 for (i = 0; i < bd->numports; i++, ch++, bc++) { /* Begin for each port */
1757 { /* Begin for each port */ 1495 unsigned long flags;
1758 1496
1759 ch->brdchan = bc; 1497 ch->brdchan = bc;
1760 ch->mailbox = gd; 1498 ch->mailbox = gd;
1761 INIT_WORK(&ch->tqueue, do_softint, ch); 1499 INIT_WORK(&ch->tqueue, do_softint, ch);
1762 ch->board = &boards[crd]; 1500 ch->board = &boards[crd];
1763 1501
1764 switch (bd->type) 1502 spin_lock_irqsave(&epca_lock, flags);
1765 { /* Begin switch bd->type */ 1503 switch (bd->type) {
1766
1767 /* ---------------------------------------------------------------- 1504 /* ----------------------------------------------------------------
1768 Since some of the boards use different bitmaps for their 1505 Since some of the boards use different bitmaps for their
1769 control signals we cannot hard code these values and retain 1506 control signals we cannot hard code these values and retain
@@ -1796,14 +1533,12 @@ static void post_fep_init(unsigned int crd)
1796 1533
1797 } /* End switch bd->type */ 1534 } /* End switch bd->type */
1798 1535
1799 if (boards[crd].altpin) 1536 if (boards[crd].altpin) {
1800 {
1801 ch->dsr = ch->m_dcd; 1537 ch->dsr = ch->m_dcd;
1802 ch->dcd = ch->m_dsr; 1538 ch->dcd = ch->m_dsr;
1803 ch->digiext.digi_flags |= DIGI_ALTPIN; 1539 ch->digiext.digi_flags |= DIGI_ALTPIN;
1804 } 1540 }
1805 else 1541 else {
1806 {
1807 ch->dcd = ch->m_dcd; 1542 ch->dcd = ch->m_dcd;
1808 ch->dsr = ch->m_dsr; 1543 ch->dsr = ch->m_dsr;
1809 } 1544 }
@@ -1813,14 +1548,12 @@ static void post_fep_init(unsigned int crd)
1813 ch->magic = EPCA_MAGIC; 1548 ch->magic = EPCA_MAGIC;
1814 ch->tty = NULL; 1549 ch->tty = NULL;
1815 1550
1816 if (shrinkmem) 1551 if (shrinkmem) {
1817 {
1818 fepcmd(ch, SETBUFFER, 32, 0, 0, 0); 1552 fepcmd(ch, SETBUFFER, 32, 0, 0, 0);
1819 shrinkmem = 0; 1553 shrinkmem = 0;
1820 } 1554 }
1821 1555
1822 switch (bd->type) 1556 switch (bd->type) {
1823 { /* Begin switch bd->type */
1824 1557
1825 case PCIXEM: 1558 case PCIXEM:
1826 case PCIXRJ: 1559 case PCIXRJ:
@@ -1878,13 +1611,13 @@ static void post_fep_init(unsigned int crd)
1878 1611
1879 fepcmd(ch, SRXHWATER, (3 * ch->rxbufsize / 4), 0, 10, 0); 1612 fepcmd(ch, SRXHWATER, (3 * ch->rxbufsize / 4), 0, 10, 0);
1880 1613
1881 bc->edelay = 100; 1614 writew(100, &bc->edelay);
1882 bc->idata = 1; 1615 writeb(1, &bc->idata);
1883 1616
1884 ch->startc = bc->startc; 1617 ch->startc = readb(&bc->startc);
1885 ch->stopc = bc->stopc; 1618 ch->stopc = readb(&bc->stopc);
1886 ch->startca = bc->startca; 1619 ch->startca = readb(&bc->startca);
1887 ch->stopca = bc->stopca; 1620 ch->stopca = readb(&bc->stopca);
1888 1621
1889 ch->fepcflag = 0; 1622 ch->fepcflag = 0;
1890 ch->fepiflag = 0; 1623 ch->fepiflag = 0;
@@ -1899,27 +1632,23 @@ static void post_fep_init(unsigned int crd)
1899 ch->blocked_open = 0; 1632 ch->blocked_open = 0;
1900 init_waitqueue_head(&ch->open_wait); 1633 init_waitqueue_head(&ch->open_wait);
1901 init_waitqueue_head(&ch->close_wait); 1634 init_waitqueue_head(&ch->close_wait);
1635
1636 spin_unlock_irqrestore(&epca_lock, flags);
1637
1902 ch->tmp_buf = kmalloc(ch->txbufsize,GFP_KERNEL); 1638 ch->tmp_buf = kmalloc(ch->txbufsize,GFP_KERNEL);
1903 if (!(ch->tmp_buf)) 1639 if (!ch->tmp_buf) {
1904 {
1905 printk(KERN_ERR "POST FEP INIT : kmalloc failed for port 0x%x\n",i); 1640 printk(KERN_ERR "POST FEP INIT : kmalloc failed for port 0x%x\n",i);
1906 release_region((int)bd->port, 4); 1641 release_region((int)bd->port, 4);
1907 while(i-- > 0) 1642 while(i-- > 0)
1908 kfree((ch--)->tmp_buf); 1643 kfree((ch--)->tmp_buf);
1909 return; 1644 return;
1910 } 1645 } else
1911 else
1912 memset((void *)ch->tmp_buf,0,ch->txbufsize); 1646 memset((void *)ch->tmp_buf,0,ch->txbufsize);
1913 } /* End for each port */ 1647 } /* End for each port */
1914 1648
1915 printk(KERN_INFO 1649 printk(KERN_INFO
1916 "Digi PC/Xx Driver V%s: %s I/O = 0x%lx Mem = 0x%lx Ports = %d\n", 1650 "Digi PC/Xx Driver V%s: %s I/O = 0x%lx Mem = 0x%lx Ports = %d\n",
1917 VERSION, board_desc[bd->type], (long)bd->port, (long)bd->membase, bd->numports); 1651 VERSION, board_desc[bd->type], (long)bd->port, (long)bd->membase, bd->numports);
1918 sprintf(mesg,
1919 "Digi PC/Xx Driver V%s: %s I/O = 0x%lx Mem = 0x%lx Ports = %d\n",
1920 VERSION, board_desc[bd->type], (long)bd->port, (long)bd->membase, bd->numports);
1921 console_print(mesg);
1922
1923 memwinoff(bd, 0); 1652 memwinoff(bd, 0);
1924 1653
1925} /* End post_fep_init */ 1654} /* End post_fep_init */
@@ -1943,9 +1672,6 @@ static void epcapoll(unsigned long ignored)
1943 buffer empty) and acts on those events. 1672 buffer empty) and acts on those events.
1944 ----------------------------------------------------------------------- */ 1673 ----------------------------------------------------------------------- */
1945 1674
1946 save_flags(flags);
1947 cli();
1948
1949 for (crd = 0; crd < num_cards; crd++) 1675 for (crd = 0; crd < num_cards; crd++)
1950 { /* Begin for each card */ 1676 { /* Begin for each card */
1951 1677
@@ -1961,6 +1687,8 @@ static void epcapoll(unsigned long ignored)
1961 some legacy boards. 1687 some legacy boards.
1962 ---------------------------------------------------------------- */ 1688 ---------------------------------------------------------------- */
1963 1689
1690 spin_lock_irqsave(&epca_lock, flags);
1691
1964 assertmemoff(ch); 1692 assertmemoff(ch);
1965 1693
1966 globalwinon(ch); 1694 globalwinon(ch);
@@ -1970,21 +1698,19 @@ static void epcapoll(unsigned long ignored)
1970 the transmit or receive queue. 1698 the transmit or receive queue.
1971 ------------------------------------------------------------------- */ 1699 ------------------------------------------------------------------- */
1972 1700
1973 head = ch->mailbox->ein; 1701 head = readw(&ch->mailbox->ein);
1974 tail = ch->mailbox->eout; 1702 tail = readw(&ch->mailbox->eout);
1975 1703
1976 /* If head isn't equal to tail we have an event */ 1704 /* If head isn't equal to tail we have an event */
1977 1705
1978 if (head != tail) 1706 if (head != tail)
1979 doevent(crd); 1707 doevent(crd);
1980
1981 memoff(ch); 1708 memoff(ch);
1982 1709
1983 } /* End for each card */ 1710 spin_unlock_irqrestore(&epca_lock, flags);
1984 1711
1712 } /* End for each card */
1985 mod_timer(&epca_timer, jiffies + (HZ / 25)); 1713 mod_timer(&epca_timer, jiffies + (HZ / 25));
1986
1987 restore_flags(flags);
1988} /* End epcapoll */ 1714} /* End epcapoll */
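
With the cli()/save_flags() pair gone, each card is now inspected under epca_lock and all mailbox fields are read with the io accessors. Condensed, the poll cycle this function performs looks like the sketch below (a non-compiling outline using the names from the hunk):

    /* Outline of one epcapoll() pass, per card:
     *   lock, open the memory window, compare the event-queue head (ein)
     *   and tail (eout), drain events if they differ, close the window,
     *   unlock, then re-arm the timer for ~40 ms later (HZ / 25). */
    spin_lock_irqsave(&epca_lock, flags);
    globalwinon(ch);
    if (readw(&ch->mailbox->ein) != readw(&ch->mailbox->eout))
            doevent(crd);                /* something is queued */
    memoff(ch);
    spin_unlock_irqrestore(&epca_lock, flags);
    mod_timer(&epca_timer, jiffies + HZ / 25);
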
1989 1715
1990/* --------------------- Begin doevent ------------------------ */ 1716/* --------------------- Begin doevent ------------------------ */
@@ -1992,53 +1718,42 @@ static void epcapoll(unsigned long ignored)
1992static void doevent(int crd) 1718static void doevent(int crd)
1993{ /* Begin doevent */ 1719{ /* Begin doevent */
1994 1720
1995 volatile unchar *eventbuf; 1721 void *eventbuf;
1996 struct channel *ch, *chan0; 1722 struct channel *ch, *chan0;
1997 static struct tty_struct *tty; 1723 static struct tty_struct *tty;
1998 volatile struct board_info *bd; 1724 struct board_info *bd;
1999 volatile struct board_chan *bc; 1725 struct board_chan *bc;
2000 register volatile unsigned int tail, head; 1726 unsigned int tail, head;
2001 register int event, channel; 1727 int event, channel;
2002 register int mstat, lstat; 1728 int mstat, lstat;
2003 1729
2004 /* ------------------------------------------------------------------- 1730 /* -------------------------------------------------------------------
2005 This subroutine is called by epcapoll when an event is detected 1731 This subroutine is called by epcapoll when an event is detected
2006 in the event queue. This routine responds to those events. 1732 in the event queue. This routine responds to those events.
2007 --------------------------------------------------------------------- */ 1733 --------------------------------------------------------------------- */
2008
2009 bd = &boards[crd]; 1734 bd = &boards[crd];
2010 1735
2011 chan0 = card_ptr[crd]; 1736 chan0 = card_ptr[crd];
2012 epcaassert(chan0 <= &digi_channels[nbdevs - 1], "ch out of range"); 1737 epcaassert(chan0 <= &digi_channels[nbdevs - 1], "ch out of range");
2013
2014 assertgwinon(chan0); 1738 assertgwinon(chan0);
2015 1739 while ((tail = readw(&chan0->mailbox->eout)) != (head = readw(&chan0->mailbox->ein)))
2016 while ((tail = chan0->mailbox->eout) != (head = chan0->mailbox->ein))
2017 { /* Begin while something in event queue */ 1740 { /* Begin while something in event queue */
2018
2019 assertgwinon(chan0); 1741 assertgwinon(chan0);
2020 1742 eventbuf = bd->re_map_membase + tail + ISTART;
2021 eventbuf = (volatile unchar *)bus_to_virt((ulong)(bd->re_map_membase + tail + ISTART));
2022
2023 /* Get the channel the event occurred on */ 1743 /* Get the channel the event occurred on */
2024 channel = eventbuf[0]; 1744 channel = readb(eventbuf);
2025
2026 /* Get the actual event code that occurred */ 1745 /* Get the actual event code that occurred */
2027 event = eventbuf[1]; 1746 event = readb(eventbuf + 1);
2028
2029 /* ---------------------------------------------------------------- 1747 /* ----------------------------------------------------------------
2030 The two assignments below get the current modem status (mstat) 1748 The two assignments below get the current modem status (mstat)
2031 and the previous modem status (lstat). These are useful because 1749 and the previous modem status (lstat). These are useful because
2032 an event could signal a change in modem signals itself. 1750 an event could signal a change in modem signals itself.
2033 ------------------------------------------------------------------- */ 1751 ------------------------------------------------------------------- */
2034 1752 mstat = readb(eventbuf + 2);
2035 mstat = eventbuf[2]; 1753 lstat = readb(eventbuf + 3);
2036 lstat = eventbuf[3];
2037 1754
2038 ch = chan0 + channel; 1755 ch = chan0 + channel;
2039 1756 if ((unsigned)channel >= bd->numports || !ch) {
2040 if ((unsigned)channel >= bd->numports || !ch)
2041 {
2042 if (channel >= bd->numports) 1757 if (channel >= bd->numports)
2043 ch = chan0; 1758 ch = chan0;
2044 bc = ch->brdchan; 1759 bc = ch->brdchan;
@@ -2048,97 +1763,53 @@ static void doevent(int crd)
2048 if ((bc = ch->brdchan) == NULL) 1763 if ((bc = ch->brdchan) == NULL)
2049 goto next; 1764 goto next;
2050 1765
2051 if (event & DATA_IND) 1766 if (event & DATA_IND) { /* Begin DATA_IND */
2052 { /* Begin DATA_IND */
2053
2054 receive_data(ch); 1767 receive_data(ch);
2055 assertgwinon(ch); 1768 assertgwinon(ch);
2056
2057 } /* End DATA_IND */ 1769 } /* End DATA_IND */
2058 /* else *//* Fix for DCD transition missed bug */ 1770 /* else *//* Fix for DCD transition missed bug */
2059 if (event & MODEMCHG_IND) 1771 if (event & MODEMCHG_IND) { /* Begin MODEMCHG_IND */
2060 { /* Begin MODEMCHG_IND */
2061
2062 /* A modem signal change has been indicated */ 1772 /* A modem signal change has been indicated */
2063
2064 ch->imodem = mstat; 1773 ch->imodem = mstat;
2065 1774 if (ch->asyncflags & ASYNC_CHECK_CD) {
2066 if (ch->asyncflags & ASYNC_CHECK_CD)
2067 {
2068 if (mstat & ch->dcd) /* We are now receiving dcd */ 1775 if (mstat & ch->dcd) /* We are now receiving dcd */
2069 wake_up_interruptible(&ch->open_wait); 1776 wake_up_interruptible(&ch->open_wait);
2070 else 1777 else
2071 pc_sched_event(ch, EPCA_EVENT_HANGUP); /* No dcd; hangup */ 1778 pc_sched_event(ch, EPCA_EVENT_HANGUP); /* No dcd; hangup */
2072 } 1779 }
2073
2074 } /* End MODEMCHG_IND */ 1780 } /* End MODEMCHG_IND */
2075
2076 tty = ch->tty; 1781 tty = ch->tty;
2077 if (tty) 1782 if (tty) { /* Begin if valid tty */
2078 { /* Begin if valid tty */ 1783 if (event & BREAK_IND) { /* Begin if BREAK_IND */
2079
2080 if (event & BREAK_IND)
2081 { /* Begin if BREAK_IND */
2082
2083 /* A break has been indicated */ 1784 /* A break has been indicated */
2084
2085 tty->flip.count++; 1785 tty->flip.count++;
2086 *tty->flip.flag_buf_ptr++ = TTY_BREAK; 1786 *tty->flip.flag_buf_ptr++ = TTY_BREAK;
2087
2088 *tty->flip.char_buf_ptr++ = 0; 1787 *tty->flip.char_buf_ptr++ = 0;
2089
2090 tty_schedule_flip(tty); 1788 tty_schedule_flip(tty);
2091 1789 } else if (event & LOWTX_IND) { /* Begin LOWTX_IND */
2092 } /* End if BREAK_IND */
2093 else
2094 if (event & LOWTX_IND)
2095 { /* Begin LOWTX_IND */
2096
2097 if (ch->statusflags & LOWWAIT) 1790 if (ch->statusflags & LOWWAIT)
2098 { /* Begin if LOWWAIT */ 1791 { /* Begin if LOWWAIT */
2099
2100 ch->statusflags &= ~LOWWAIT; 1792 ch->statusflags &= ~LOWWAIT;
2101 tty_wakeup(tty); 1793 tty_wakeup(tty);
2102 wake_up_interruptible(&tty->write_wait); 1794 wake_up_interruptible(&tty->write_wait);
2103
2104 } /* End if LOWWAIT */ 1795 } /* End if LOWWAIT */
2105 1796 } else if (event & EMPTYTX_IND) { /* Begin EMPTYTX_IND */
2106 } /* End LOWTX_IND */
2107 else
2108 if (event & EMPTYTX_IND)
2109 { /* Begin EMPTYTX_IND */
2110
2111 /* This event is generated by setup_empty_event */ 1797 /* This event is generated by setup_empty_event */
2112
2113 ch->statusflags &= ~TXBUSY; 1798 ch->statusflags &= ~TXBUSY;
2114 if (ch->statusflags & EMPTYWAIT) 1799 if (ch->statusflags & EMPTYWAIT) { /* Begin if EMPTYWAIT */
2115 { /* Begin if EMPTYWAIT */
2116
2117 ch->statusflags &= ~EMPTYWAIT; 1800 ch->statusflags &= ~EMPTYWAIT;
2118 tty_wakeup(tty); 1801 tty_wakeup(tty);
2119
2120 wake_up_interruptible(&tty->write_wait); 1802 wake_up_interruptible(&tty->write_wait);
2121
2122 } /* End if EMPTYWAIT */ 1803 } /* End if EMPTYWAIT */
2123
2124 } /* End EMPTYTX_IND */ 1804 } /* End EMPTYTX_IND */
2125
2126 } /* End if valid tty */ 1805 } /* End if valid tty */
2127
2128
2129 next: 1806 next:
2130 globalwinon(ch); 1807 globalwinon(ch);
2131 1808 BUG_ON(!bc);
2132 if (!bc) 1809 writew(1, &bc->idata);
2133 printk(KERN_ERR "<Error> - bc == NULL in doevent!\n"); 1810 writew((tail + 4) & (IMAX - ISTART - 4), &chan0->mailbox->eout);
2134 else
2135 bc->idata = 1;
2136
2137 chan0->mailbox->eout = (tail + 4) & (IMAX - ISTART - 4);
2138 globalwinon(chan0); 1811 globalwinon(chan0);
2139
2140 } /* End while something in event queue */ 1812 } /* End while something in event queue */
2141
2142} /* End doevent */ 1813} /* End doevent */
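
Each entry doevent() consumes is a fixed four-byte record read with readb(); the tail is then advanced by four and wrapped within the ISTART..IMAX window. A small standalone description of that record (the struct name is made up for illustration; the byte meanings are the ones used above):

    /* Illustration only: the layout doevent() decodes byte by byte. */
    struct fep_event {                   /* hypothetical name */
            unsigned char channel;       /* eventbuf[0]: port the event is for   */
            unsigned char event;         /* eventbuf[1]: DATA_IND, BREAK_IND ... */
            unsigned char mstat;         /* eventbuf[2]: current modem lines     */
            unsigned char lstat;         /* eventbuf[3]: previous modem lines    */
    };
    /* Consuming it:  eout = (tail + 4) & (IMAX - ISTART - 4);  */
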
2143 1814
2144/* --------------------- Begin fepcmd ------------------------ */ 1815/* --------------------- Begin fepcmd ------------------------ */
@@ -2146,7 +1817,6 @@ static void doevent(int crd)
2146static void fepcmd(struct channel *ch, int cmd, int word_or_byte, 1817static void fepcmd(struct channel *ch, int cmd, int word_or_byte,
2147 int byte2, int ncmds, int bytecmd) 1818 int byte2, int ncmds, int bytecmd)
2148{ /* Begin fepcmd */ 1819{ /* Begin fepcmd */
2149
2150 unchar *memaddr; 1820 unchar *memaddr;
2151 unsigned int head, cmdTail, cmdStart, cmdMax; 1821 unsigned int head, cmdTail, cmdStart, cmdMax;
2152 long count; 1822 long count;
@@ -2155,93 +1825,57 @@ static void fepcmd(struct channel *ch, int cmd, int word_or_byte,
2155 /* This is the routine in which commands may be passed to the card. */ 1825 /* This is the routine in which commands may be passed to the card. */
2156 1826
2157 if (ch->board->status == DISABLED) 1827 if (ch->board->status == DISABLED)
2158 {
2159 return; 1828 return;
2160 }
2161
2162 assertgwinon(ch); 1829 assertgwinon(ch);
2163
2164 /* Remember head (As well as max) is just an offset not a base addr */ 1830 /* Remember head (As well as max) is just an offset not a base addr */
2165 head = ch->mailbox->cin; 1831 head = readw(&ch->mailbox->cin);
2166
2167 /* cmdStart is a base address */ 1832 /* cmdStart is a base address */
2168 cmdStart = ch->mailbox->cstart; 1833 cmdStart = readw(&ch->mailbox->cstart);
2169
2170 /* ------------------------------------------------------------------ 1834 /* ------------------------------------------------------------------
2171 We do the addition below because we do not want a max pointer 1835 We do the addition below because we do not want a max pointer
2172 relative to cmdStart. We want a max pointer that points at the 1836 relative to cmdStart. We want a max pointer that points at the
2173 physical end of the command queue. 1837 physical end of the command queue.
2174 -------------------------------------------------------------------- */ 1838 -------------------------------------------------------------------- */
2175 1839 cmdMax = (cmdStart + 4 + readw(&ch->mailbox->cmax));
2176 cmdMax = (cmdStart + 4 + (ch->mailbox->cmax));
2177
2178 memaddr = ch->board->re_map_membase; 1840 memaddr = ch->board->re_map_membase;
2179 1841
2180 /* 1842 if (head >= (cmdMax - cmdStart) || (head & 03)) {
2181 The below command is necessary because newer kernels (2.1.x and 1843 printk(KERN_ERR "line %d: Out of range, cmd = %x, head = %x\n", __LINE__, cmd, head);
2182 up) do not have a 1:1 virtual to physical mapping. The below 1844 printk(KERN_ERR "line %d: Out of range, cmdMax = %x, cmdStart = %x\n", __LINE__, cmdMax, cmdStart);
2183 call adjust for that.
2184 */
2185
2186 memaddr = (unsigned char *)bus_to_virt((unsigned long)memaddr);
2187
2188 if (head >= (cmdMax - cmdStart) || (head & 03))
2189 {
2190 printk(KERN_ERR "line %d: Out of range, cmd = %x, head = %x\n", __LINE__,
2191 cmd, head);
2192 printk(KERN_ERR "line %d: Out of range, cmdMax = %x, cmdStart = %x\n", __LINE__,
2193 cmdMax, cmdStart);
2194 return; 1845 return;
2195 } 1846 }
2196 1847 if (bytecmd) {
2197 if (bytecmd) 1848 writeb(cmd, memaddr + head + cmdStart + 0);
2198 { 1849 writeb(ch->channelnum, memaddr + head + cmdStart + 1);
2199 *(volatile unchar *)(memaddr + head + cmdStart + 0) = (unchar)cmd;
2200
2201 *(volatile unchar *)(memaddr + head + cmdStart + 1) = (unchar)ch->channelnum;
2202 /* Below word_or_byte is bits to set */ 1850 /* Below word_or_byte is bits to set */
2203 *(volatile unchar *)(memaddr + head + cmdStart + 2) = (unchar)word_or_byte; 1851 writeb(word_or_byte, memaddr + head + cmdStart + 2);
2204 /* Below byte2 is bits to reset */ 1852 /* Below byte2 is bits to reset */
2205 *(volatile unchar *)(memaddr + head + cmdStart + 3) = (unchar)byte2; 1853 writeb(byte2, memaddr + head + cmdStart + 3);
2206 1854 } else {
2207 } 1855 writeb(cmd, memaddr + head + cmdStart + 0);
2208 else 1856 writeb(ch->channelnum, memaddr + head + cmdStart + 1);
2209 { 1857 writeb(word_or_byte, memaddr + head + cmdStart + 2);
2210 *(volatile unchar *)(memaddr + head + cmdStart + 0) = (unchar)cmd;
2211 *(volatile unchar *)(memaddr + head + cmdStart + 1) = (unchar)ch->channelnum;
2212 *(volatile ushort*)(memaddr + head + cmdStart + 2) = (ushort)word_or_byte;
2213 } 1858 }
2214
2215 head = (head + 4) & (cmdMax - cmdStart - 4); 1859 head = (head + 4) & (cmdMax - cmdStart - 4);
2216 ch->mailbox->cin = head; 1860 writew(head, &ch->mailbox->cin);
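
The command queue uses the same four-byte records and the same power-of-two wrap arithmetic as the event queue. A tiny self-contained illustration of how the head pointer cycles (the 0x40-byte queue size is an assumption picked for the example, not a value taken from the driver):

    #include <stdio.h>

    int main(void)
    {
            unsigned int cmdStart = 0, cmdMax = 0x40;   /* illustrative size */
            unsigned int head = 0, i;

            for (i = 0; i < 20; i++)
                    /* a 4-byte command record would be written at 'head' here */
                    head = (head + 4) & (cmdMax - cmdStart - 4);

            printf("head after 20 commands: 0x%x\n", head);   /* 0x10 - wrapped once */
            return 0;
    }
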
2217
2218 count = FEPTIMEOUT; 1861 count = FEPTIMEOUT;
2219 1862
2220 for (;;) 1863 for (;;) { /* Begin forever loop */
2221 { /* Begin forever loop */
2222
2223 count--; 1864 count--;
2224 if (count == 0) 1865 if (count == 0) {
2225 {
2226 printk(KERN_ERR "<Error> - Fep not responding in fepcmd()\n"); 1866 printk(KERN_ERR "<Error> - Fep not responding in fepcmd()\n");
2227 return; 1867 return;
2228 } 1868 }
2229 1869 head = readw(&ch->mailbox->cin);
2230 head = ch->mailbox->cin; 1870 cmdTail = readw(&ch->mailbox->cout);
2231 cmdTail = ch->mailbox->cout;
2232
2233 n = (head - cmdTail) & (cmdMax - cmdStart - 4); 1871 n = (head - cmdTail) & (cmdMax - cmdStart - 4);
2234
2235 /* ---------------------------------------------------------- 1872 /* ----------------------------------------------------------
2236 Basically this will break when the FEP acknowledges the 1873 Basically this will break when the FEP acknowledges the
2237 command by incrementing cmdTail (Making it equal to head). 1874 command by incrementing cmdTail (Making it equal to head).
2238 ------------------------------------------------------------- */ 1875 ------------------------------------------------------------- */
2239
2240 if (n <= ncmds * (sizeof(short) * 4)) 1876 if (n <= ncmds * (sizeof(short) * 4))
2241 break; /* Well nearly forever :-) */ 1877 break; /* Well nearly forever :-) */
2242
2243 } /* End forever loop */ 1878 } /* End forever loop */
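
The loop above exits once the card has consumed enough of the queue that no more than ncmds commands remain outstanding, or gives up after FEPTIMEOUT iterations. A standalone model of that backlog test (queue size and sample offsets are assumptions; only the masking arithmetic mirrors the driver):

    #include <stdio.h>

    /* Models the "has the FEP caught up?" check in fepcmd(). */
    static int fep_caught_up(unsigned int head, unsigned int tail,
                             unsigned int qsize, int ncmds)
    {
            unsigned int backlog = (head - tail) & (qsize - 4);
            return backlog <= ncmds * (sizeof(short) * 4);
    }

    int main(void)
    {
            printf("%d\n", fep_caught_up(0x10, 0x08, 0x40, 1));   /* 8 bytes behind  -> 1 */
            printf("%d\n", fep_caught_up(0x20, 0x08, 0x40, 1));   /* 24 bytes behind -> 0 */
            return 0;
    }
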
2244
2245} /* End fepcmd */ 1879} /* End fepcmd */
2246 1880
2247/* --------------------------------------------------------------------- 1881/* ---------------------------------------------------------------------
@@ -2255,11 +1889,9 @@ static void fepcmd(struct channel *ch, int cmd, int word_or_byte,
2255 1889
2256static unsigned termios2digi_h(struct channel *ch, unsigned cflag) 1890static unsigned termios2digi_h(struct channel *ch, unsigned cflag)
2257{ /* Begin termios2digi_h */ 1891{ /* Begin termios2digi_h */
2258
2259 unsigned res = 0; 1892 unsigned res = 0;
2260 1893
2261 if (cflag & CRTSCTS) 1894 if (cflag & CRTSCTS) {
2262 {
2263 ch->digiext.digi_flags |= (RTSPACE | CTSPACE); 1895 ch->digiext.digi_flags |= (RTSPACE | CTSPACE);
2264 res |= ((ch->m_cts) | (ch->m_rts)); 1896 res |= ((ch->m_cts) | (ch->m_rts));
2265 } 1897 }
@@ -2295,7 +1927,6 @@ static unsigned termios2digi_i(struct channel *ch, unsigned iflag)
2295 1927
2296 unsigned res = iflag & (IGNBRK | BRKINT | IGNPAR | PARMRK | 1928 unsigned res = iflag & (IGNBRK | BRKINT | IGNPAR | PARMRK |
2297 INPCK | ISTRIP|IXON|IXANY|IXOFF); 1929 INPCK | ISTRIP|IXON|IXANY|IXOFF);
2298
2299 if (ch->digiext.digi_flags & DIGI_AIXON) 1930 if (ch->digiext.digi_flags & DIGI_AIXON)
2300 res |= IAIXON; 1931 res |= IAIXON;
2301 return res; 1932 return res;
@@ -2308,28 +1939,15 @@ static unsigned termios2digi_c(struct channel *ch, unsigned cflag)
2308{ /* Begin termios2digi_c */ 1939{ /* Begin termios2digi_c */
2309 1940
2310 unsigned res = 0; 1941 unsigned res = 0;
2311 1942 if (cflag & CBAUDEX) { /* Begin detected CBAUDEX */
2312#ifdef SPEED_HACK
2313 /* CL: HACK to force 115200 at 38400 and 57600 at 19200 Baud */
2314 if ((cflag & CBAUD)== B38400) cflag=cflag - B38400 + B115200;
2315 if ((cflag & CBAUD)== B19200) cflag=cflag - B19200 + B57600;
2316#endif /* SPEED_HACK */
2317
2318 if (cflag & CBAUDEX)
2319 { /* Begin detected CBAUDEX */
2320
2321 ch->digiext.digi_flags |= DIGI_FAST; 1943 ch->digiext.digi_flags |= DIGI_FAST;
2322
2323 /* ------------------------------------------------------------- 1944 /* -------------------------------------------------------------
2324 HUPCL bit is used by FEP to indicate fast baud 1945 HUPCL bit is used by FEP to indicate fast baud
2325 table is to be used. 1946 table is to be used.
2326 ----------------------------------------------------------------- */ 1947 ----------------------------------------------------------------- */
2327
2328 res |= FEP_HUPCL; 1948 res |= FEP_HUPCL;
2329
2330 } /* End detected CBAUDEX */ 1949 } /* End detected CBAUDEX */
2331 else ch->digiext.digi_flags &= ~DIGI_FAST; 1950 else ch->digiext.digi_flags &= ~DIGI_FAST;
2332
2333 /* ------------------------------------------------------------------- 1951 /* -------------------------------------------------------------------
2334 CBAUD has bit position 0x1000 set these days to indicate Linux 1952 CBAUD has bit position 0x1000 set these days to indicate Linux
2335 baud rate remap. Digi hardware can't handle the bit assignment. 1953 baud rate remap. Digi hardware can't handle the bit assignment.
@@ -2337,7 +1955,6 @@ static unsigned termios2digi_c(struct channel *ch, unsigned cflag)
2337 bit out. 1955 bit out.
2338 ---------------------------------------------------------------------- */ 1956 ---------------------------------------------------------------------- */
2339 res |= cflag & ((CBAUD ^ CBAUDEX) | PARODD | PARENB | CSTOPB | CSIZE); 1957 res |= cflag & ((CBAUD ^ CBAUDEX) | PARODD | PARENB | CSTOPB | CSIZE);
2340
2341 /* ------------------------------------------------------------- 1958 /* -------------------------------------------------------------
2342 This gets a little confusing. The Digi cards have their own 1959 This gets a little confusing. The Digi cards have their own
2343 representation of c_cflags controlling baud rate. For the most 1960 representation of c_cflags controlling baud rate. For the most
@@ -2357,10 +1974,8 @@ static unsigned termios2digi_c(struct channel *ch, unsigned cflag)
2357 should be checked for and screened out prior to termios2digi_c 1974 should be checked for and screened out prior to termios2digi_c
2358 returning. Since CLOCAL isn't used by the board this can be 1975 returning. Since CLOCAL isn't used by the board this can be
2359 ignored as long as the returned value is used only by Digi hardware. 1976 ignored as long as the returned value is used only by Digi hardware.
2360 ----------------------------------------------------------------- */ 1977 ----------------------------------------------------------------- */
2361 1978 if (cflag & CBAUDEX) {
2362 if (cflag & CBAUDEX)
2363 {
2364 /* ------------------------------------------------------------- 1979 /* -------------------------------------------------------------
2365 The below code is trying to guarantee that only baud rates 1980 The below code is trying to guarantee that only baud rates
2366 115200 and 230400 are remapped. We use exclusive or because 1981 115200 and 230400 are remapped. We use exclusive or because
@@ -2371,138 +1986,96 @@ static unsigned termios2digi_c(struct channel *ch, unsigned cflag)
2371 1986
2372 if ((!((cflag & 0x7) ^ (B115200 & ~CBAUDEX))) || 1987 if ((!((cflag & 0x7) ^ (B115200 & ~CBAUDEX))) ||
2373 (!((cflag & 0x7) ^ (B230400 & ~CBAUDEX)))) 1988 (!((cflag & 0x7) ^ (B230400 & ~CBAUDEX))))
2374 {
2375 res += 1; 1989 res += 1;
2376 }
2377 } 1990 }
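
Since XOR of two equal bit patterns is zero, !(a ^ b) is just an equality test on the low baud bits. A worked example with the i386 termbits values (B115200 = 0010002, B230400 = 0010003, CBAUDEX = 0010000 octal; other architectures encode these differently):

    #include <stdio.h>

    int main(void)
    {
            unsigned int cbaudex = 0010000;            /* i386 value, octal */
            unsigned int b115200 = 0010002;
            unsigned int b230400 = 0010003;
            unsigned int cflag   = b115200;            /* user asked for 115200 */

            if (!((cflag & 0x7) ^ (b115200 & ~cbaudex)) ||
                !((cflag & 0x7) ^ (b230400 & ~cbaudex)))
                    printf("fast-baud remap applies: res += 1\n");
            else
                    printf("no remap\n");
            return 0;
    }
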
2378
2379 return res; 1991 return res;
2380 1992
2381} /* End termios2digi_c */ 1993} /* End termios2digi_c */
2382 1994
2383/* --------------------- Begin epcaparam ----------------------- */ 1995/* --------------------- Begin epcaparam ----------------------- */
2384 1996
1997/* Caller must hold the locks */
2385static void epcaparam(struct tty_struct *tty, struct channel *ch) 1998static void epcaparam(struct tty_struct *tty, struct channel *ch)
2386{ /* Begin epcaparam */ 1999{ /* Begin epcaparam */
2387 2000
2388 unsigned int cmdHead; 2001 unsigned int cmdHead;
2389 struct termios *ts; 2002 struct termios *ts;
2390 volatile struct board_chan *bc; 2003 struct board_chan *bc;
2391 unsigned mval, hflow, cflag, iflag; 2004 unsigned mval, hflow, cflag, iflag;
2392 2005
2393 bc = ch->brdchan; 2006 bc = ch->brdchan;
2394 epcaassert(bc !=0, "bc out of range"); 2007 epcaassert(bc !=0, "bc out of range");
2395 2008
2396 assertgwinon(ch); 2009 assertgwinon(ch);
2397
2398 ts = tty->termios; 2010 ts = tty->termios;
2399 2011 if ((ts->c_cflag & CBAUD) == 0) { /* Begin CBAUD detected */
2400 if ((ts->c_cflag & CBAUD) == 0) 2012 cmdHead = readw(&bc->rin);
2401 { /* Begin CBAUD detected */
2402
2403 cmdHead = bc->rin;
2404 bc->rout = cmdHead; 2013 bc->rout = cmdHead;
2405 cmdHead = bc->tin; 2014 cmdHead = readw(&bc->tin);
2406
2407 /* Changing baud in mid-stream transmission can be wonderful */ 2015 /* Changing baud in mid-stream transmission can be wonderful */
2408 /* --------------------------------------------------------------- 2016 /* ---------------------------------------------------------------
2409 Flush current transmit buffer by setting cmdTail pointer (tout) 2017 Flush current transmit buffer by setting cmdTail pointer (tout)
2410 to cmdHead pointer (tin). Hopefully the transmit buffer is empty. 2018 to cmdHead pointer (tin). Hopefully the transmit buffer is empty.
2411 ----------------------------------------------------------------- */ 2019 ----------------------------------------------------------------- */
2412
2413 fepcmd(ch, STOUT, (unsigned) cmdHead, 0, 0, 0); 2020 fepcmd(ch, STOUT, (unsigned) cmdHead, 0, 0, 0);
2414 mval = 0; 2021 mval = 0;
2415 2022 } else { /* Begin CBAUD not detected */
2416 } /* End CBAUD detected */
2417 else
2418 { /* Begin CBAUD not detected */
2419
2420 /* ------------------------------------------------------------------- 2023 /* -------------------------------------------------------------------
2421 c_cflags have changed but that change had nothing to do with BAUD. 2024 c_cflags have changed but that change had nothing to do with BAUD.
2422 Propagate the change to the card. 2025 Propagate the change to the card.
2423 ---------------------------------------------------------------------- */ 2026 ---------------------------------------------------------------------- */
2424
2425 cflag = termios2digi_c(ch, ts->c_cflag); 2027 cflag = termios2digi_c(ch, ts->c_cflag);
2426 2028 if (cflag != ch->fepcflag) {
2427 if (cflag != ch->fepcflag)
2428 {
2429 ch->fepcflag = cflag; 2029 ch->fepcflag = cflag;
2430 /* Set baud rate, char size, stop bits, parity */ 2030 /* Set baud rate, char size, stop bits, parity */
2431 fepcmd(ch, SETCTRLFLAGS, (unsigned) cflag, 0, 0, 0); 2031 fepcmd(ch, SETCTRLFLAGS, (unsigned) cflag, 0, 0, 0);
2432 } 2032 }
2433
2434
2435 /* ---------------------------------------------------------------- 2033 /* ----------------------------------------------------------------
2436 If the user has not forced CLOCAL and if the device is not a 2034 If the user has not forced CLOCAL and if the device is not a
2437 CALLOUT device (Which is always CLOCAL) we set flags such that 2035 CALLOUT device (Which is always CLOCAL) we set flags such that
2438 the driver will wait on carrier detect. 2036 the driver will wait on carrier detect.
2439 ------------------------------------------------------------------- */ 2037 ------------------------------------------------------------------- */
2440
2441 if (ts->c_cflag & CLOCAL) 2038 if (ts->c_cflag & CLOCAL)
2442 { /* Begin it is a cud device or a ttyD device with CLOCAL on */
2443 ch->asyncflags &= ~ASYNC_CHECK_CD; 2039 ch->asyncflags &= ~ASYNC_CHECK_CD;
2444 } /* End it is a cud device or a ttyD device with CLOCAL on */
2445 else 2040 else
2446 { /* Begin it is a ttyD device */
2447 ch->asyncflags |= ASYNC_CHECK_CD; 2041 ch->asyncflags |= ASYNC_CHECK_CD;
2448 } /* End it is a ttyD device */
2449
2450 mval = ch->m_dtr | ch->m_rts; 2042 mval = ch->m_dtr | ch->m_rts;
2451
2452 } /* End CBAUD not detected */ 2043 } /* End CBAUD not detected */
2453
2454 iflag = termios2digi_i(ch, ts->c_iflag); 2044 iflag = termios2digi_i(ch, ts->c_iflag);
2455
2456 /* Check input mode flags */ 2045 /* Check input mode flags */
2457 2046 if (iflag != ch->fepiflag) {
2458 if (iflag != ch->fepiflag)
2459 {
2460 ch->fepiflag = iflag; 2047 ch->fepiflag = iflag;
2461
2462 /* --------------------------------------------------------------- 2048 /* ---------------------------------------------------------------
2463 Command sets the channel's iflag structure on the board. Such things 2049 Command sets the channel's iflag structure on the board. Such things
2464 as input soft flow control, handling of parity errors, and 2050 as input soft flow control, handling of parity errors, and
2465 break handling are all set here. 2051 break handling are all set here.
2466 ------------------------------------------------------------------- */ 2052 ------------------------------------------------------------------- */
2467
2468 /* break handling, parity handling, input stripping, flow control chars */ 2053 /* break handling, parity handling, input stripping, flow control chars */
2469 fepcmd(ch, SETIFLAGS, (unsigned int) ch->fepiflag, 0, 0, 0); 2054 fepcmd(ch, SETIFLAGS, (unsigned int) ch->fepiflag, 0, 0, 0);
2470 } 2055 }
2471
2472 /* --------------------------------------------------------------- 2056 /* ---------------------------------------------------------------
2473 Set the board mint value for this channel. This will cause hardware 2057 Set the board mint value for this channel. This will cause hardware
2474 events to be generated each time the DCD signal (Described in mint) 2058 events to be generated each time the DCD signal (Described in mint)
2475 changes. 2059 changes.
2476 ------------------------------------------------------------------- */ 2060 ------------------------------------------------------------------- */
2477 bc->mint = ch->dcd; 2061 writeb(ch->dcd, &bc->mint);
2478
2479 if ((ts->c_cflag & CLOCAL) || (ch->digiext.digi_flags & DIGI_FORCEDCD)) 2062 if ((ts->c_cflag & CLOCAL) || (ch->digiext.digi_flags & DIGI_FORCEDCD))
2480 if (ch->digiext.digi_flags & DIGI_FORCEDCD) 2063 if (ch->digiext.digi_flags & DIGI_FORCEDCD)
2481 bc->mint = 0; 2064 writeb(0, &bc->mint);
2482 2065 ch->imodem = readb(&bc->mstat);
2483 ch->imodem = bc->mstat;
2484
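
bc->mint is effectively a per-channel modem-interrupt mask: writing the channel's DCD bit asks the card to raise MODEMCHG_IND events on carrier transitions, and DIGI_FORCEDCD clears it again. Note that, as written, the outer CLOCAL test does not change the outcome; the net effect is the sketch below (field names from the hunk):

    /* Sketch of the net behaviour of the lines above. */
    writeb(ch->dcd, &bc->mint);          /* event on every DCD transition  */
    if (ch->digiext.digi_flags & DIGI_FORCEDCD)
            writeb(0, &bc->mint);        /* carrier is forced - no events  */
    ch->imodem = readb(&bc->mstat);      /* remember current modem lines   */
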
2485 hflow = termios2digi_h(ch, ts->c_cflag); 2066 hflow = termios2digi_h(ch, ts->c_cflag);
2486 2067 if (hflow != ch->hflow) {
2487 if (hflow != ch->hflow)
2488 {
2489 ch->hflow = hflow; 2068 ch->hflow = hflow;
2490
2491 /* -------------------------------------------------------------- 2069 /* --------------------------------------------------------------
2492 Hard flow control has been selected but the board is not 2070 Hard flow control has been selected but the board is not
2493 using it. Activate hard flow control now. 2071 using it. Activate hard flow control now.
2494 ----------------------------------------------------------------- */ 2072 ----------------------------------------------------------------- */
2495
2496 fepcmd(ch, SETHFLOW, hflow, 0xff, 0, 1); 2073 fepcmd(ch, SETHFLOW, hflow, 0xff, 0, 1);
2497 } 2074 }
2498
2499
2500 mval ^= ch->modemfake & (mval ^ ch->modem); 2075 mval ^= ch->modemfake & (mval ^ ch->modem);
2501 2076
2502 if (ch->omodem ^ mval) 2077 if (ch->omodem ^ mval) {
2503 {
2504 ch->omodem = mval; 2078 ch->omodem = mval;
2505
2506 /* -------------------------------------------------------------- 2079 /* --------------------------------------------------------------
2507 The below command sets the DTR and RTS mstat structure. If 2080 The below command sets the DTR and RTS mstat structure. If
2508 hard flow control is NOT active these changes will drive the 2081 hard flow control is NOT active these changes will drive the
@@ -2514,87 +2087,65 @@ static void epcaparam(struct tty_struct *tty, struct channel *ch)
2514 /* First reset DTR & RTS; then set them */ 2087 /* First reset DTR & RTS; then set them */
2515 fepcmd(ch, SETMODEM, 0, ((ch->m_dtr)|(ch->m_rts)), 0, 1); 2088 fepcmd(ch, SETMODEM, 0, ((ch->m_dtr)|(ch->m_rts)), 0, 1);
2516 fepcmd(ch, SETMODEM, mval, 0, 0, 1); 2089 fepcmd(ch, SETMODEM, mval, 0, 0, 1);
2517
2518 } 2090 }
2519 2091 if (ch->startc != ch->fepstartc || ch->stopc != ch->fepstopc) {
2520 if (ch->startc != ch->fepstartc || ch->stopc != ch->fepstopc)
2521 {
2522 ch->fepstartc = ch->startc; 2092 ch->fepstartc = ch->startc;
2523 ch->fepstopc = ch->stopc; 2093 ch->fepstopc = ch->stopc;
2524
2525 /* ------------------------------------------------------------ 2094 /* ------------------------------------------------------------
2526 The XON / XOFF characters have changed; propagate these 2095 The XON / XOFF characters have changed; propagate these
2527 changes to the card. 2096 changes to the card.
2528 --------------------------------------------------------------- */ 2097 --------------------------------------------------------------- */
2529
2530 fepcmd(ch, SONOFFC, ch->fepstartc, ch->fepstopc, 0, 1); 2098 fepcmd(ch, SONOFFC, ch->fepstartc, ch->fepstopc, 0, 1);
2531 } 2099 }
2532 2100 if (ch->startca != ch->fepstartca || ch->stopca != ch->fepstopca) {
2533 if (ch->startca != ch->fepstartca || ch->stopca != ch->fepstopca)
2534 {
2535 ch->fepstartca = ch->startca; 2101 ch->fepstartca = ch->startca;
2536 ch->fepstopca = ch->stopca; 2102 ch->fepstopca = ch->stopca;
2537
2538 /* --------------------------------------------------------------- 2103 /* ---------------------------------------------------------------
2539 Similar to the above, this time the auxiliary XON / XOFF 2104 Similar to the above, this time the auxiliary XON / XOFF
2540 characters have changed; propagate these changes to the card. 2105 characters have changed; propagate these changes to the card.
2541 ------------------------------------------------------------------ */ 2106 ------------------------------------------------------------------ */
2542
2543 fepcmd(ch, SAUXONOFFC, ch->fepstartca, ch->fepstopca, 0, 1); 2107 fepcmd(ch, SAUXONOFFC, ch->fepstartca, ch->fepstopca, 0, 1);
2544 } 2108 }
2545
2546} /* End epcaparam */ 2109} /* End epcaparam */
2547 2110
2548/* --------------------- Begin receive_data ----------------------- */ 2111/* --------------------- Begin receive_data ----------------------- */
2549 2112/* Caller holds lock */
2550static void receive_data(struct channel *ch) 2113static void receive_data(struct channel *ch)
2551{ /* Begin receive_data */ 2114{ /* Begin receive_data */
2552 2115
2553 unchar *rptr; 2116 unchar *rptr;
2554 struct termios *ts = NULL; 2117 struct termios *ts = NULL;
2555 struct tty_struct *tty; 2118 struct tty_struct *tty;
2556 volatile struct board_chan *bc; 2119 struct board_chan *bc;
2557 register int dataToRead, wrapgap, bytesAvailable; 2120 int dataToRead, wrapgap, bytesAvailable;
2558 register unsigned int tail, head; 2121 unsigned int tail, head;
2559 unsigned int wrapmask; 2122 unsigned int wrapmask;
2560 int rc; 2123 int rc;
2561 2124
2562
2563 /* --------------------------------------------------------------- 2125 /* ---------------------------------------------------------------
2564 This routine is called by doevent when a receive data event 2126 This routine is called by doevent when a receive data event
2565 has taken place. 2127 has taken place.
2566 ------------------------------------------------------------------- */ 2128 ------------------------------------------------------------------- */
2567 2129
2568 globalwinon(ch); 2130 globalwinon(ch);
2569
2570 if (ch->statusflags & RXSTOPPED) 2131 if (ch->statusflags & RXSTOPPED)
2571 return; 2132 return;
2572
2573 tty = ch->tty; 2133 tty = ch->tty;
2574 if (tty) 2134 if (tty)
2575 ts = tty->termios; 2135 ts = tty->termios;
2576
2577 bc = ch->brdchan; 2136 bc = ch->brdchan;
2578 2137 BUG_ON(!bc);
2579 if (!bc)
2580 {
2581 printk(KERN_ERR "<Error> - bc is NULL in receive_data!\n");
2582 return;
2583 }
2584
2585 wrapmask = ch->rxbufsize - 1; 2138 wrapmask = ch->rxbufsize - 1;
2586 2139
2587 /* --------------------------------------------------------------------- 2140 /* ---------------------------------------------------------------------
2588 Get the head and tail pointers to the receiver queue. Wrap the 2141 Get the head and tail pointers to the receiver queue. Wrap the
2589 head pointer if it has reached the end of the buffer. 2142 head pointer if it has reached the end of the buffer.
2590 ------------------------------------------------------------------------ */ 2143 ------------------------------------------------------------------------ */
2591 2144 head = readw(&bc->rin);
2592 head = bc->rin;
2593 head &= wrapmask; 2145 head &= wrapmask;
2594 tail = bc->rout & wrapmask; 2146 tail = readw(&bc->rout) & wrapmask;
2595 2147
2596 bytesAvailable = (head - tail) & wrapmask; 2148 bytesAvailable = (head - tail) & wrapmask;
2597
2598 if (bytesAvailable == 0) 2149 if (bytesAvailable == 0)
2599 return; 2150 return;
2600 2151
@@ -2602,8 +2153,7 @@ static void receive_data(struct channel *ch)
2602 If CREAD bit is off or device not open, set the RX tail to head 2153 If CREAD bit is off or device not open, set the RX tail to head
2603 --------------------------------------------------------------------- */ 2154 --------------------------------------------------------------------- */
2604 2155
2605 if (!tty || !ts || !(ts->c_cflag & CREAD)) 2156 if (!tty || !ts || !(ts->c_cflag & CREAD)) {
2606 {
2607 bc->rout = head; 2157 bc->rout = head;
2608 return; 2158 return;
2609 } 2159 }
@@ -2611,64 +2161,45 @@ static void receive_data(struct channel *ch)
2611 if (tty->flip.count == TTY_FLIPBUF_SIZE) 2161 if (tty->flip.count == TTY_FLIPBUF_SIZE)
2612 return; 2162 return;
2613 2163
2614 if (bc->orun) 2164 if (readb(&bc->orun)) {
2615 { 2165 writeb(0, &bc->orun);
2616 bc->orun = 0; 2166 printk(KERN_WARNING "epca; overrun! DigiBoard device %s\n",tty->name);
2617 printk(KERN_WARNING "overrun! DigiBoard device %s\n",tty->name);
2618 } 2167 }
2619
2620 rxwinon(ch); 2168 rxwinon(ch);
2621 rptr = tty->flip.char_buf_ptr; 2169 rptr = tty->flip.char_buf_ptr;
2622 rc = tty->flip.count; 2170 rc = tty->flip.count;
2623 2171 while (bytesAvailable > 0) { /* Begin while there is data on the card */
2624 while (bytesAvailable > 0)
2625 { /* Begin while there is data on the card */
2626
2627 wrapgap = (head >= tail) ? head - tail : ch->rxbufsize - tail; 2172 wrapgap = (head >= tail) ? head - tail : ch->rxbufsize - tail;
2628
2629 /* --------------------------------------------------------------- 2173 /* ---------------------------------------------------------------
2630 Even if head has wrapped around only report the amount of 2174 Even if head has wrapped around only report the amount of
2631 data to be equal to the size - tail. Remember memcpy can't 2175 data to be equal to the size - tail. Remember memcpy can't
2632 automatically wrap around the receive buffer. 2176 automatically wrap around the receive buffer.
2633 ----------------------------------------------------------------- */ 2177 ----------------------------------------------------------------- */
2634
2635 dataToRead = (wrapgap < bytesAvailable) ? wrapgap : bytesAvailable; 2178 dataToRead = (wrapgap < bytesAvailable) ? wrapgap : bytesAvailable;
2636
2637 /* -------------------------------------------------------------- 2179 /* --------------------------------------------------------------
2638 Make sure we don't overflow the buffer 2180 Make sure we don't overflow the buffer
2639 ----------------------------------------------------------------- */ 2181 ----------------------------------------------------------------- */
2640
2641 if ((rc + dataToRead) > TTY_FLIPBUF_SIZE) 2182 if ((rc + dataToRead) > TTY_FLIPBUF_SIZE)
2642 dataToRead = TTY_FLIPBUF_SIZE - rc; 2183 dataToRead = TTY_FLIPBUF_SIZE - rc;
2643
2644 if (dataToRead == 0) 2184 if (dataToRead == 0)
2645 break; 2185 break;
2646
2647 /* --------------------------------------------------------------- 2186 /* ---------------------------------------------------------------
2648 Move data read from our card into the line discipline's buffer 2187 Move data read from our card into the line discipline's buffer
2649 for translation if necessary. 2188 for translation if necessary.
2650 ------------------------------------------------------------------ */ 2189 ------------------------------------------------------------------ */
2651 2190 memcpy_fromio(rptr, ch->rxptr + tail, dataToRead);
2652 if ((memcpy(rptr, ch->rxptr + tail, dataToRead)) != rptr)
2653 printk(KERN_ERR "<Error> - receive_data : memcpy failed\n");
2654
2655 rc += dataToRead; 2191 rc += dataToRead;
2656 rptr += dataToRead; 2192 rptr += dataToRead;
2657 tail = (tail + dataToRead) & wrapmask; 2193 tail = (tail + dataToRead) & wrapmask;
2658 bytesAvailable -= dataToRead; 2194 bytesAvailable -= dataToRead;
2659
2660 } /* End while there is data on the card */ 2195 } /* End while there is data on the card */
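
memcpy_fromio() cannot wrap around the end of the card's receive ring, so each pass copies at most up to the buffer end and the tail wraps with the power-of-two mask. A self-contained model of that chunking (the 16-byte ring and the head/tail values are made up for the example):

    #include <stdio.h>
    #include <string.h>

    #define RINGSZ 16                            /* stand-in for ch->rxbufsize */

    int main(void)
    {
            char ring[RINGSZ + 1] = "ABCDEFGHIJKLMNOP";
            char out[RINGSZ + 1];
            unsigned int wrapmask = RINGSZ - 1;
            unsigned int head = 4, tail = 12;    /* 8 bytes pending, wrapping at 16 */
            unsigned int avail = (head - tail) & wrapmask;
            unsigned int copied = 0;

            while (avail > 0) {
                    unsigned int wrapgap = (head >= tail) ? head - tail : RINGSZ - tail;
                    unsigned int n = (wrapgap < avail) ? wrapgap : avail;

                    memcpy(out + copied, ring + tail, n);   /* stands in for memcpy_fromio() */
                    copied += n;
                    tail = (tail + n) & wrapmask;
                    avail -= n;
            }
            out[copied] = '\0';
            printf("%s\n", out);                 /* prints MNOPABCD */
            return 0;
    }
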
2661
2662
2663 tty->flip.count = rc; 2196 tty->flip.count = rc;
2664 tty->flip.char_buf_ptr = rptr; 2197 tty->flip.char_buf_ptr = rptr;
2665 globalwinon(ch); 2198 globalwinon(ch);
2666 bc->rout = tail; 2199 writew(tail, &bc->rout);
2667
2668 /* Must be called with global data */ 2200 /* Must be called with global data */
2669 tty_schedule_flip(ch->tty); 2201 tty_schedule_flip(ch->tty);
2670 return; 2202 return;
2671
2672} /* End receive_data */ 2203} /* End receive_data */
2673 2204
2674static int info_ioctl(struct tty_struct *tty, struct file * file, 2205static int info_ioctl(struct tty_struct *tty, struct file * file,
@@ -2676,17 +2207,15 @@ static int info_ioctl(struct tty_struct *tty, struct file * file,
2676{ 2207{
2677 switch (cmd) 2208 switch (cmd)
2678 { /* Begin switch cmd */ 2209 { /* Begin switch cmd */
2679
2680 case DIGI_GETINFO: 2210 case DIGI_GETINFO:
2681 { /* Begin case DIGI_GETINFO */ 2211 { /* Begin case DIGI_GETINFO */
2682
2683 struct digi_info di ; 2212 struct digi_info di ;
2684 int brd; 2213 int brd;
2685 2214
2686 getUser(brd, (unsigned int __user *)arg); 2215 if(get_user(brd, (unsigned int __user *)arg))
2687 2216 return -EFAULT;
2688 if ((brd < 0) || (brd >= num_cards) || (num_cards == 0)) 2217 if (brd < 0 || brd >= num_cards || num_cards == 0)
2689 return (-ENODEV); 2218 return -ENODEV;
2690 2219
2691 memset(&di, 0, sizeof(di)); 2220 memset(&di, 0, sizeof(di));
2692 2221
@@ -2694,8 +2223,9 @@ static int info_ioctl(struct tty_struct *tty, struct file * file,
2694 di.status = boards[brd].status; 2223 di.status = boards[brd].status;
2695 di.type = boards[brd].type ; 2224 di.type = boards[brd].type ;
2696 di.numports = boards[brd].numports ; 2225 di.numports = boards[brd].numports ;
2697 di.port = boards[brd].port ; 2226 /* Legacy fixups - just move along nothing to see */
2698 di.membase = boards[brd].membase ; 2227 di.port = (unsigned char *)boards[brd].port ;
2228 di.membase = (unsigned char *)boards[brd].membase ;
2699 2229
2700 if (copy_to_user((void __user *)arg, &di, sizeof (di))) 2230 if (copy_to_user((void __user *)arg, &di, sizeof (di)))
2701 return -EFAULT; 2231 return -EFAULT;
@@ -2709,39 +2239,29 @@ static int info_ioctl(struct tty_struct *tty, struct file * file,
2709 int brd = arg & 0xff000000 >> 16 ; 2239 int brd = arg & 0xff000000 >> 16 ;
2710 unsigned char state = arg & 0xff ; 2240 unsigned char state = arg & 0xff ;
2711 2241
2712 if ((brd < 0) || (brd >= num_cards)) 2242 if (brd < 0 || brd >= num_cards) {
2713 { 2243 printk(KERN_ERR "epca: DIGI POLLER : brd not valid!\n");
2714 printk(KERN_ERR "<Error> - DIGI POLLER : brd not valid!\n");
2715 return (-ENODEV); 2244 return (-ENODEV);
2716 } 2245 }
2717
2718 digi_poller_inhibited = state ; 2246 digi_poller_inhibited = state ;
2719 break ; 2247 break ;
2720
2721 } /* End case DIGI_POLLER */ 2248 } /* End case DIGI_POLLER */
2722 2249
2723 case DIGI_INIT: 2250 case DIGI_INIT:
2724 { /* Begin case DIGI_INIT */ 2251 { /* Begin case DIGI_INIT */
2725
2726 /* ------------------------------------------------------------ 2252 /* ------------------------------------------------------------
2727 This call is made by the apps to complete the initialization 2253 This call is made by the apps to complete the initialization
2728 of the board(s). This routine is responsible for setting 2254 of the board(s). This routine is responsible for setting
2729 the card to its initial state and setting the driver's control 2255 the card to its initial state and setting the driver's control
2730 fields to the suitable settings for the card in question. 2256 fields to the suitable settings for the card in question.
2731 ---------------------------------------------------------------- */ 2257 ---------------------------------------------------------------- */
2732
2733 int crd ; 2258 int crd ;
2734 for (crd = 0; crd < num_cards; crd++) 2259 for (crd = 0; crd < num_cards; crd++)
2735 post_fep_init (crd); 2260 post_fep_init (crd);
2736
2737 break ; 2261 break ;
2738
2739 } /* End case DIGI_INIT */ 2262 } /* End case DIGI_INIT */
2740
2741
2742 default: 2263 default:
2743 return -ENOIOCTLCMD; 2264 return -ENOTTY;
2744
2745 } /* End switch cmd */ 2265 } /* End switch cmd */
2746 return (0) ; 2266 return (0) ;
2747} 2267}
@@ -2750,43 +2270,33 @@ static int info_ioctl(struct tty_struct *tty, struct file * file,
2750static int pc_tiocmget(struct tty_struct *tty, struct file *file) 2270static int pc_tiocmget(struct tty_struct *tty, struct file *file)
2751{ 2271{
2752 struct channel *ch = (struct channel *) tty->driver_data; 2272 struct channel *ch = (struct channel *) tty->driver_data;
2753 volatile struct board_chan *bc; 2273 struct board_chan *bc;
2754 unsigned int mstat, mflag = 0; 2274 unsigned int mstat, mflag = 0;
2755 unsigned long flags; 2275 unsigned long flags;
2756 2276
2757 if (ch) 2277 if (ch)
2758 bc = ch->brdchan; 2278 bc = ch->brdchan;
2759 else 2279 else
2760 { 2280 return -EINVAL;
2761 printk(KERN_ERR "<Error> - ch is NULL in pc_tiocmget!\n");
2762 return(-EINVAL);
2763 }
2764 2281
2765 save_flags(flags); 2282 spin_lock_irqsave(&epca_lock, flags);
2766 cli();
2767 globalwinon(ch); 2283 globalwinon(ch);
2768 mstat = bc->mstat; 2284 mstat = readb(&bc->mstat);
2769 memoff(ch); 2285 memoff(ch);
2770 restore_flags(flags); 2286 spin_unlock_irqrestore(&epca_lock, flags);
2771 2287
2772 if (mstat & ch->m_dtr) 2288 if (mstat & ch->m_dtr)
2773 mflag |= TIOCM_DTR; 2289 mflag |= TIOCM_DTR;
2774
2775 if (mstat & ch->m_rts) 2290 if (mstat & ch->m_rts)
2776 mflag |= TIOCM_RTS; 2291 mflag |= TIOCM_RTS;
2777
2778 if (mstat & ch->m_cts) 2292 if (mstat & ch->m_cts)
2779 mflag |= TIOCM_CTS; 2293 mflag |= TIOCM_CTS;
2780
2781 if (mstat & ch->dsr) 2294 if (mstat & ch->dsr)
2782 mflag |= TIOCM_DSR; 2295 mflag |= TIOCM_DSR;
2783
2784 if (mstat & ch->m_ri) 2296 if (mstat & ch->m_ri)
2785 mflag |= TIOCM_RI; 2297 mflag |= TIOCM_RI;
2786
2787 if (mstat & ch->dcd) 2298 if (mstat & ch->dcd)
2788 mflag |= TIOCM_CD; 2299 mflag |= TIOCM_CD;
2789
2790 return mflag; 2300 return mflag;
2791} 2301}
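
The sequence in pc_tiocmget() is the locking idiom this patch introduces throughout the driver: take epca_lock with interrupts off, open the card's memory window, do the readb()/writeb() accesses, close the window, drop the lock, and only then translate the result (here into TIOCM_* bits). A compressed restatement (names from the hunk):

    /* The recurring pattern after this patch (sketch, not exact code). */
    spin_lock_irqsave(&epca_lock, flags);        /* replaces save_flags()/cli()     */
    globalwinon(ch);                             /* map the card's window in        */
    mstat = readb(&bc->mstat);                   /* touch card memory via accessors */
    memoff(ch);                                  /* window off again                */
    spin_unlock_irqrestore(&epca_lock, flags);
    if (mstat & ch->m_dtr)                       /* then translate at leisure       */
            mflag |= TIOCM_DTR;
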
2792 2302
@@ -2796,13 +2306,10 @@ static int pc_tiocmset(struct tty_struct *tty, struct file *file,
2796 struct channel *ch = (struct channel *) tty->driver_data; 2306 struct channel *ch = (struct channel *) tty->driver_data;
2797 unsigned long flags; 2307 unsigned long flags;
2798 2308
2799 if (!ch) { 2309 if (!ch)
2800 printk(KERN_ERR "<Error> - ch is NULL in pc_tiocmset!\n"); 2310 return -EINVAL;
2801 return(-EINVAL);
2802 }
2803 2311
2804 save_flags(flags); 2312 spin_lock_irqsave(&epca_lock, flags);
2805 cli();
2806 /* 2313 /*
2807 * I think this modemfake stuff is broken. It doesn't 2314 * I think this modemfake stuff is broken. It doesn't
2808 * correctly reflect the behaviour desired by the TIOCM* 2315 * correctly reflect the behaviour desired by the TIOCM*
@@ -2824,17 +2331,14 @@ static int pc_tiocmset(struct tty_struct *tty, struct file *file,
2824 ch->modemfake |= ch->m_dtr; 2331 ch->modemfake |= ch->m_dtr;
2825 ch->modem &= ~ch->m_dtr; 2332 ch->modem &= ~ch->m_dtr;
2826 } 2333 }
2827
2828 globalwinon(ch); 2334 globalwinon(ch);
2829
2830 /* -------------------------------------------------------------- 2335 /* --------------------------------------------------------------
2831 The below routine generally sets up parity, baud, flow control 2336 The below routine generally sets up parity, baud, flow control
2832 issues, etc.... It affects both control flags and input flags. 2337 issues, etc.... It affects both control flags and input flags.
2833 ------------------------------------------------------------------ */ 2338 ------------------------------------------------------------------ */
2834
2835 epcaparam(tty,ch); 2339 epcaparam(tty,ch);
2836 memoff(ch); 2340 memoff(ch);
2837 restore_flags(flags); 2341 spin_unlock_irqrestore(&epca_lock, flags);
2838 return 0; 2342 return 0;
2839} 2343}
2840 2344
@@ -2847,19 +2351,14 @@ static int pc_ioctl(struct tty_struct *tty, struct file * file,
2847 unsigned long flags; 2351 unsigned long flags;
2848 unsigned int mflag, mstat; 2352 unsigned int mflag, mstat;
2849 unsigned char startc, stopc; 2353 unsigned char startc, stopc;
2850 volatile struct board_chan *bc; 2354 struct board_chan *bc;
2851 struct channel *ch = (struct channel *) tty->driver_data; 2355 struct channel *ch = (struct channel *) tty->driver_data;
2852 void __user *argp = (void __user *)arg; 2356 void __user *argp = (void __user *)arg;
2853 2357
2854 if (ch) 2358 if (ch)
2855 bc = ch->brdchan; 2359 bc = ch->brdchan;
2856 else 2360 else
2857 { 2361 return -EINVAL;
2858 printk(KERN_ERR "<Error> - ch is NULL in pc_ioctl!\n");
2859 return(-EINVAL);
2860 }
2861
2862 save_flags(flags);
2863 2362
2864 /* ------------------------------------------------------------------- 2363 /* -------------------------------------------------------------------
2865 For POSIX compliance we need to add more ioctls. See tty_ioctl.c 2364 For POSIX compliance we need to add more ioctls. See tty_ioctl.c
@@ -2871,46 +2370,39 @@ static int pc_ioctl(struct tty_struct *tty, struct file * file,
2871 { /* Begin switch cmd */ 2370 { /* Begin switch cmd */
2872 2371
2873 case TCGETS: 2372 case TCGETS:
2874 if (copy_to_user(argp, 2373 if (copy_to_user(argp, tty->termios, sizeof(struct termios)))
2875 tty->termios, sizeof(struct termios)))
2876 return -EFAULT; 2374 return -EFAULT;
2877 return(0); 2375 return 0;
2878
2879 case TCGETA: 2376 case TCGETA:
2880 return get_termio(tty, argp); 2377 return get_termio(tty, argp);
2881
2882 case TCSBRK: /* SVID version: non-zero arg --> no break */ 2378 case TCSBRK: /* SVID version: non-zero arg --> no break */
2883
2884 retval = tty_check_change(tty); 2379 retval = tty_check_change(tty);
2885 if (retval) 2380 if (retval)
2886 return retval; 2381 return retval;
2887
2888 /* Setup an event to indicate when the transmit buffer empties */ 2382 /* Setup an event to indicate when the transmit buffer empties */
2889 2383 spin_lock_irqsave(&epca_lock, flags);
2890 setup_empty_event(tty,ch); 2384 setup_empty_event(tty,ch);
2385 spin_unlock_irqrestore(&epca_lock, flags);
2891 tty_wait_until_sent(tty, 0); 2386 tty_wait_until_sent(tty, 0);
2892 if (!arg) 2387 if (!arg)
2893 digi_send_break(ch, HZ/4); /* 1/4 second */ 2388 digi_send_break(ch, HZ/4); /* 1/4 second */
2894 return 0; 2389 return 0;
2895
2896 case TCSBRKP: /* support for POSIX tcsendbreak() */ 2390 case TCSBRKP: /* support for POSIX tcsendbreak() */
2897
2898 retval = tty_check_change(tty); 2391 retval = tty_check_change(tty);
2899 if (retval) 2392 if (retval)
2900 return retval; 2393 return retval;
2901 2394
2902 /* Setup an event to indicate when the transmit buffer empties */ 2395 /* Setup an event to indicate when the transmit buffer empties */
2903 2396 spin_lock_irqsave(&epca_lock, flags);
2904 setup_empty_event(tty,ch); 2397 setup_empty_event(tty,ch);
2398 spin_unlock_irqrestore(&epca_lock, flags);
2905 tty_wait_until_sent(tty, 0); 2399 tty_wait_until_sent(tty, 0);
2906 digi_send_break(ch, arg ? arg*(HZ/10) : HZ/4); 2400 digi_send_break(ch, arg ? arg*(HZ/10) : HZ/4);
2907 return 0; 2401 return 0;
2908
2909 case TIOCGSOFTCAR: 2402 case TIOCGSOFTCAR:
2910 if (put_user(C_CLOCAL(tty)?1:0, (unsigned long __user *)arg)) 2403 if (put_user(C_CLOCAL(tty)?1:0, (unsigned long __user *)arg))
2911 return -EFAULT; 2404 return -EFAULT;
2912 return 0; 2405 return 0;
2913
2914 case TIOCSSOFTCAR: 2406 case TIOCSSOFTCAR:
2915 { 2407 {
2916 unsigned int value; 2408 unsigned int value;
@@ -2922,75 +2414,63 @@ static int pc_ioctl(struct tty_struct *tty, struct file * file,
2922 (value ? CLOCAL : 0)); 2414 (value ? CLOCAL : 0));
2923 return 0; 2415 return 0;
2924 } 2416 }
2925
2926 case TIOCMODG: 2417 case TIOCMODG:
2927 mflag = pc_tiocmget(tty, file); 2418 mflag = pc_tiocmget(tty, file);
2928 if (put_user(mflag, (unsigned long __user *)argp)) 2419 if (put_user(mflag, (unsigned long __user *)argp))
2929 return -EFAULT; 2420 return -EFAULT;
2930 break; 2421 break;
2931
2932 case TIOCMODS: 2422 case TIOCMODS:
2933 if (get_user(mstat, (unsigned __user *)argp)) 2423 if (get_user(mstat, (unsigned __user *)argp))
2934 return -EFAULT; 2424 return -EFAULT;
2935 return pc_tiocmset(tty, file, mstat, ~mstat); 2425 return pc_tiocmset(tty, file, mstat, ~mstat);
2936
2937 case TIOCSDTR: 2426 case TIOCSDTR:
2427 spin_lock_irqsave(&epca_lock, flags);
2938 ch->omodem |= ch->m_dtr; 2428 ch->omodem |= ch->m_dtr;
2939 cli();
2940 globalwinon(ch); 2429 globalwinon(ch);
2941 fepcmd(ch, SETMODEM, ch->m_dtr, 0, 10, 1); 2430 fepcmd(ch, SETMODEM, ch->m_dtr, 0, 10, 1);
2942 memoff(ch); 2431 memoff(ch);
2943 restore_flags(flags); 2432 spin_unlock_irqrestore(&epca_lock, flags);
2944 break; 2433 break;
2945 2434
2946 case TIOCCDTR: 2435 case TIOCCDTR:
2436 spin_lock_irqsave(&epca_lock, flags);
2947 ch->omodem &= ~ch->m_dtr; 2437 ch->omodem &= ~ch->m_dtr;
2948 cli();
2949 globalwinon(ch); 2438 globalwinon(ch);
2950 fepcmd(ch, SETMODEM, 0, ch->m_dtr, 10, 1); 2439 fepcmd(ch, SETMODEM, 0, ch->m_dtr, 10, 1);
2951 memoff(ch); 2440 memoff(ch);
2952 restore_flags(flags); 2441 spin_unlock_irqrestore(&epca_lock, flags);
2953 break; 2442 break;
2954
2955 case DIGI_GETA: 2443 case DIGI_GETA:
2956 if (copy_to_user(argp, &ch->digiext, sizeof(digi_t))) 2444 if (copy_to_user(argp, &ch->digiext, sizeof(digi_t)))
2957 return -EFAULT; 2445 return -EFAULT;
2958 break; 2446 break;
2959
2960 case DIGI_SETAW: 2447 case DIGI_SETAW:
2961 case DIGI_SETAF: 2448 case DIGI_SETAF:
2962 if ((cmd) == (DIGI_SETAW)) 2449 if (cmd == DIGI_SETAW) {
2963 {
2964 /* Setup an event to indicate when the transmit buffer empties */ 2450 /* Setup an event to indicate when the transmit buffer empties */
2965 2451 spin_lock_irqsave(&epca_lock, flags);
2966 setup_empty_event(tty,ch); 2452 setup_empty_event(tty,ch);
2453 spin_unlock_irqrestore(&epca_lock, flags);
2967 tty_wait_until_sent(tty, 0); 2454 tty_wait_until_sent(tty, 0);
2968 } 2455 } else {
2969 else
2970 {
2971 /* ldisc lock already held in ioctl */ 2456 /* ldisc lock already held in ioctl */
2972 if (tty->ldisc.flush_buffer) 2457 if (tty->ldisc.flush_buffer)
2973 tty->ldisc.flush_buffer(tty); 2458 tty->ldisc.flush_buffer(tty);
2974 } 2459 }
2975
2976 /* Fall Thru */ 2460 /* Fall Thru */
2977
2978 case DIGI_SETA: 2461 case DIGI_SETA:
2979 if (copy_from_user(&ch->digiext, argp, sizeof(digi_t))) 2462 if (copy_from_user(&ch->digiext, argp, sizeof(digi_t)))
2980 return -EFAULT; 2463 return -EFAULT;
2981 2464
2982 if (ch->digiext.digi_flags & DIGI_ALTPIN) 2465 if (ch->digiext.digi_flags & DIGI_ALTPIN) {
2983 {
2984 ch->dcd = ch->m_dsr; 2466 ch->dcd = ch->m_dsr;
2985 ch->dsr = ch->m_dcd; 2467 ch->dsr = ch->m_dcd;
2986 } 2468 } else {
2987 else
2988 {
2989 ch->dcd = ch->m_dcd; 2469 ch->dcd = ch->m_dcd;
2990 ch->dsr = ch->m_dsr; 2470 ch->dsr = ch->m_dsr;
2991 } 2471 }
2992 2472
2993 cli(); 2473 spin_lock_irqsave(&epca_lock, flags);
2994 globalwinon(ch); 2474 globalwinon(ch);
2995 2475
2996 /* ----------------------------------------------------------------- 2476 /* -----------------------------------------------------------------
@@ -3000,25 +2480,22 @@ static int pc_ioctl(struct tty_struct *tty, struct file * file,
3000 2480
3001 epcaparam(tty,ch); 2481 epcaparam(tty,ch);
3002 memoff(ch); 2482 memoff(ch);
3003 restore_flags(flags); 2483 spin_unlock_irqrestore(&epca_lock, flags);
3004 break; 2484 break;
3005 2485
3006 case DIGI_GETFLOW: 2486 case DIGI_GETFLOW:
3007 case DIGI_GETAFLOW: 2487 case DIGI_GETAFLOW:
3008 cli(); 2488 spin_lock_irqsave(&epca_lock, flags);
3009 globalwinon(ch); 2489 globalwinon(ch);
3010 if ((cmd) == (DIGI_GETFLOW)) 2490 if (cmd == DIGI_GETFLOW) {
3011 { 2491 dflow.startc = readb(&bc->startc);
3012 dflow.startc = bc->startc; 2492 dflow.stopc = readb(&bc->stopc);
3013 dflow.stopc = bc->stopc; 2493 } else {
3014 } 2494 dflow.startc = readb(&bc->startca);
3015 else 2495 dflow.stopc = readb(&bc->stopca);
3016 {
3017 dflow.startc = bc->startca;
3018 dflow.stopc = bc->stopca;
3019 } 2496 }
3020 memoff(ch); 2497 memoff(ch);
3021 restore_flags(flags); 2498 spin_unlock_irqrestore(&epca_lock, flags);
3022 2499
3023 if (copy_to_user(argp, &dflow, sizeof(dflow))) 2500 if (copy_to_user(argp, &dflow, sizeof(dflow)))
3024 return -EFAULT; 2501 return -EFAULT;
@@ -3026,13 +2503,10 @@ static int pc_ioctl(struct tty_struct *tty, struct file * file,
3026 2503
3027 case DIGI_SETAFLOW: 2504 case DIGI_SETAFLOW:
3028 case DIGI_SETFLOW: 2505 case DIGI_SETFLOW:
3029 if ((cmd) == (DIGI_SETFLOW)) 2506 if (cmd == DIGI_SETFLOW) {
3030 {
3031 startc = ch->startc; 2507 startc = ch->startc;
3032 stopc = ch->stopc; 2508 stopc = ch->stopc;
3033 } 2509 } else {
3034 else
3035 {
3036 startc = ch->startca; 2510 startc = ch->startca;
3037 stopc = ch->stopca; 2511 stopc = ch->stopca;
3038 } 2512 }
@@ -3040,40 +2514,31 @@ static int pc_ioctl(struct tty_struct *tty, struct file * file,
3040 if (copy_from_user(&dflow, argp, sizeof(dflow))) 2514 if (copy_from_user(&dflow, argp, sizeof(dflow)))
3041 return -EFAULT; 2515 return -EFAULT;
3042 2516
3043 if (dflow.startc != startc || dflow.stopc != stopc) 2517 if (dflow.startc != startc || dflow.stopc != stopc) { /* Begin if setflow toggled */
3044 { /* Begin if setflow toggled */ 2518 spin_lock_irqsave(&epca_lock, flags);
3045 cli();
3046 globalwinon(ch); 2519 globalwinon(ch);
3047 2520
3048 if ((cmd) == (DIGI_SETFLOW)) 2521 if (cmd == DIGI_SETFLOW) {
3049 {
3050 ch->fepstartc = ch->startc = dflow.startc; 2522 ch->fepstartc = ch->startc = dflow.startc;
3051 ch->fepstopc = ch->stopc = dflow.stopc; 2523 ch->fepstopc = ch->stopc = dflow.stopc;
3052 fepcmd(ch, SONOFFC, ch->fepstartc, ch->fepstopc, 0, 1); 2524 fepcmd(ch, SONOFFC, ch->fepstartc, ch->fepstopc, 0, 1);
3053 } 2525 } else {
3054 else
3055 {
3056 ch->fepstartca = ch->startca = dflow.startc; 2526 ch->fepstartca = ch->startca = dflow.startc;
3057 ch->fepstopca = ch->stopca = dflow.stopc; 2527 ch->fepstopca = ch->stopca = dflow.stopc;
3058 fepcmd(ch, SAUXONOFFC, ch->fepstartca, ch->fepstopca, 0, 1); 2528 fepcmd(ch, SAUXONOFFC, ch->fepstartca, ch->fepstopca, 0, 1);
3059 } 2529 }
3060 2530
3061 if (ch->statusflags & TXSTOPPED) 2531 if (ch->statusflags & TXSTOPPED)
3062 pc_start(tty); 2532 pc_start(tty);
3063 2533
3064 memoff(ch); 2534 memoff(ch);
3065 restore_flags(flags); 2535 spin_unlock_irqrestore(&epca_lock, flags);
3066
3067 } /* End if setflow toggled */ 2536 } /* End if setflow toggled */
3068 break; 2537 break;
3069
3070 default: 2538 default:
3071 return -ENOIOCTLCMD; 2539 return -ENOIOCTLCMD;
3072
3073 } /* End switch cmd */ 2540 } /* End switch cmd */
3074
3075 return 0; 2541 return 0;
3076
3077} /* End pc_ioctl */ 2542} /* End pc_ioctl */
3078 2543
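The conversion running through pc_ioctl() above, and through every helper that follows, is the same mechanical one: each save_flags()/cli() ... restore_flags() sequence becomes spin_lock_irqsave()/spin_unlock_irqrestore() on the driver-wide epca_lock, trading local interrupt masking (which never excluded other CPUs) for a real SMP-safe lock. A minimal, self-contained sketch of the pattern; example_lock and shared_count are illustrative names, not part of the driver:

#include <linux/spinlock.h>

static DEFINE_SPINLOCK(example_lock);
static unsigned int shared_count;

/* Old style removed by this patch:
 *
 *	unsigned long flags;
 *	save_flags(flags);
 *	cli();			   masks interrupts on this CPU only
 *	shared_count++;
 *	restore_flags(flags);
 *
 * New style used throughout the functions above:
 */
static void bump_shared_count(void)
{
	unsigned long flags;

	spin_lock_irqsave(&example_lock, flags);	/* IRQs off + cross-CPU exclusion */
	shared_count++;
	spin_unlock_irqrestore(&example_lock, flags);
}

The flags variable preserves the caller's interrupt state, so the construct is safe whether or not interrupts were already disabled on entry.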
3079/* --------------------- Begin pc_set_termios ----------------------- */ 2544/* --------------------- Begin pc_set_termios ----------------------- */
@@ -3083,20 +2548,16 @@ static void pc_set_termios(struct tty_struct *tty, struct termios *old_termios)
3083 2548
3084 struct channel *ch; 2549 struct channel *ch;
3085 unsigned long flags; 2550 unsigned long flags;
3086
3087 /* --------------------------------------------------------- 2551 /* ---------------------------------------------------------
3088 verifyChannel returns the channel from the tty struct 2552 verifyChannel returns the channel from the tty struct
3089 if it is valid. This serves as a sanity check. 2553 if it is valid. This serves as a sanity check.
3090 ------------------------------------------------------------- */ 2554 ------------------------------------------------------------- */
3091 2555 if ((ch = verifyChannel(tty)) != NULL) { /* Begin if channel valid */
3092 if ((ch = verifyChannel(tty)) != NULL) 2556 spin_lock_irqsave(&epca_lock, flags);
3093 { /* Begin if channel valid */
3094
3095 save_flags(flags);
3096 cli();
3097 globalwinon(ch); 2557 globalwinon(ch);
3098 epcaparam(tty, ch); 2558 epcaparam(tty, ch);
3099 memoff(ch); 2559 memoff(ch);
2560 spin_unlock_irqrestore(&epca_lock, flags);
3100 2561
3101 if ((old_termios->c_cflag & CRTSCTS) && 2562 if ((old_termios->c_cflag & CRTSCTS) &&
3102 ((tty->termios->c_cflag & CRTSCTS) == 0)) 2563 ((tty->termios->c_cflag & CRTSCTS) == 0))
@@ -3106,8 +2567,6 @@ static void pc_set_termios(struct tty_struct *tty, struct termios *old_termios)
3106 (tty->termios->c_cflag & CLOCAL)) 2567 (tty->termios->c_cflag & CLOCAL))
3107 wake_up_interruptible(&ch->open_wait); 2568 wake_up_interruptible(&ch->open_wait);
3108 2569
3109 restore_flags(flags);
3110
3111 } /* End if channel valid */ 2570 } /* End if channel valid */
3112 2571
3113} /* End pc_set_termios */ 2572} /* End pc_set_termios */
@@ -3116,29 +2575,18 @@ static void pc_set_termios(struct tty_struct *tty, struct termios *old_termios)
3116 2575
3117static void do_softint(void *private_) 2576static void do_softint(void *private_)
3118{ /* Begin do_softint */ 2577{ /* Begin do_softint */
3119
3120 struct channel *ch = (struct channel *) private_; 2578 struct channel *ch = (struct channel *) private_;
3121
3122
3123 /* Called in response to a modem change event */ 2579 /* Called in response to a modem change event */
3124 2580 if (ch && ch->magic == EPCA_MAGIC) { /* Begin EPCA_MAGIC */
3125 if (ch && ch->magic == EPCA_MAGIC)
3126 { /* Begin EPCA_MAGIC */
3127
3128 struct tty_struct *tty = ch->tty; 2581 struct tty_struct *tty = ch->tty;
3129 2582
3130 if (tty && tty->driver_data) 2583 if (tty && tty->driver_data) {
3131 { 2584 if (test_and_clear_bit(EPCA_EVENT_HANGUP, &ch->event)) { /* Begin if clear_bit */
3132 if (test_and_clear_bit(EPCA_EVENT_HANGUP, &ch->event))
3133 { /* Begin if clear_bit */
3134
3135 tty_hangup(tty); /* FIXME: module removal race here - AKPM */ 2585 tty_hangup(tty); /* FIXME: module removal race here - AKPM */
3136 wake_up_interruptible(&ch->open_wait); 2586 wake_up_interruptible(&ch->open_wait);
3137 ch->asyncflags &= ~ASYNC_NORMAL_ACTIVE; 2587 ch->asyncflags &= ~ASYNC_NORMAL_ACTIVE;
3138
3139 } /* End if clear_bit */ 2588 } /* End if clear_bit */
3140 } 2589 }
3141
3142 } /* End EPCA_MAGIC */ 2590 } /* End EPCA_MAGIC */
3143} /* End do_softint */ 2591} /* End do_softint */
3144 2592
@@ -3154,82 +2602,49 @@ static void pc_stop(struct tty_struct *tty)
3154 2602
3155 struct channel *ch; 2603 struct channel *ch;
3156 unsigned long flags; 2604 unsigned long flags;
3157
3158 /* --------------------------------------------------------- 2605 /* ---------------------------------------------------------
3159 verifyChannel returns the channel from the tty struct 2606 verifyChannel returns the channel from the tty struct
3160 if it is valid. This serves as a sanity check. 2607 if it is valid. This serves as a sanity check.
3161 ------------------------------------------------------------- */ 2608 ------------------------------------------------------------- */
3162 2609 if ((ch = verifyChannel(tty)) != NULL) { /* Begin if valid channel */
3163 if ((ch = verifyChannel(tty)) != NULL) 2610 spin_lock_irqsave(&epca_lock, flags);
3164 { /* Begin if valid channel */ 2611 if ((ch->statusflags & TXSTOPPED) == 0) { /* Begin if transmit stop requested */
3165
3166 save_flags(flags);
3167 cli();
3168
3169 if ((ch->statusflags & TXSTOPPED) == 0)
3170 { /* Begin if transmit stop requested */
3171
3172 globalwinon(ch); 2612 globalwinon(ch);
3173
3174 /* STOP transmitting now !! */ 2613 /* STOP transmitting now !! */
3175
3176 fepcmd(ch, PAUSETX, 0, 0, 0, 0); 2614 fepcmd(ch, PAUSETX, 0, 0, 0, 0);
3177
3178 ch->statusflags |= TXSTOPPED; 2615 ch->statusflags |= TXSTOPPED;
3179 memoff(ch); 2616 memoff(ch);
3180
3181 } /* End if transmit stop requested */ 2617 } /* End if transmit stop requested */
3182 2618 spin_unlock_irqrestore(&epca_lock, flags);
3183 restore_flags(flags);
3184
3185 } /* End if valid channel */ 2619 } /* End if valid channel */
3186
3187} /* End pc_stop */ 2620} /* End pc_stop */
3188 2621
3189/* --------------------- Begin pc_start ----------------------- */ 2622/* --------------------- Begin pc_start ----------------------- */
3190 2623
3191static void pc_start(struct tty_struct *tty) 2624static void pc_start(struct tty_struct *tty)
3192{ /* Begin pc_start */ 2625{ /* Begin pc_start */
3193
3194 struct channel *ch; 2626 struct channel *ch;
3195
3196 /* --------------------------------------------------------- 2627 /* ---------------------------------------------------------
3197 verifyChannel returns the channel from the tty struct 2628 verifyChannel returns the channel from the tty struct
3198 if it is valid. This serves as a sanity check. 2629 if it is valid. This serves as a sanity check.
3199 ------------------------------------------------------------- */ 2630 ------------------------------------------------------------- */
3200 2631 if ((ch = verifyChannel(tty)) != NULL) { /* Begin if channel valid */
3201 if ((ch = verifyChannel(tty)) != NULL)
3202 { /* Begin if channel valid */
3203
3204 unsigned long flags; 2632 unsigned long flags;
3205 2633 spin_lock_irqsave(&epca_lock, flags);
3206 save_flags(flags);
3207 cli();
3208
3209 /* Just in case output was resumed because of a change in Digi-flow */ 2634 /* Just in case output was resumed because of a change in Digi-flow */
3210 if (ch->statusflags & TXSTOPPED) 2635 if (ch->statusflags & TXSTOPPED) { /* Begin transmit resume requested */
3211 { /* Begin transmit resume requested */ 2636 struct board_chan *bc;
3212
3213 volatile struct board_chan *bc;
3214
3215 globalwinon(ch); 2637 globalwinon(ch);
3216 bc = ch->brdchan; 2638 bc = ch->brdchan;
3217 if (ch->statusflags & LOWWAIT) 2639 if (ch->statusflags & LOWWAIT)
3218 bc->ilow = 1; 2640 writeb(1, &bc->ilow);
3219
3220 /* Okay, you can start transmitting again... */ 2641 /* Okay, you can start transmitting again... */
3221
3222 fepcmd(ch, RESUMETX, 0, 0, 0, 0); 2642 fepcmd(ch, RESUMETX, 0, 0, 0, 0);
3223
3224 ch->statusflags &= ~TXSTOPPED; 2643 ch->statusflags &= ~TXSTOPPED;
3225 memoff(ch); 2644 memoff(ch);
3226
3227 } /* End transmit resume requested */ 2645 } /* End transmit resume requested */
3228 2646 spin_unlock_irqrestore(&epca_lock, flags);
3229 restore_flags(flags);
3230
3231 } /* End if channel valid */ 2647 } /* End if channel valid */
3232
3233} /* End pc_start */ 2648} /* End pc_start */
3234 2649
3235/* ------------------------------------------------------------------ 2650/* ------------------------------------------------------------------
@@ -3244,86 +2659,55 @@ ______________________________________________________________________ */
3244 2659
3245static void pc_throttle(struct tty_struct * tty) 2660static void pc_throttle(struct tty_struct * tty)
3246{ /* Begin pc_throttle */ 2661{ /* Begin pc_throttle */
3247
3248 struct channel *ch; 2662 struct channel *ch;
3249 unsigned long flags; 2663 unsigned long flags;
3250
3251 /* --------------------------------------------------------- 2664 /* ---------------------------------------------------------
3252 verifyChannel returns the channel from the tty struct 2665 verifyChannel returns the channel from the tty struct
3253 if it is valid. This serves as a sanity check. 2666 if it is valid. This serves as a sanity check.
3254 ------------------------------------------------------------- */ 2667 ------------------------------------------------------------- */
3255 2668 if ((ch = verifyChannel(tty)) != NULL) { /* Begin if channel valid */
3256 if ((ch = verifyChannel(tty)) != NULL) 2669 spin_lock_irqsave(&epca_lock, flags);
3257 { /* Begin if channel valid */ 2670 if ((ch->statusflags & RXSTOPPED) == 0) {
3258
3259
3260 save_flags(flags);
3261 cli();
3262
3263 if ((ch->statusflags & RXSTOPPED) == 0)
3264 {
3265 globalwinon(ch); 2671 globalwinon(ch);
3266 fepcmd(ch, PAUSERX, 0, 0, 0, 0); 2672 fepcmd(ch, PAUSERX, 0, 0, 0, 0);
3267
3268 ch->statusflags |= RXSTOPPED; 2673 ch->statusflags |= RXSTOPPED;
3269 memoff(ch); 2674 memoff(ch);
3270 } 2675 }
3271 restore_flags(flags); 2676 spin_unlock_irqrestore(&epca_lock, flags);
3272
3273 } /* End if channel valid */ 2677 } /* End if channel valid */
3274
3275} /* End pc_throttle */ 2678} /* End pc_throttle */
3276 2679
3277/* --------------------- Begin unthrottle ----------------------- */ 2680/* --------------------- Begin unthrottle ----------------------- */
3278 2681
3279static void pc_unthrottle(struct tty_struct *tty) 2682static void pc_unthrottle(struct tty_struct *tty)
3280{ /* Begin pc_unthrottle */ 2683{ /* Begin pc_unthrottle */
3281
3282 struct channel *ch; 2684 struct channel *ch;
3283 unsigned long flags; 2685 unsigned long flags;
3284 volatile struct board_chan *bc;
3285
3286
3287 /* --------------------------------------------------------- 2686 /* ---------------------------------------------------------
3288 verifyChannel returns the channel from the tty struct 2687 verifyChannel returns the channel from the tty struct
3289 if it is valid. This serves as a sanity check. 2688 if it is valid. This serves as a sanity check.
3290 ------------------------------------------------------------- */ 2689 ------------------------------------------------------------- */
3291 2690 if ((ch = verifyChannel(tty)) != NULL) { /* Begin if channel valid */
3292 if ((ch = verifyChannel(tty)) != NULL)
3293 { /* Begin if channel valid */
3294
3295
3296 /* Just in case output was resumed because of a change in Digi-flow */ 2691 /* Just in case output was resumed because of a change in Digi-flow */
3297 save_flags(flags); 2692 spin_lock_irqsave(&epca_lock, flags);
3298 cli(); 2693 if (ch->statusflags & RXSTOPPED) {
3299
3300 if (ch->statusflags & RXSTOPPED)
3301 {
3302
3303 globalwinon(ch); 2694 globalwinon(ch);
3304 bc = ch->brdchan;
3305 fepcmd(ch, RESUMERX, 0, 0, 0, 0); 2695 fepcmd(ch, RESUMERX, 0, 0, 0, 0);
3306
3307 ch->statusflags &= ~RXSTOPPED; 2696 ch->statusflags &= ~RXSTOPPED;
3308 memoff(ch); 2697 memoff(ch);
3309 } 2698 }
3310 restore_flags(flags); 2699 spin_unlock_irqrestore(&epca_lock, flags);
3311
3312 } /* End if channel valid */ 2700 } /* End if channel valid */
3313
3314} /* End pc_unthrottle */ 2701} /* End pc_unthrottle */
3315 2702
3316/* --------------------- Begin digi_send_break ----------------------- */ 2703/* --------------------- Begin digi_send_break ----------------------- */
3317 2704
3318void digi_send_break(struct channel *ch, int msec) 2705void digi_send_break(struct channel *ch, int msec)
3319{ /* Begin digi_send_break */ 2706{ /* Begin digi_send_break */
3320
3321 unsigned long flags; 2707 unsigned long flags;
3322 2708
3323 save_flags(flags); 2709 spin_lock_irqsave(&epca_lock, flags);
3324 cli();
3325 globalwinon(ch); 2710 globalwinon(ch);
3326
3327 /* -------------------------------------------------------------------- 2711 /* --------------------------------------------------------------------
3328 Maybe I should send an infinite break here, schedule() for 2712 Maybe I should send an infinite break here, schedule() for
3329 msec amount of time, and then stop the break. This way, 2713 msec amount of time, and then stop the break. This way,
@@ -3331,36 +2715,28 @@ void digi_send_break(struct channel *ch, int msec)
3331 to be called (i.e. via an ioctl()) more than once in msec amount 2715 to be called (i.e. via an ioctl()) more than once in msec amount
3332 of time. Try this for now... 2716 of time. Try this for now...
3333 ------------------------------------------------------------------------ */ 2717 ------------------------------------------------------------------------ */
3334
3335 fepcmd(ch, SENDBREAK, msec, 0, 10, 0); 2718 fepcmd(ch, SENDBREAK, msec, 0, 10, 0);
3336 memoff(ch); 2719 memoff(ch);
3337 2720 spin_unlock_irqrestore(&epca_lock, flags);
3338 restore_flags(flags);
3339
3340} /* End digi_send_break */ 2721} /* End digi_send_break */
3341 2722
3342/* --------------------- Begin setup_empty_event ----------------------- */ 2723/* --------------------- Begin setup_empty_event ----------------------- */
3343 2724
2725/* Caller MUST hold the lock */
2726
3344static void setup_empty_event(struct tty_struct *tty, struct channel *ch) 2727static void setup_empty_event(struct tty_struct *tty, struct channel *ch)
3345{ /* Begin setup_empty_event */ 2728{ /* Begin setup_empty_event */
3346 2729
3347 volatile struct board_chan *bc = ch->brdchan; 2730 struct board_chan *bc = ch->brdchan;
3348 unsigned long int flags;
3349 2731
3350 save_flags(flags);
3351 cli();
3352 globalwinon(ch); 2732 globalwinon(ch);
3353 ch->statusflags |= EMPTYWAIT; 2733 ch->statusflags |= EMPTYWAIT;
3354
3355 /* ------------------------------------------------------------------ 2734 /* ------------------------------------------------------------------
3356 When set, the iempty flag requests an event to be generated when the 2735 When set, the iempty flag requests an event to be generated when the
3357 transmit buffer is empty (if there is no BREAK in progress). 2736 transmit buffer is empty (if there is no BREAK in progress).
3358 --------------------------------------------------------------------- */ 2737 --------------------------------------------------------------------- */
3359 2738 writeb(1, &bc->iempty);
3360 bc->iempty = 1;
3361 memoff(ch); 2739 memoff(ch);
3362 restore_flags(flags);
3363
3364} /* End setup_empty_event */ 2740} /* End setup_empty_event */
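Besides the locking change, setup_empty_event() above also shows the second conversion in this patch: card memory that used to be poked through a volatile struct board_chan pointer (bc->iempty = 1) now goes through the writeb()/readb() accessors, as the DIGI_GETFLOW branch of pc_ioctl() does with readb(&bc->startc). A stand-alone sketch of the idiom; fake_regs, its layout, and the status bit are invented for illustration and assume the region was mapped with ioremap():

#include <asm/io.h>
#include <linux/types.h>

/* Invented register layout, not the real board_chan. */
struct fake_regs {
	u8 status;
	u8 iempty;
};

static void request_empty_event(void __iomem *base)
{
	struct fake_regs __iomem *regs = base;

	/* readb()/writeb() make the access width explicit and keep the
	 * __iomem pointer annotation honest, instead of relying on plain
	 * assignments through a volatile struct pointer. */
	if (!(readb(&regs->status) & 0x01))
		writeb(1, &regs->iempty);
}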
3365 2741
3366/* --------------------- Begin get_termio ----------------------- */ 2742/* --------------------- Begin get_termio ----------------------- */
@@ -3369,10 +2745,10 @@ static int get_termio(struct tty_struct * tty, struct termio __user * termio)
3369{ /* Begin get_termio */ 2745{ /* Begin get_termio */
3370 return kernel_termios_to_user_termio(termio, tty->termios); 2746 return kernel_termios_to_user_termio(termio, tty->termios);
3371} /* End get_termio */ 2747} /* End get_termio */
2748
3372/* ---------------------- Begin epca_setup -------------------------- */ 2749/* ---------------------- Begin epca_setup -------------------------- */
3373void epca_setup(char *str, int *ints) 2750void epca_setup(char *str, int *ints)
3374{ /* Begin epca_setup */ 2751{ /* Begin epca_setup */
3375
3376 struct board_info board; 2752 struct board_info board;
3377 int index, loop, last; 2753 int index, loop, last;
3378 char *temp, *t2; 2754 char *temp, *t2;
@@ -3394,49 +2770,41 @@ void epca_setup(char *str, int *ints)
3394 for (last = 0, index = 1; index <= ints[0]; index++) 2770 for (last = 0, index = 1; index <= ints[0]; index++)
3395 switch(index) 2771 switch(index)
3396 { /* Begin parse switch */ 2772 { /* Begin parse switch */
3397
3398 case 1: 2773 case 1:
3399 board.status = ints[index]; 2774 board.status = ints[index];
3400
3401 /* --------------------------------------------------------- 2775 /* ---------------------------------------------------------
3402 We check for 2 (as opposed to 1) because 2 is a flag 2776 We check for 2 (as opposed to 1) because 2 is a flag
3403 instructing the driver to ignore epcaconfig as well 2777 instructing the driver to ignore epcaconfig as well
3404 as the lilo command line. 2778 as the lilo command line.
3405 ------------------------------------------------------------ */ 2779 ------------------------------------------------------------ */
3406 if (board.status == 2) 2780 if (board.status == 2) { /* Begin ignore epcaconfig as well as lilo cmd line */
3407 { /* Begin ignore epcaconfig as well as lilo cmd line */
3408 nbdevs = 0; 2781 nbdevs = 0;
3409 num_cards = 0; 2782 num_cards = 0;
3410 return; 2783 return;
3411 } /* End ignore epcaconfig as well as lilo cmd line */ 2784 } /* End ignore epcaconfig as well as lilo cmd line */
3412 2785
3413 if (board.status > 2) 2786 if (board.status > 2) {
3414 { 2787 printk(KERN_ERR "epca_setup: Invalid board status 0x%x\n", board.status);
3415 printk(KERN_ERR "<Error> - epca_setup: Invalid board status 0x%x\n", board.status);
3416 invalid_lilo_config = 1; 2788 invalid_lilo_config = 1;
3417 setup_error_code |= INVALID_BOARD_STATUS; 2789 setup_error_code |= INVALID_BOARD_STATUS;
3418 return; 2790 return;
3419 } 2791 }
3420 last = index; 2792 last = index;
3421 break; 2793 break;
3422
3423 case 2: 2794 case 2:
3424 board.type = ints[index]; 2795 board.type = ints[index];
3425 if (board.type >= PCIXEM) 2796 if (board.type >= PCIXEM) {
3426 { 2797 printk(KERN_ERR "epca_setup: Invalid board type 0x%x\n", board.type);
3427 printk(KERN_ERR "<Error> - epca_setup: Invalid board type 0x%x\n", board.type);
3428 invalid_lilo_config = 1; 2798 invalid_lilo_config = 1;
3429 setup_error_code |= INVALID_BOARD_TYPE; 2799 setup_error_code |= INVALID_BOARD_TYPE;
3430 return; 2800 return;
3431 } 2801 }
3432 last = index; 2802 last = index;
3433 break; 2803 break;
3434
3435 case 3: 2804 case 3:
3436 board.altpin = ints[index]; 2805 board.altpin = ints[index];
3437 if (board.altpin > 1) 2806 if (board.altpin > 1) {
3438 { 2807 printk(KERN_ERR "epca_setup: Invalid board altpin 0x%x\n", board.altpin);
3439 printk(KERN_ERR "<Error> - epca_setup: Invalid board altpin 0x%x\n", board.altpin);
3440 invalid_lilo_config = 1; 2808 invalid_lilo_config = 1;
3441 setup_error_code |= INVALID_ALTPIN; 2809 setup_error_code |= INVALID_ALTPIN;
3442 return; 2810 return;
@@ -3446,9 +2814,8 @@ void epca_setup(char *str, int *ints)
3446 2814
3447 case 4: 2815 case 4:
3448 board.numports = ints[index]; 2816 board.numports = ints[index];
3449 if ((board.numports < 2) || (board.numports > 256)) 2817 if (board.numports < 2 || board.numports > 256) {
3450 { 2818 printk(KERN_ERR "epca_setup: Invalid board numports 0x%x\n", board.numports);
3451 printk(KERN_ERR "<Error> - epca_setup: Invalid board numports 0x%x\n", board.numports);
3452 invalid_lilo_config = 1; 2819 invalid_lilo_config = 1;
3453 setup_error_code |= INVALID_NUM_PORTS; 2820 setup_error_code |= INVALID_NUM_PORTS;
3454 return; 2821 return;
@@ -3458,10 +2825,9 @@ void epca_setup(char *str, int *ints)
3458 break; 2825 break;
3459 2826
3460 case 5: 2827 case 5:
3461 board.port = (unsigned char *)ints[index]; 2828 board.port = ints[index];
3462 if (ints[index] <= 0) 2829 if (ints[index] <= 0) {
3463 { 2830 printk(KERN_ERR "epca_setup: Invalid io port 0x%x\n", (unsigned int)board.port);
3464 printk(KERN_ERR "<Error> - epca_setup: Invalid io port 0x%x\n", (unsigned int)board.port);
3465 invalid_lilo_config = 1; 2831 invalid_lilo_config = 1;
3466 setup_error_code |= INVALID_PORT_BASE; 2832 setup_error_code |= INVALID_PORT_BASE;
3467 return; 2833 return;
@@ -3470,10 +2836,9 @@ void epca_setup(char *str, int *ints)
3470 break; 2836 break;
3471 2837
3472 case 6: 2838 case 6:
3473 board.membase = (unsigned char *)ints[index]; 2839 board.membase = ints[index];
3474 if (ints[index] <= 0) 2840 if (ints[index] <= 0) {
3475 { 2841 printk(KERN_ERR "epca_setup: Invalid memory base 0x%x\n",(unsigned int)board.membase);
3476 printk(KERN_ERR "<Error> - epca_setup: Invalid memory base 0x%x\n",(unsigned int)board.membase);
3477 invalid_lilo_config = 1; 2842 invalid_lilo_config = 1;
3478 setup_error_code |= INVALID_MEM_BASE; 2843 setup_error_code |= INVALID_MEM_BASE;
3479 return; 2844 return;
@@ -3487,21 +2852,16 @@ void epca_setup(char *str, int *ints)
3487 2852
3488 } /* End parse switch */ 2853 } /* End parse switch */
3489 2854
3490 while (str && *str) 2855 while (str && *str) { /* Begin while there is a string arg */
3491 { /* Begin while there is a string arg */
3492
3493 /* find the next comma or terminator */ 2856 /* find the next comma or terminator */
3494 temp = str; 2857 temp = str;
3495
3496 /* While string is not null, and a comma hasn't been found */ 2858 /* While string is not null, and a comma hasn't been found */
3497 while (*temp && (*temp != ',')) 2859 while (*temp && (*temp != ','))
3498 temp++; 2860 temp++;
3499
3500 if (!*temp) 2861 if (!*temp)
3501 temp = NULL; 2862 temp = NULL;
3502 else 2863 else
3503 *temp++ = 0; 2864 *temp++ = 0;
3504
3505 /* Set index to the number of args + 1 */ 2865 /* Set index to the number of args + 1 */
3506 index = last + 1; 2866 index = last + 1;
3507 2867
@@ -3511,12 +2871,10 @@ void epca_setup(char *str, int *ints)
3511 len = strlen(str); 2871 len = strlen(str);
3512 if (strncmp("Disable", str, len) == 0) 2872 if (strncmp("Disable", str, len) == 0)
3513 board.status = 0; 2873 board.status = 0;
3514 else 2874 else if (strncmp("Enable", str, len) == 0)
3515 if (strncmp("Enable", str, len) == 0)
3516 board.status = 1; 2875 board.status = 1;
3517 else 2876 else {
3518 { 2877 printk(KERN_ERR "epca_setup: Invalid status %s\n", str);
3519 printk(KERN_ERR "<Error> - epca_setup: Invalid status %s\n", str);
3520 invalid_lilo_config = 1; 2878 invalid_lilo_config = 1;
3521 setup_error_code |= INVALID_BOARD_STATUS; 2879 setup_error_code |= INVALID_BOARD_STATUS;
3522 return; 2880 return;
@@ -3525,22 +2883,17 @@ void epca_setup(char *str, int *ints)
3525 break; 2883 break;
3526 2884
3527 case 2: 2885 case 2:
3528
3529 for(loop = 0; loop < EPCA_NUM_TYPES; loop++) 2886 for(loop = 0; loop < EPCA_NUM_TYPES; loop++)
3530 if (strcmp(board_desc[loop], str) == 0) 2887 if (strcmp(board_desc[loop], str) == 0)
3531 break; 2888 break;
3532
3533
3534 /* --------------------------------------------------------------- 2889 /* ---------------------------------------------------------------
3535 If the index incremented above refers to a legitimate board 2890 If the index incremented above refers to a legitimate board
3536 type, set it here. 2891 type, set it here.
3537 ------------------------------------------------------------------*/ 2892 ------------------------------------------------------------------*/
3538
3539 if (index < EPCA_NUM_TYPES) 2893 if (index < EPCA_NUM_TYPES)
3540 board.type = loop; 2894 board.type = loop;
3541 else 2895 else {
3542 { 2896 printk(KERN_ERR "epca_setup: Invalid board type: %s\n", str);
3543 printk(KERN_ERR "<Error> - epca_setup: Invalid board type: %s\n", str);
3544 invalid_lilo_config = 1; 2897 invalid_lilo_config = 1;
3545 setup_error_code |= INVALID_BOARD_TYPE; 2898 setup_error_code |= INVALID_BOARD_TYPE;
3546 return; 2899 return;
@@ -3552,12 +2905,10 @@ void epca_setup(char *str, int *ints)
3552 len = strlen(str); 2905 len = strlen(str);
3553 if (strncmp("Disable", str, len) == 0) 2906 if (strncmp("Disable", str, len) == 0)
3554 board.altpin = 0; 2907 board.altpin = 0;
3555 else 2908 else if (strncmp("Enable", str, len) == 0)
3556 if (strncmp("Enable", str, len) == 0)
3557 board.altpin = 1; 2909 board.altpin = 1;
3558 else 2910 else {
3559 { 2911 printk(KERN_ERR "epca_setup: Invalid altpin %s\n", str);
3560 printk(KERN_ERR "<Error> - epca_setup: Invalid altpin %s\n", str);
3561 invalid_lilo_config = 1; 2912 invalid_lilo_config = 1;
3562 setup_error_code |= INVALID_ALTPIN; 2913 setup_error_code |= INVALID_ALTPIN;
3563 return; 2914 return;
@@ -3570,9 +2921,8 @@ void epca_setup(char *str, int *ints)
3570 while (isdigit(*t2)) 2921 while (isdigit(*t2))
3571 t2++; 2922 t2++;
3572 2923
3573 if (*t2) 2924 if (*t2) {
3574 { 2925 printk(KERN_ERR "epca_setup: Invalid port count %s\n", str);
3575 printk(KERN_ERR "<Error> - epca_setup: Invalid port count %s\n", str);
3576 invalid_lilo_config = 1; 2926 invalid_lilo_config = 1;
3577 setup_error_code |= INVALID_NUM_PORTS; 2927 setup_error_code |= INVALID_NUM_PORTS;
3578 return; 2928 return;
@@ -3601,15 +2951,14 @@ void epca_setup(char *str, int *ints)
3601 while (isxdigit(*t2)) 2951 while (isxdigit(*t2))
3602 t2++; 2952 t2++;
3603 2953
3604 if (*t2) 2954 if (*t2) {
3605 { 2955 printk(KERN_ERR "epca_setup: Invalid i/o address %s\n", str);
3606 printk(KERN_ERR "<Error> - epca_setup: Invalid i/o address %s\n", str);
3607 invalid_lilo_config = 1; 2956 invalid_lilo_config = 1;
3608 setup_error_code |= INVALID_PORT_BASE; 2957 setup_error_code |= INVALID_PORT_BASE;
3609 return; 2958 return;
3610 } 2959 }
3611 2960
3612 board.port = (unsigned char *)simple_strtoul(str, NULL, 16); 2961 board.port = simple_strtoul(str, NULL, 16);
3613 last = index; 2962 last = index;
3614 break; 2963 break;
3615 2964
@@ -3618,52 +2967,38 @@ void epca_setup(char *str, int *ints)
3618 while (isxdigit(*t2)) 2967 while (isxdigit(*t2))
3619 t2++; 2968 t2++;
3620 2969
3621 if (*t2) 2970 if (*t2) {
3622 { 2971 printk(KERN_ERR "epca_setup: Invalid memory base %s\n",str);
3623 printk(KERN_ERR "<Error> - epca_setup: Invalid memory base %s\n",str);
3624 invalid_lilo_config = 1; 2972 invalid_lilo_config = 1;
3625 setup_error_code |= INVALID_MEM_BASE; 2973 setup_error_code |= INVALID_MEM_BASE;
3626 return; 2974 return;
3627 } 2975 }
3628 2976 board.membase = simple_strtoul(str, NULL, 16);
3629 board.membase = (unsigned char *)simple_strtoul(str, NULL, 16);
3630 last = index; 2977 last = index;
3631 break; 2978 break;
3632
3633 default: 2979 default:
3634 printk(KERN_ERR "PC/Xx: Too many string parms\n"); 2980 printk(KERN_ERR "epca: Too many string parms\n");
3635 return; 2981 return;
3636 } 2982 }
3637 str = temp; 2983 str = temp;
3638
3639 } /* End while there is a string arg */ 2984 } /* End while there is a string arg */
3640 2985
3641 2986 if (last < 6) {
3642 if (last < 6) 2987 printk(KERN_ERR "epca: Insufficient parms specified\n");
3643 {
3644 printk(KERN_ERR "PC/Xx: Insufficient parms specified\n");
3645 return; 2988 return;
3646 } 2989 }
3647 2990
3648 /* I should REALLY validate the stuff here */ 2991 /* I should REALLY validate the stuff here */
3649
3650 /* Copies our local copy of board into boards */ 2992 /* Copies our local copy of board into boards */
3651 memcpy((void *)&boards[num_cards],(void *)&board, sizeof(board)); 2993 memcpy((void *)&boards[num_cards],(void *)&board, sizeof(board));
3652
3653
3654 /* Does this get called once per lilo arg or what? */ 2994 /* Does this get called once per lilo arg or what? */
3655
3656 printk(KERN_INFO "PC/Xx: Added board %i, %s %i ports at 0x%4.4X base 0x%6.6X\n", 2995 printk(KERN_INFO "PC/Xx: Added board %i, %s %i ports at 0x%4.4X base 0x%6.6X\n",
3657 num_cards, board_desc[board.type], 2996 num_cards, board_desc[board.type],
3658 board.numports, (int)board.port, (unsigned int) board.membase); 2997 board.numports, (int)board.port, (unsigned int) board.membase);
3659
3660 num_cards++; 2998 num_cards++;
3661
3662} /* End epca_setup */ 2999} /* End epca_setup */
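epca_setup() above consumes an ints[] array plus a leftover string, which is the long-standing kernel boot-parameter convention: ints[0] carries the count of integers parsed from the command line, ints[1..n] the values, and any non-numeric fields are left in str for hand parsing (the "Enable"/"Disable" and board-name cases above). A hedged sketch of how such a handler is typically wired up; the "mydrv=" name, option layout, and variables are examples only, not taken from this driver:

#include <linux/init.h>
#include <linux/kernel.h>

static int mydrv_status, mydrv_ports;

/* Example: booting with  mydrv=1,2,0,16  yields ints[0]=4, ints[1..4]=values. */
static int __init mydrv_setup(char *str)
{
	int ints[6];

	str = get_options(str, ARRAY_SIZE(ints), ints);
	if (ints[0] >= 1)
		mydrv_status = ints[1];
	if (ints[0] >= 4)
		mydrv_ports = ints[4];
	/* Anything non-numeric is still in *str and can be matched by hand. */
	return 1;
}
__setup("mydrv=", mydrv_setup);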
3663 3000
3664 3001
3665
3666#ifdef ENABLE_PCI
3667/* ------------------------ Begin init_PCI --------------------------- */ 3002/* ------------------------ Begin init_PCI --------------------------- */
3668 3003
3669enum epic_board_types { 3004enum epic_board_types {
@@ -3685,7 +3020,6 @@ static struct {
3685 { PCIXRJ, 2, }, 3020 { PCIXRJ, 2, },
3686}; 3021};
3687 3022
3688
3689static int __devinit epca_init_one (struct pci_dev *pdev, 3023static int __devinit epca_init_one (struct pci_dev *pdev,
3690 const struct pci_device_id *ent) 3024 const struct pci_device_id *ent)
3691{ 3025{
@@ -3711,10 +3045,8 @@ static int __devinit epca_init_one (struct pci_dev *pdev,
3711 boards[board_idx].status = ENABLED; 3045 boards[board_idx].status = ENABLED;
3712 boards[board_idx].type = epca_info_tbl[info_idx].board_type; 3046 boards[board_idx].type = epca_info_tbl[info_idx].board_type;
3713 boards[board_idx].numports = 0x0; 3047 boards[board_idx].numports = 0x0;
3714 boards[board_idx].port = 3048 boards[board_idx].port = addr + PCI_IO_OFFSET;
3715 (unsigned char *)((char *) addr + PCI_IO_OFFSET); 3049 boards[board_idx].membase = addr;
3716 boards[board_idx].membase =
3717 (unsigned char *)((char *) addr);
3718 3050
3719 if (!request_mem_region (addr + PCI_IO_OFFSET, 0x200000, "epca")) { 3051 if (!request_mem_region (addr + PCI_IO_OFFSET, 0x200000, "epca")) {
3720 printk (KERN_ERR PFX "resource 0x%x @ 0x%lx unavailable\n", 3052 printk (KERN_ERR PFX "resource 0x%x @ 0x%lx unavailable\n",
@@ -3775,15 +3107,13 @@ static struct pci_device_id epca_pci_tbl[] = {
3775MODULE_DEVICE_TABLE(pci, epca_pci_tbl); 3107MODULE_DEVICE_TABLE(pci, epca_pci_tbl);
3776 3108
3777int __init init_PCI (void) 3109int __init init_PCI (void)
3778{ /* Begin init_PCI */ 3110{ /* Begin init_PCI */
3779 memset (&epca_driver, 0, sizeof (epca_driver)); 3111 memset (&epca_driver, 0, sizeof (epca_driver));
3780 epca_driver.name = "epca"; 3112 epca_driver.name = "epca";
3781 epca_driver.id_table = epca_pci_tbl; 3113 epca_driver.id_table = epca_pci_tbl;
3782 epca_driver.probe = epca_init_one; 3114 epca_driver.probe = epca_init_one;
3783 3115
3784 return pci_register_driver(&epca_driver); 3116 return pci_register_driver(&epca_driver);
3785} /* End init_PCI */ 3117}
3786
3787#endif /* ENABLE_PCI */
3788 3118
3789MODULE_LICENSE("GPL"); 3119MODULE_LICENSE("GPL");
diff --git a/drivers/char/epca.h b/drivers/char/epca.h
index 52205ef71314..20eeb5a70e1a 100644
--- a/drivers/char/epca.h
+++ b/drivers/char/epca.h
@@ -85,73 +85,73 @@ static char *board_desc[] =
85struct channel 85struct channel
86{ 86{
87 long magic; 87 long magic;
88 unchar boardnum; 88 unsigned char boardnum;
89 unchar channelnum; 89 unsigned char channelnum;
90 unchar omodem; /* FEP output modem status */ 90 unsigned char omodem; /* FEP output modem status */
91 unchar imodem; /* FEP input modem status */ 91 unsigned char imodem; /* FEP input modem status */
92 unchar modemfake; /* Modem values to be forced */ 92 unsigned char modemfake; /* Modem values to be forced */
93 unchar modem; /* Force values */ 93 unsigned char modem; /* Force values */
94 unchar hflow; 94 unsigned char hflow;
95 unchar dsr; 95 unsigned char dsr;
96 unchar dcd; 96 unsigned char dcd;
97 unchar m_rts ; /* The bits used in whatever FEP */ 97 unsigned char m_rts ; /* The bits used in whatever FEP */
98 unchar m_dcd ; /* is indigenous to this board to */ 98 unsigned char m_dcd ; /* is indigenous to this board to */
99 unchar m_dsr ; /* represent each of the physical */ 99 unsigned char m_dsr ; /* represent each of the physical */
100 unchar m_cts ; /* handshake lines */ 100 unsigned char m_cts ; /* handshake lines */
101 unchar m_ri ; 101 unsigned char m_ri ;
102 unchar m_dtr ; 102 unsigned char m_dtr ;
103 unchar stopc; 103 unsigned char stopc;
104 unchar startc; 104 unsigned char startc;
105 unchar stopca; 105 unsigned char stopca;
106 unchar startca; 106 unsigned char startca;
107 unchar fepstopc; 107 unsigned char fepstopc;
108 unchar fepstartc; 108 unsigned char fepstartc;
109 unchar fepstopca; 109 unsigned char fepstopca;
110 unchar fepstartca; 110 unsigned char fepstartca;
111 unchar txwin; 111 unsigned char txwin;
112 unchar rxwin; 112 unsigned char rxwin;
113 ushort fepiflag; 113 unsigned short fepiflag;
114 ushort fepcflag; 114 unsigned short fepcflag;
115 ushort fepoflag; 115 unsigned short fepoflag;
116 ushort txbufhead; 116 unsigned short txbufhead;
117 ushort txbufsize; 117 unsigned short txbufsize;
118 ushort rxbufhead; 118 unsigned short rxbufhead;
119 ushort rxbufsize; 119 unsigned short rxbufsize;
120 int close_delay; 120 int close_delay;
121 int count; 121 int count;
122 int blocked_open; 122 int blocked_open;
123 ulong event; 123 unsigned long event;
124 int asyncflags; 124 int asyncflags;
125 uint dev; 125 uint dev;
126 ulong statusflags; 126 unsigned long statusflags;
127 ulong c_iflag; 127 unsigned long c_iflag;
128 ulong c_cflag; 128 unsigned long c_cflag;
129 ulong c_lflag; 129 unsigned long c_lflag;
130 ulong c_oflag; 130 unsigned long c_oflag;
131 unchar *txptr; 131 unsigned char *txptr;
132 unchar *rxptr; 132 unsigned char *rxptr;
133 unchar *tmp_buf; 133 unsigned char *tmp_buf;
134 struct board_info *board; 134 struct board_info *board;
135 volatile struct board_chan *brdchan; 135 struct board_chan *brdchan;
136 struct digi_struct digiext; 136 struct digi_struct digiext;
137 struct tty_struct *tty; 137 struct tty_struct *tty;
138 wait_queue_head_t open_wait; 138 wait_queue_head_t open_wait;
139 wait_queue_head_t close_wait; 139 wait_queue_head_t close_wait;
140 struct work_struct tqueue; 140 struct work_struct tqueue;
141 volatile struct global_data *mailbox; 141 struct global_data *mailbox;
142}; 142};
143 143
144struct board_info 144struct board_info
145{ 145{
146 unchar status; 146 unsigned char status;
147 unchar type; 147 unsigned char type;
148 unchar altpin; 148 unsigned char altpin;
149 ushort numports; 149 unsigned short numports;
150 unchar *port; 150 unsigned long port;
151 unchar *membase; 151 unsigned long membase;
152 unchar __iomem *re_map_port; 152 unsigned char __iomem *re_map_port;
153 unchar *re_map_membase; 153 unsigned char *re_map_membase;
154 ulong memory_seg; 154 unsigned long memory_seg;
155 void ( * memwinon ) (struct board_info *, unsigned int) ; 155 void ( * memwinon ) (struct board_info *, unsigned int) ;
156 void ( * memwinoff ) (struct board_info *, unsigned int) ; 156 void ( * memwinoff ) (struct board_info *, unsigned int) ;
157 void ( * globalwinon ) (struct channel *) ; 157 void ( * globalwinon ) (struct channel *) ;
@@ -160,6 +160,6 @@ struct board_info
160 void ( * memoff ) (struct channel *) ; 160 void ( * memoff ) (struct channel *) ;
161 void ( * assertgwinon ) (struct channel *) ; 161 void ( * assertgwinon ) (struct channel *) ;
162 void ( * assertmemoff ) (struct channel *) ; 162 void ( * assertmemoff ) (struct channel *) ;
163 unchar poller_inhibited ; 163 unsigned char poller_inhibited ;
164}; 164};
165 165
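In the board_info changes above, port and membase stop being unchar pointers and become plain unsigned long bus addresses; only re_map_port keeps the __iomem annotation, because that is the cookie actually dereferenced after the region is mapped. A small sketch of the intended split between a raw address and the mapped pointer; the struct, field names, and map function are illustrative, not the driver's own:

#include <asm/io.h>
#include <linux/errno.h>

struct example_board {
	unsigned long membase;		/* raw bus/physical address from config */
	unsigned char __iomem *vaddr;	/* what the CPU actually dereferences */
};

static int example_map(struct example_board *b, unsigned long size)
{
	b->vaddr = ioremap(b->membase, size);
	if (!b->vaddr)
		return -ENOMEM;
	/* All later access goes through readb()/writeb() on vaddr. */
	return 0;
}

Keeping the unmapped address as an integer also avoids the (unsigned char *) casts the old epca_setup() and epca_init_one() needed.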
diff --git a/drivers/char/hpet.c b/drivers/char/hpet.c
index 762fa430fb5b..a695f25e4497 100644
--- a/drivers/char/hpet.c
+++ b/drivers/char/hpet.c
@@ -44,7 +44,7 @@
44/* 44/*
45 * The High Precision Event Timer driver. 45 * The High Precision Event Timer driver.
46 * This driver is closely modelled after the rtc.c driver. 46 * This driver is closely modelled after the rtc.c driver.
47 * http://www.intel.com/labs/platcomp/hpet/hpetspec.htm 47 * http://www.intel.com/hardwaredesign/hpetspec.htm
48 */ 48 */
49#define HPET_USER_FREQ (64) 49#define HPET_USER_FREQ (64)
50#define HPET_DRIFT (500) 50#define HPET_DRIFT (500)
@@ -712,7 +712,7 @@ static void hpet_register_interpolator(struct hpets *hpetp)
712 ti->shift = 10; 712 ti->shift = 10;
713 ti->addr = &hpetp->hp_hpet->hpet_mc; 713 ti->addr = &hpetp->hp_hpet->hpet_mc;
714 ti->frequency = hpet_time_div(hpets->hp_period); 714 ti->frequency = hpet_time_div(hpets->hp_period);
715 ti->drift = ti->frequency * HPET_DRIFT / 1000000; 715 ti->drift = HPET_DRIFT;
716 ti->mask = -1; 716 ti->mask = -1;
717 717
718 hpetp->hp_interpolator = ti; 718 hpetp->hp_interpolator = ti;
diff --git a/drivers/char/ipmi/ipmi_bt_sm.c b/drivers/char/ipmi/ipmi_bt_sm.c
index 5ce9c6269033..33862670e285 100644
--- a/drivers/char/ipmi/ipmi_bt_sm.c
+++ b/drivers/char/ipmi/ipmi_bt_sm.c
@@ -31,8 +31,6 @@
31#include <linux/ipmi_msgdefs.h> /* for completion codes */ 31#include <linux/ipmi_msgdefs.h> /* for completion codes */
32#include "ipmi_si_sm.h" 32#include "ipmi_si_sm.h"
33 33
34#define IPMI_BT_VERSION "v33"
35
36static int bt_debug = 0x00; /* Production value 0, see following flags */ 34static int bt_debug = 0x00; /* Production value 0, see following flags */
37 35
38#define BT_DEBUG_ENABLE 1 36#define BT_DEBUG_ENABLE 1
@@ -163,7 +161,8 @@ static int bt_start_transaction(struct si_sm_data *bt,
163{ 161{
164 unsigned int i; 162 unsigned int i;
165 163
166 if ((size < 2) || (size > IPMI_MAX_MSG_LENGTH)) return -1; 164 if ((size < 2) || (size > IPMI_MAX_MSG_LENGTH))
165 return -1;
167 166
168 if ((bt->state != BT_STATE_IDLE) && (bt->state != BT_STATE_HOSED)) 167 if ((bt->state != BT_STATE_IDLE) && (bt->state != BT_STATE_HOSED))
169 return -2; 168 return -2;
@@ -171,7 +170,8 @@ static int bt_start_transaction(struct si_sm_data *bt,
171 if (bt_debug & BT_DEBUG_MSG) { 170 if (bt_debug & BT_DEBUG_MSG) {
172 printk(KERN_WARNING "+++++++++++++++++++++++++++++++++++++\n"); 171 printk(KERN_WARNING "+++++++++++++++++++++++++++++++++++++\n");
173 printk(KERN_WARNING "BT: write seq=0x%02X:", bt->seq); 172 printk(KERN_WARNING "BT: write seq=0x%02X:", bt->seq);
174 for (i = 0; i < size; i ++) printk (" %02x", data[i]); 173 for (i = 0; i < size; i ++)
174 printk (" %02x", data[i]);
175 printk("\n"); 175 printk("\n");
176 } 176 }
177 bt->write_data[0] = size + 1; /* all data plus seq byte */ 177 bt->write_data[0] = size + 1; /* all data plus seq byte */
@@ -210,15 +210,18 @@ static int bt_get_result(struct si_sm_data *bt,
210 } else { 210 } else {
211 data[0] = bt->read_data[1]; 211 data[0] = bt->read_data[1];
212 data[1] = bt->read_data[3]; 212 data[1] = bt->read_data[3];
213 if (length < msg_len) bt->truncated = 1; 213 if (length < msg_len)
214 bt->truncated = 1;
214 if (bt->truncated) { /* can be set in read_all_bytes() */ 215 if (bt->truncated) { /* can be set in read_all_bytes() */
215 data[2] = IPMI_ERR_MSG_TRUNCATED; 216 data[2] = IPMI_ERR_MSG_TRUNCATED;
216 msg_len = 3; 217 msg_len = 3;
217 } else memcpy(data + 2, bt->read_data + 4, msg_len - 2); 218 } else
219 memcpy(data + 2, bt->read_data + 4, msg_len - 2);
218 220
219 if (bt_debug & BT_DEBUG_MSG) { 221 if (bt_debug & BT_DEBUG_MSG) {
220 printk (KERN_WARNING "BT: res (raw)"); 222 printk (KERN_WARNING "BT: res (raw)");
221 for (i = 0; i < msg_len; i++) printk(" %02x", data[i]); 223 for (i = 0; i < msg_len; i++)
224 printk(" %02x", data[i]);
222 printk ("\n"); 225 printk ("\n");
223 } 226 }
224 } 227 }
@@ -231,8 +234,10 @@ static int bt_get_result(struct si_sm_data *bt,
231 234
232static void reset_flags(struct si_sm_data *bt) 235static void reset_flags(struct si_sm_data *bt)
233{ 236{
234 if (BT_STATUS & BT_H_BUSY) BT_CONTROL(BT_H_BUSY); 237 if (BT_STATUS & BT_H_BUSY)
235 if (BT_STATUS & BT_B_BUSY) BT_CONTROL(BT_B_BUSY); 238 BT_CONTROL(BT_H_BUSY);
239 if (BT_STATUS & BT_B_BUSY)
240 BT_CONTROL(BT_B_BUSY);
236 BT_CONTROL(BT_CLR_WR_PTR); 241 BT_CONTROL(BT_CLR_WR_PTR);
237 BT_CONTROL(BT_SMS_ATN); 242 BT_CONTROL(BT_SMS_ATN);
238#ifdef DEVELOPMENT_ONLY_NOT_FOR_PRODUCTION 243#ifdef DEVELOPMENT_ONLY_NOT_FOR_PRODUCTION
@@ -241,7 +246,8 @@ static void reset_flags(struct si_sm_data *bt)
241 BT_CONTROL(BT_H_BUSY); 246 BT_CONTROL(BT_H_BUSY);
242 BT_CONTROL(BT_B2H_ATN); 247 BT_CONTROL(BT_B2H_ATN);
243 BT_CONTROL(BT_CLR_RD_PTR); 248 BT_CONTROL(BT_CLR_RD_PTR);
244 for (i = 0; i < IPMI_MAX_MSG_LENGTH + 2; i++) BMC2HOST; 249 for (i = 0; i < IPMI_MAX_MSG_LENGTH + 2; i++)
250 BMC2HOST;
245 BT_CONTROL(BT_H_BUSY); 251 BT_CONTROL(BT_H_BUSY);
246 } 252 }
247#endif 253#endif
@@ -258,7 +264,8 @@ static inline void write_all_bytes(struct si_sm_data *bt)
258 printk (" %02x", bt->write_data[i]); 264 printk (" %02x", bt->write_data[i]);
259 printk ("\n"); 265 printk ("\n");
260 } 266 }
261 for (i = 0; i < bt->write_count; i++) HOST2BMC(bt->write_data[i]); 267 for (i = 0; i < bt->write_count; i++)
268 HOST2BMC(bt->write_data[i]);
262} 269}
263 270
264static inline int read_all_bytes(struct si_sm_data *bt) 271static inline int read_all_bytes(struct si_sm_data *bt)
@@ -278,7 +285,8 @@ static inline int read_all_bytes(struct si_sm_data *bt)
278 bt->truncated = 1; 285 bt->truncated = 1;
279 return 1; /* let next XACTION START clean it up */ 286 return 1; /* let next XACTION START clean it up */
280 } 287 }
281 for (i = 1; i <= bt->read_count; i++) bt->read_data[i] = BMC2HOST; 288 for (i = 1; i <= bt->read_count; i++)
289 bt->read_data[i] = BMC2HOST;
282 bt->read_count++; /* account for the length byte */ 290 bt->read_count++; /* account for the length byte */
283 291
284 if (bt_debug & BT_DEBUG_MSG) { 292 if (bt_debug & BT_DEBUG_MSG) {
@@ -295,7 +303,8 @@ static inline int read_all_bytes(struct si_sm_data *bt)
295 ((bt->read_data[1] & 0xF8) == (bt->write_data[1] & 0xF8))) 303 ((bt->read_data[1] & 0xF8) == (bt->write_data[1] & 0xF8)))
296 return 1; 304 return 1;
297 305
298 if (bt_debug & BT_DEBUG_MSG) printk(KERN_WARNING "BT: bad packet: " 306 if (bt_debug & BT_DEBUG_MSG)
307 printk(KERN_WARNING "BT: bad packet: "
299 "want 0x(%02X, %02X, %02X) got (%02X, %02X, %02X)\n", 308 "want 0x(%02X, %02X, %02X) got (%02X, %02X, %02X)\n",
300 bt->write_data[1], bt->write_data[2], bt->write_data[3], 309 bt->write_data[1], bt->write_data[2], bt->write_data[3],
301 bt->read_data[1], bt->read_data[2], bt->read_data[3]); 310 bt->read_data[1], bt->read_data[2], bt->read_data[3]);
@@ -359,7 +368,8 @@ static enum si_sm_result bt_event(struct si_sm_data *bt, long time)
359 time); 368 time);
360 bt->last_state = bt->state; 369 bt->last_state = bt->state;
361 370
362 if (bt->state == BT_STATE_HOSED) return SI_SM_HOSED; 371 if (bt->state == BT_STATE_HOSED)
372 return SI_SM_HOSED;
363 373
364 if (bt->state != BT_STATE_IDLE) { /* do timeout test */ 374 if (bt->state != BT_STATE_IDLE) { /* do timeout test */
365 375
@@ -371,7 +381,8 @@ static enum si_sm_result bt_event(struct si_sm_data *bt, long time)
371 /* FIXME: bt_event is sometimes called with time > BT_NORMAL_TIMEOUT 381 /* FIXME: bt_event is sometimes called with time > BT_NORMAL_TIMEOUT
372 (noticed in ipmi_smic_sm.c January 2004) */ 382 (noticed in ipmi_smic_sm.c January 2004) */
373 383
374 if ((time <= 0) || (time >= BT_NORMAL_TIMEOUT)) time = 100; 384 if ((time <= 0) || (time >= BT_NORMAL_TIMEOUT))
385 time = 100;
375 bt->timeout -= time; 386 bt->timeout -= time;
376 if ((bt->timeout < 0) && (bt->state < BT_STATE_RESET1)) { 387 if ((bt->timeout < 0) && (bt->state < BT_STATE_RESET1)) {
377 error_recovery(bt, "timed out"); 388 error_recovery(bt, "timed out");
@@ -393,12 +404,14 @@ static enum si_sm_result bt_event(struct si_sm_data *bt, long time)
393 BT_CONTROL(BT_H_BUSY); 404 BT_CONTROL(BT_H_BUSY);
394 break; 405 break;
395 } 406 }
396 if (status & BT_B2H_ATN) break; 407 if (status & BT_B2H_ATN)
408 break;
397 bt->state = BT_STATE_WRITE_BYTES; 409 bt->state = BT_STATE_WRITE_BYTES;
398 return SI_SM_CALL_WITHOUT_DELAY; /* for logging */ 410 return SI_SM_CALL_WITHOUT_DELAY; /* for logging */
399 411
400 case BT_STATE_WRITE_BYTES: 412 case BT_STATE_WRITE_BYTES:
401 if (status & (BT_B_BUSY | BT_H2B_ATN)) break; 413 if (status & (BT_B_BUSY | BT_H2B_ATN))
414 break;
402 BT_CONTROL(BT_CLR_WR_PTR); 415 BT_CONTROL(BT_CLR_WR_PTR);
403 write_all_bytes(bt); 416 write_all_bytes(bt);
404 BT_CONTROL(BT_H2B_ATN); /* clears too fast to catch? */ 417 BT_CONTROL(BT_H2B_ATN); /* clears too fast to catch? */
@@ -406,7 +419,8 @@ static enum si_sm_result bt_event(struct si_sm_data *bt, long time)
406 return SI_SM_CALL_WITHOUT_DELAY; /* it MIGHT sail through */ 419 return SI_SM_CALL_WITHOUT_DELAY; /* it MIGHT sail through */
407 420
408 case BT_STATE_WRITE_CONSUME: /* BMCs usually blow right thru here */ 421 case BT_STATE_WRITE_CONSUME: /* BMCs usually blow right thru here */
409 if (status & (BT_H2B_ATN | BT_B_BUSY)) break; 422 if (status & (BT_H2B_ATN | BT_B_BUSY))
423 break;
410 bt->state = BT_STATE_B2H_WAIT; 424 bt->state = BT_STATE_B2H_WAIT;
411 /* fall through with status */ 425 /* fall through with status */
412 426
@@ -415,15 +429,18 @@ static enum si_sm_result bt_event(struct si_sm_data *bt, long time)
415 generation of B2H_ATN so ALWAYS return CALL_WITH_DELAY. */ 429 generation of B2H_ATN so ALWAYS return CALL_WITH_DELAY. */
416 430
417 case BT_STATE_B2H_WAIT: 431 case BT_STATE_B2H_WAIT:
418 if (!(status & BT_B2H_ATN)) break; 432 if (!(status & BT_B2H_ATN))
433 break;
419 434
420 /* Assume ordered, uncached writes: no need to wait */ 435 /* Assume ordered, uncached writes: no need to wait */
421 if (!(status & BT_H_BUSY)) BT_CONTROL(BT_H_BUSY); /* set */ 436 if (!(status & BT_H_BUSY))
437 BT_CONTROL(BT_H_BUSY); /* set */
422 BT_CONTROL(BT_B2H_ATN); /* clear it, ACK to the BMC */ 438 BT_CONTROL(BT_B2H_ATN); /* clear it, ACK to the BMC */
423 BT_CONTROL(BT_CLR_RD_PTR); /* reset the queue */ 439 BT_CONTROL(BT_CLR_RD_PTR); /* reset the queue */
424 i = read_all_bytes(bt); 440 i = read_all_bytes(bt);
425 BT_CONTROL(BT_H_BUSY); /* clear */ 441 BT_CONTROL(BT_H_BUSY); /* clear */
426 if (!i) break; /* Try this state again */ 442 if (!i) /* Try this state again */
443 break;
427 bt->state = BT_STATE_READ_END; 444 bt->state = BT_STATE_READ_END;
428 return SI_SM_CALL_WITHOUT_DELAY; /* for logging */ 445 return SI_SM_CALL_WITHOUT_DELAY; /* for logging */
429 446
@@ -436,7 +453,8 @@ static enum si_sm_result bt_event(struct si_sm_data *bt, long time)
436 453
437#ifdef MAKE_THIS_TRUE_IF_NECESSARY 454#ifdef MAKE_THIS_TRUE_IF_NECESSARY
438 455
439 if (status & BT_H_BUSY) break; 456 if (status & BT_H_BUSY)
457 break;
440#endif 458#endif
441 bt->seq++; 459 bt->seq++;
442 bt->state = BT_STATE_IDLE; 460 bt->state = BT_STATE_IDLE;
@@ -459,7 +477,8 @@ static enum si_sm_result bt_event(struct si_sm_data *bt, long time)
459 break; 477 break;
460 478
461 case BT_STATE_RESET3: 479 case BT_STATE_RESET3:
462 if (bt->timeout > 0) return SI_SM_CALL_WITH_DELAY; 480 if (bt->timeout > 0)
481 return SI_SM_CALL_WITH_DELAY;
463 bt->state = BT_STATE_RESTART; /* printk in debug modes */ 482 bt->state = BT_STATE_RESTART; /* printk in debug modes */
464 break; 483 break;
465 484
@@ -485,7 +504,8 @@ static int bt_detect(struct si_sm_data *bt)
485 but that's what you get from reading a bogus address, so we 504 but that's what you get from reading a bogus address, so we
486 test that first. The calling routine uses negative logic. */ 505 test that first. The calling routine uses negative logic. */
487 506
488 if ((BT_STATUS == 0xFF) && (BT_INTMASK_R == 0xFF)) return 1; 507 if ((BT_STATUS == 0xFF) && (BT_INTMASK_R == 0xFF))
508 return 1;
489 reset_flags(bt); 509 reset_flags(bt);
490 return 0; 510 return 0;
491} 511}
@@ -501,7 +521,6 @@ static int bt_size(void)
501 521
502struct si_sm_handlers bt_smi_handlers = 522struct si_sm_handlers bt_smi_handlers =
503{ 523{
504 .version = IPMI_BT_VERSION,
505 .init_data = bt_init_data, 524 .init_data = bt_init_data,
506 .start_transaction = bt_start_transaction, 525 .start_transaction = bt_start_transaction,
507 .get_result = bt_get_result, 526 .get_result = bt_get_result,
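The bt_detect() cleanup above keeps the usual probe idiom its comment describes: a read from an I/O address with nothing behind it floats to 0xFF, so seeing 0xFF in both the status and interrupt-mask registers is treated as "no BMC here", and the routine returns non-zero on absence (the "negative logic" the comment mentions). A stand-alone sketch of the idiom with invented port offsets:

#include <asm/io.h>

/* Returns non-zero (negative logic) when the device looks absent:
 * a floating bus reads back as all ones. */
static int probe_absent(unsigned int io_base)
{
	unsigned char status = inb(io_base);		/* offsets are examples */
	unsigned char intmask = inb(io_base + 2);

	return (status == 0xFF) && (intmask == 0xFF);
}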
diff --git a/drivers/char/ipmi/ipmi_devintf.c b/drivers/char/ipmi/ipmi_devintf.c
index e0a53570fea1..883ac4352be4 100644
--- a/drivers/char/ipmi/ipmi_devintf.c
+++ b/drivers/char/ipmi/ipmi_devintf.c
@@ -47,8 +47,6 @@
47#include <linux/device.h> 47#include <linux/device.h>
48#include <linux/compat.h> 48#include <linux/compat.h>
49 49
50#define IPMI_DEVINTF_VERSION "v33"
51
52struct ipmi_file_private 50struct ipmi_file_private
53{ 51{
54 ipmi_user_t user; 52 ipmi_user_t user;
@@ -411,6 +409,7 @@ static int ipmi_ioctl(struct inode *inode,
411 break; 409 break;
412 } 410 }
413 411
412 /* The next four are legacy, not per-channel. */
414 case IPMICTL_SET_MY_ADDRESS_CMD: 413 case IPMICTL_SET_MY_ADDRESS_CMD:
415 { 414 {
416 unsigned int val; 415 unsigned int val;
@@ -420,22 +419,25 @@ static int ipmi_ioctl(struct inode *inode,
420 break; 419 break;
421 } 420 }
422 421
423 ipmi_set_my_address(priv->user, val); 422 rv = ipmi_set_my_address(priv->user, 0, val);
424 rv = 0;
425 break; 423 break;
426 } 424 }
427 425
428 case IPMICTL_GET_MY_ADDRESS_CMD: 426 case IPMICTL_GET_MY_ADDRESS_CMD:
429 { 427 {
430 unsigned int val; 428 unsigned int val;
429 unsigned char rval;
430
431 rv = ipmi_get_my_address(priv->user, 0, &rval);
432 if (rv)
433 break;
431 434
432 val = ipmi_get_my_address(priv->user); 435 val = rval;
433 436
434 if (copy_to_user(arg, &val, sizeof(val))) { 437 if (copy_to_user(arg, &val, sizeof(val))) {
435 rv = -EFAULT; 438 rv = -EFAULT;
436 break; 439 break;
437 } 440 }
438 rv = 0;
439 break; 441 break;
440 } 442 }
441 443
@@ -448,24 +450,94 @@ static int ipmi_ioctl(struct inode *inode,
448 break; 450 break;
449 } 451 }
450 452
451 ipmi_set_my_LUN(priv->user, val); 453 rv = ipmi_set_my_LUN(priv->user, 0, val);
452 rv = 0;
453 break; 454 break;
454 } 455 }
455 456
456 case IPMICTL_GET_MY_LUN_CMD: 457 case IPMICTL_GET_MY_LUN_CMD:
457 { 458 {
458 unsigned int val; 459 unsigned int val;
460 unsigned char rval;
459 461
460 val = ipmi_get_my_LUN(priv->user); 462 rv = ipmi_get_my_LUN(priv->user, 0, &rval);
463 if (rv)
464 break;
465
466 val = rval;
467
468 if (copy_to_user(arg, &val, sizeof(val))) {
469 rv = -EFAULT;
470 break;
471 }
472 break;
473 }
474
475 case IPMICTL_SET_MY_CHANNEL_ADDRESS_CMD:
476 {
477 struct ipmi_channel_lun_address_set val;
478
479 if (copy_from_user(&val, arg, sizeof(val))) {
480 rv = -EFAULT;
481 break;
482 }
483
484 return ipmi_set_my_address(priv->user, val.channel, val.value);
485 break;
486 }
487
488 case IPMICTL_GET_MY_CHANNEL_ADDRESS_CMD:
489 {
490 struct ipmi_channel_lun_address_set val;
491
492 if (copy_from_user(&val, arg, sizeof(val))) {
493 rv = -EFAULT;
494 break;
495 }
496
497 rv = ipmi_get_my_address(priv->user, val.channel, &val.value);
498 if (rv)
499 break;
500
501 if (copy_to_user(arg, &val, sizeof(val))) {
502 rv = -EFAULT;
503 break;
504 }
505 break;
506 }
507
508 case IPMICTL_SET_MY_CHANNEL_LUN_CMD:
509 {
510 struct ipmi_channel_lun_address_set val;
511
512 if (copy_from_user(&val, arg, sizeof(val))) {
513 rv = -EFAULT;
514 break;
515 }
516
517 rv = ipmi_set_my_LUN(priv->user, val.channel, val.value);
518 break;
519 }
520
521 case IPMICTL_GET_MY_CHANNEL_LUN_CMD:
522 {
523 struct ipmi_channel_lun_address_set val;
524
525 if (copy_from_user(&val, arg, sizeof(val))) {
526 rv = -EFAULT;
527 break;
528 }
529
530 rv = ipmi_get_my_LUN(priv->user, val.channel, &val.value);
531 if (rv)
532 break;
461 533
462 if (copy_to_user(arg, &val, sizeof(val))) { 534 if (copy_to_user(arg, &val, sizeof(val))) {
463 rv = -EFAULT; 535 rv = -EFAULT;
464 break; 536 break;
465 } 537 }
466 rv = 0;
467 break; 538 break;
468 } 539 }
540
469 case IPMICTL_SET_TIMING_PARMS_CMD: 541 case IPMICTL_SET_TIMING_PARMS_CMD:
470 { 542 {
471 struct ipmi_timing_parms parms; 543 struct ipmi_timing_parms parms;
@@ -748,8 +820,7 @@ static __init int init_ipmi_devintf(void)
748 if (ipmi_major < 0) 820 if (ipmi_major < 0)
749 return -EINVAL; 821 return -EINVAL;
750 822
751 printk(KERN_INFO "ipmi device interface version " 823 printk(KERN_INFO "ipmi device interface\n");
752 IPMI_DEVINTF_VERSION "\n");
753 824
754 ipmi_class = class_create(THIS_MODULE, "ipmi"); 825 ipmi_class = class_create(THIS_MODULE, "ipmi");
755 if (IS_ERR(ipmi_class)) { 826 if (IS_ERR(ipmi_class)) {
@@ -792,3 +863,5 @@ static __exit void cleanup_ipmi(void)
792module_exit(cleanup_ipmi); 863module_exit(cleanup_ipmi);
793 864
794MODULE_LICENSE("GPL"); 865MODULE_LICENSE("GPL");
866MODULE_AUTHOR("Corey Minyard <minyard@mvista.com>");
867MODULE_DESCRIPTION("Linux device interface for the IPMI message handler.");
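
For context, the four new ioctls above take the channel in a struct ipmi_channel_lun_address_set ({ channel, value }) rather than a bare integer. Below is a minimal userspace sketch; it assumes a /dev/ipmi0 node and the ioctl/struct definitions exported by the matching include/linux/ipmi.h change (not shown in this hunk), so treat the exact names as illustrative rather than authoritative.

    /* Sketch only: set the slave address for channel 1 and read back
     * the LUN configured for that channel via the new per-channel
     * ioctls. */
    #include <stdio.h>
    #include <fcntl.h>
    #include <unistd.h>
    #include <sys/ioctl.h>
    #include <linux/ipmi.h>

    int main(void)
    {
            struct ipmi_channel_lun_address_set set;
            int fd = open("/dev/ipmi0", O_RDWR);

            if (fd < 0) {
                    perror("open /dev/ipmi0");
                    return 1;
            }

            set.channel = 1;
            set.value   = 0x22;             /* new IPMB slave address */
            if (ioctl(fd, IPMICTL_SET_MY_CHANNEL_ADDRESS_CMD, &set) < 0)
                    perror("set channel address");

            set.channel = 1;
            if (ioctl(fd, IPMICTL_GET_MY_CHANNEL_LUN_CMD, &set) == 0)
                    printf("channel 1 LUN: %d\n", set.value);

            close(fd);
            return 0;
    }
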
diff --git a/drivers/char/ipmi/ipmi_kcs_sm.c b/drivers/char/ipmi/ipmi_kcs_sm.c
index 48cce24329be..d21853a594a3 100644
--- a/drivers/char/ipmi/ipmi_kcs_sm.c
+++ b/drivers/char/ipmi/ipmi_kcs_sm.c
@@ -42,8 +42,6 @@
42#include <linux/ipmi_msgdefs.h> /* for completion codes */ 42#include <linux/ipmi_msgdefs.h> /* for completion codes */
43#include "ipmi_si_sm.h" 43#include "ipmi_si_sm.h"
44 44
45#define IPMI_KCS_VERSION "v33"
46
47/* Set this if you want a printout of why the state machine was hosed 45/* Set this if you want a printout of why the state machine was hosed
48 when it gets hosed. */ 46 when it gets hosed. */
49#define DEBUG_HOSED_REASON 47#define DEBUG_HOSED_REASON
@@ -489,7 +487,6 @@ static void kcs_cleanup(struct si_sm_data *kcs)
489 487
490struct si_sm_handlers kcs_smi_handlers = 488struct si_sm_handlers kcs_smi_handlers =
491{ 489{
492 .version = IPMI_KCS_VERSION,
493 .init_data = init_kcs_data, 490 .init_data = init_kcs_data,
494 .start_transaction = start_kcs_transaction, 491 .start_transaction = start_kcs_transaction,
495 .get_result = get_kcs_result, 492 .get_result = get_kcs_result,
diff --git a/drivers/char/ipmi/ipmi_msghandler.c b/drivers/char/ipmi/ipmi_msghandler.c
index e16c13fe698d..463351d4f942 100644
--- a/drivers/char/ipmi/ipmi_msghandler.c
+++ b/drivers/char/ipmi/ipmi_msghandler.c
@@ -47,7 +47,8 @@
47#include <linux/proc_fs.h> 47#include <linux/proc_fs.h>
48 48
49#define PFX "IPMI message handler: " 49#define PFX "IPMI message handler: "
50#define IPMI_MSGHANDLER_VERSION "v33" 50
51#define IPMI_DRIVER_VERSION "36.0"
51 52
52static struct ipmi_recv_msg *ipmi_alloc_recv_msg(void); 53static struct ipmi_recv_msg *ipmi_alloc_recv_msg(void);
53static int ipmi_init_msghandler(void); 54static int ipmi_init_msghandler(void);
@@ -116,7 +117,7 @@ struct seq_table
116 do { \ 117 do { \
117 seq = ((msgid >> 26) & 0x3f); \ 118 seq = ((msgid >> 26) & 0x3f); \
118 seqid = (msgid & 0x3fffff); \ 119 seqid = (msgid & 0x3fffff); \
119 } while(0) 120 } while (0)
120 121
121#define NEXT_SEQID(seqid) (((seqid) + 1) & 0x3fffff) 122#define NEXT_SEQID(seqid) (((seqid) + 1) & 0x3fffff)
122 123
@@ -124,6 +125,14 @@ struct ipmi_channel
124{ 125{
125 unsigned char medium; 126 unsigned char medium;
126 unsigned char protocol; 127 unsigned char protocol;
128
129 /* My slave address. This is initialized to IPMI_BMC_SLAVE_ADDR,
130 but may be changed by the user. */
131 unsigned char address;
132
133 /* My LUN. This should generally stay the SMS LUN, but just in
134 case... */
135 unsigned char lun;
127}; 136};
128 137
129#ifdef CONFIG_PROC_FS 138#ifdef CONFIG_PROC_FS
@@ -135,7 +144,7 @@ struct ipmi_proc_entry
135#endif 144#endif
136 145
137#define IPMI_IPMB_NUM_SEQ 64 146#define IPMI_IPMB_NUM_SEQ 64
138#define IPMI_MAX_CHANNELS 8 147#define IPMI_MAX_CHANNELS 16
139struct ipmi_smi 148struct ipmi_smi
140{ 149{
141 /* What interface number are we? */ 150 /* What interface number are we? */
@@ -193,20 +202,6 @@ struct ipmi_smi
193 struct list_head waiting_events; 202 struct list_head waiting_events;
194 unsigned int waiting_events_count; /* How many events in queue? */ 203 unsigned int waiting_events_count; /* How many events in queue? */
195 204
196 /* This will be non-null if someone registers to receive all
197 IPMI commands (this is for interface emulation). There
198 may not be any things in the cmd_rcvrs list above when
199 this is registered. */
200 ipmi_user_t all_cmd_rcvr;
201
202 /* My slave address. This is initialized to IPMI_BMC_SLAVE_ADDR,
203 but may be changed by the user. */
204 unsigned char my_address;
205
206 /* My LUN. This should generally stay the SMS LUN, but just in
207 case... */
208 unsigned char my_lun;
209
210 /* The event receiver for my BMC, only really used at panic 205 /* The event receiver for my BMC, only really used at panic
211 shutdown as a place to store this. */ 206 shutdown as a place to store this. */
212 unsigned char event_receiver; 207 unsigned char event_receiver;
@@ -218,7 +213,7 @@ struct ipmi_smi
218 interface comes in with a NULL user, call this routine with 213 interface comes in with a NULL user, call this routine with
219 it. Note that the message will still be freed by the 214 it. Note that the message will still be freed by the
220 caller. This only works on the system interface. */ 215 caller. This only works on the system interface. */
221 void (*null_user_handler)(ipmi_smi_t intf, struct ipmi_smi_msg *msg); 216 void (*null_user_handler)(ipmi_smi_t intf, struct ipmi_recv_msg *msg);
222 217
223 /* When we are scanning the channels for an SMI, this will 218 /* When we are scanning the channels for an SMI, this will
224 tell which channel we are scanning. */ 219 tell which channel we are scanning. */
@@ -325,7 +320,7 @@ int ipmi_smi_watcher_register(struct ipmi_smi_watcher *watcher)
325 down_read(&interfaces_sem); 320 down_read(&interfaces_sem);
326 down_write(&smi_watchers_sem); 321 down_write(&smi_watchers_sem);
327 list_add(&(watcher->link), &smi_watchers); 322 list_add(&(watcher->link), &smi_watchers);
328 for (i=0; i<MAX_IPMI_INTERFACES; i++) { 323 for (i = 0; i < MAX_IPMI_INTERFACES; i++) {
329 if (ipmi_interfaces[i] != NULL) { 324 if (ipmi_interfaces[i] != NULL) {
330 watcher->new_smi(i); 325 watcher->new_smi(i);
331 } 326 }
@@ -458,7 +453,27 @@ unsigned int ipmi_addr_length(int addr_type)
458 453
459static void deliver_response(struct ipmi_recv_msg *msg) 454static void deliver_response(struct ipmi_recv_msg *msg)
460{ 455{
461 msg->user->handler->ipmi_recv_hndl(msg, msg->user->handler_data); 456 if (! msg->user) {
457 ipmi_smi_t intf = msg->user_msg_data;
458 unsigned long flags;
459
460 /* Special handling for NULL users. */
461 if (intf->null_user_handler) {
462 intf->null_user_handler(intf, msg);
463 spin_lock_irqsave(&intf->counter_lock, flags);
464 intf->handled_local_responses++;
465 spin_unlock_irqrestore(&intf->counter_lock, flags);
466 } else {
467 /* No handler, so give up. */
468 spin_lock_irqsave(&intf->counter_lock, flags);
469 intf->unhandled_local_responses++;
470 spin_unlock_irqrestore(&intf->counter_lock, flags);
471 }
472 ipmi_free_recv_msg(msg);
473 } else {
474 msg->user->handler->ipmi_recv_hndl(msg,
475 msg->user->handler_data);
476 }
462} 477}
463 478
464/* Find the next sequence number not being used and add the given 479/* Find the next sequence number not being used and add the given
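
With deliver_response() reworked as above, a response with no owning user is no longer dropped: the interface pointer rides in user_msg_data and is handed to the interface's null_user_handler. The sketch below shows the calling convention for such an internal request; it mirrors the send_channel_info_cmd() change later in this patch, but the helper name is invented and a real caller would also clear null_user_handler once the response has been handled.

    /* Illustrative only, not part of the patch.  Issue a request with
     * no user; user_msg_data carries the intf so deliver_response()
     * above can find the null_user_handler. */
    static int example_internal_request(ipmi_smi_t intf,
                                        struct ipmi_addr *addr,
                                        struct kernel_ipmi_msg *msg)
    {
            intf->null_user_handler = channel_handler;
            return i_ipmi_request(NULL,           /* no user */
                                  intf,
                                  addr,
                                  0,              /* msgid */
                                  msg,
                                  intf,           /* user_msg_data */
                                  NULL, NULL,     /* no supplied messages */
                                  0,              /* priority */
                                  intf->channels[0].address,
                                  intf->channels[0].lun,
                                  -1, 0);         /* default retries */
    }
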
@@ -475,9 +490,9 @@ static int intf_next_seq(ipmi_smi_t intf,
475 int rv = 0; 490 int rv = 0;
476 unsigned int i; 491 unsigned int i;
477 492
478 for (i=intf->curr_seq; 493 for (i = intf->curr_seq;
479 (i+1)%IPMI_IPMB_NUM_SEQ != intf->curr_seq; 494 (i+1)%IPMI_IPMB_NUM_SEQ != intf->curr_seq;
480 i=(i+1)%IPMI_IPMB_NUM_SEQ) 495 i = (i+1)%IPMI_IPMB_NUM_SEQ)
481 { 496 {
482 if (! intf->seq_table[i].inuse) 497 if (! intf->seq_table[i].inuse)
483 break; 498 break;
@@ -712,7 +727,7 @@ static int ipmi_destroy_user_nolock(ipmi_user_t user)
712 727
713 /* Remove the user from the interfaces sequence table. */ 728 /* Remove the user from the interfaces sequence table. */
714 spin_lock_irqsave(&(user->intf->seq_lock), flags); 729 spin_lock_irqsave(&(user->intf->seq_lock), flags);
715 for (i=0; i<IPMI_IPMB_NUM_SEQ; i++) { 730 for (i = 0; i < IPMI_IPMB_NUM_SEQ; i++) {
716 if (user->intf->seq_table[i].inuse 731 if (user->intf->seq_table[i].inuse
717 && (user->intf->seq_table[i].recv_msg->user == user)) 732 && (user->intf->seq_table[i].recv_msg->user == user))
718 { 733 {
@@ -766,26 +781,44 @@ void ipmi_get_version(ipmi_user_t user,
766 *minor = user->intf->version_minor; 781 *minor = user->intf->version_minor;
767} 782}
768 783
769void ipmi_set_my_address(ipmi_user_t user, 784int ipmi_set_my_address(ipmi_user_t user,
770 unsigned char address) 785 unsigned int channel,
786 unsigned char address)
771{ 787{
772 user->intf->my_address = address; 788 if (channel >= IPMI_MAX_CHANNELS)
789 return -EINVAL;
790 user->intf->channels[channel].address = address;
791 return 0;
773} 792}
774 793
775unsigned char ipmi_get_my_address(ipmi_user_t user) 794int ipmi_get_my_address(ipmi_user_t user,
795 unsigned int channel,
796 unsigned char *address)
776{ 797{
777 return user->intf->my_address; 798 if (channel >= IPMI_MAX_CHANNELS)
799 return -EINVAL;
800 *address = user->intf->channels[channel].address;
801 return 0;
778} 802}
779 803
780void ipmi_set_my_LUN(ipmi_user_t user, 804int ipmi_set_my_LUN(ipmi_user_t user,
781 unsigned char LUN) 805 unsigned int channel,
806 unsigned char LUN)
782{ 807{
783 user->intf->my_lun = LUN & 0x3; 808 if (channel >= IPMI_MAX_CHANNELS)
809 return -EINVAL;
810 user->intf->channels[channel].lun = LUN & 0x3;
811 return 0;
784} 812}
785 813
786unsigned char ipmi_get_my_LUN(ipmi_user_t user) 814int ipmi_get_my_LUN(ipmi_user_t user,
815 unsigned int channel,
816 unsigned char *address)
787{ 817{
788 return user->intf->my_lun; 818 if (channel >= IPMI_MAX_CHANNELS)
819 return -EINVAL;
820 *address = user->intf->channels[channel].lun;
821 return 0;
789} 822}
790 823
791int ipmi_set_gets_events(ipmi_user_t user, int val) 824int ipmi_set_gets_events(ipmi_user_t user, int val)
@@ -828,11 +861,6 @@ int ipmi_register_for_cmd(ipmi_user_t user,
828 861
829 read_lock(&(user->intf->users_lock)); 862 read_lock(&(user->intf->users_lock));
830 write_lock_irqsave(&(user->intf->cmd_rcvr_lock), flags); 863 write_lock_irqsave(&(user->intf->cmd_rcvr_lock), flags);
831 if (user->intf->all_cmd_rcvr != NULL) {
832 rv = -EBUSY;
833 goto out_unlock;
834 }
835
836 /* Make sure the command/netfn is not already registered. */ 864 /* Make sure the command/netfn is not already registered. */
837 list_for_each_entry(cmp, &(user->intf->cmd_rcvrs), link) { 865 list_for_each_entry(cmp, &(user->intf->cmd_rcvrs), link) {
838 if ((cmp->netfn == netfn) && (cmp->cmd == cmd)) { 866 if ((cmp->netfn == netfn) && (cmp->cmd == cmd)) {
@@ -847,7 +875,7 @@ int ipmi_register_for_cmd(ipmi_user_t user,
847 rcvr->user = user; 875 rcvr->user = user;
848 list_add_tail(&(rcvr->link), &(user->intf->cmd_rcvrs)); 876 list_add_tail(&(rcvr->link), &(user->intf->cmd_rcvrs));
849 } 877 }
850 out_unlock: 878
851 write_unlock_irqrestore(&(user->intf->cmd_rcvr_lock), flags); 879 write_unlock_irqrestore(&(user->intf->cmd_rcvr_lock), flags);
852 read_unlock(&(user->intf->users_lock)); 880 read_unlock(&(user->intf->users_lock));
853 881
@@ -1213,7 +1241,7 @@ static inline int i_ipmi_request(ipmi_user_t user,
1213 unsigned char ipmb_seq; 1241 unsigned char ipmb_seq;
1214 long seqid; 1242 long seqid;
1215 1243
1216 if (addr->channel > IPMI_NUM_CHANNELS) { 1244 if (addr->channel >= IPMI_NUM_CHANNELS) {
1217 spin_lock_irqsave(&intf->counter_lock, flags); 1245 spin_lock_irqsave(&intf->counter_lock, flags);
1218 intf->sent_invalid_commands++; 1246 intf->sent_invalid_commands++;
1219 spin_unlock_irqrestore(&intf->counter_lock, flags); 1247 spin_unlock_irqrestore(&intf->counter_lock, flags);
@@ -1331,7 +1359,7 @@ static inline int i_ipmi_request(ipmi_user_t user,
1331#ifdef DEBUG_MSGING 1359#ifdef DEBUG_MSGING
1332 { 1360 {
1333 int m; 1361 int m;
1334 for (m=0; m<smi_msg->data_size; m++) 1362 for (m = 0; m < smi_msg->data_size; m++)
1335 printk(" %2.2x", smi_msg->data[m]); 1363 printk(" %2.2x", smi_msg->data[m]);
1336 printk("\n"); 1364 printk("\n");
1337 } 1365 }
@@ -1346,6 +1374,18 @@ static inline int i_ipmi_request(ipmi_user_t user,
1346 return rv; 1374 return rv;
1347} 1375}
1348 1376
1377static int check_addr(ipmi_smi_t intf,
1378 struct ipmi_addr *addr,
1379 unsigned char *saddr,
1380 unsigned char *lun)
1381{
1382 if (addr->channel >= IPMI_MAX_CHANNELS)
1383 return -EINVAL;
1384 *lun = intf->channels[addr->channel].lun;
1385 *saddr = intf->channels[addr->channel].address;
1386 return 0;
1387}
1388
1349int ipmi_request_settime(ipmi_user_t user, 1389int ipmi_request_settime(ipmi_user_t user,
1350 struct ipmi_addr *addr, 1390 struct ipmi_addr *addr,
1351 long msgid, 1391 long msgid,
@@ -1355,6 +1395,14 @@ int ipmi_request_settime(ipmi_user_t user,
1355 int retries, 1395 int retries,
1356 unsigned int retry_time_ms) 1396 unsigned int retry_time_ms)
1357{ 1397{
1398 unsigned char saddr, lun;
1399 int rv;
1400
1401 if (! user)
1402 return -EINVAL;
1403 rv = check_addr(user->intf, addr, &saddr, &lun);
1404 if (rv)
1405 return rv;
1358 return i_ipmi_request(user, 1406 return i_ipmi_request(user,
1359 user->intf, 1407 user->intf,
1360 addr, 1408 addr,
@@ -1363,8 +1411,8 @@ int ipmi_request_settime(ipmi_user_t user,
1363 user_msg_data, 1411 user_msg_data,
1364 NULL, NULL, 1412 NULL, NULL,
1365 priority, 1413 priority,
1366 user->intf->my_address, 1414 saddr,
1367 user->intf->my_lun, 1415 lun,
1368 retries, 1416 retries,
1369 retry_time_ms); 1417 retry_time_ms);
1370} 1418}
@@ -1378,6 +1426,14 @@ int ipmi_request_supply_msgs(ipmi_user_t user,
1378 struct ipmi_recv_msg *supplied_recv, 1426 struct ipmi_recv_msg *supplied_recv,
1379 int priority) 1427 int priority)
1380{ 1428{
1429 unsigned char saddr, lun;
1430 int rv;
1431
1432 if (! user)
1433 return -EINVAL;
1434 rv = check_addr(user->intf, addr, &saddr, &lun);
1435 if (rv)
1436 return rv;
1381 return i_ipmi_request(user, 1437 return i_ipmi_request(user,
1382 user->intf, 1438 user->intf,
1383 addr, 1439 addr,
@@ -1387,8 +1443,8 @@ int ipmi_request_supply_msgs(ipmi_user_t user,
1387 supplied_smi, 1443 supplied_smi,
1388 supplied_recv, 1444 supplied_recv,
1389 priority, 1445 priority,
1390 user->intf->my_address, 1446 saddr,
1391 user->intf->my_lun, 1447 lun,
1392 -1, 0); 1448 -1, 0);
1393} 1449}
1394 1450
@@ -1397,8 +1453,15 @@ static int ipmb_file_read_proc(char *page, char **start, off_t off,
1397{ 1453{
1398 char *out = (char *) page; 1454 char *out = (char *) page;
1399 ipmi_smi_t intf = data; 1455 ipmi_smi_t intf = data;
1456 int i;
 1457 int rv = 0;
1400 1458
1401 return sprintf(out, "%x\n", intf->my_address); 1459 for (i = 0; i < IPMI_MAX_CHANNELS; i++)
1460 rv += sprintf(out+rv, "%x ", intf->channels[i].address);
1461 out[rv-1] = '\n'; /* Replace the final space with a newline */
1462 out[rv] = '\0';
1463 rv++;
1464 return rv;
1402} 1465}
1403 1466
1404static int version_file_read_proc(char *page, char **start, off_t off, 1467static int version_file_read_proc(char *page, char **start, off_t off,
@@ -1588,29 +1651,30 @@ send_channel_info_cmd(ipmi_smi_t intf, int chan)
1588 (struct ipmi_addr *) &si, 1651 (struct ipmi_addr *) &si,
1589 0, 1652 0,
1590 &msg, 1653 &msg,
1591 NULL, 1654 intf,
1592 NULL, 1655 NULL,
1593 NULL, 1656 NULL,
1594 0, 1657 0,
1595 intf->my_address, 1658 intf->channels[0].address,
1596 intf->my_lun, 1659 intf->channels[0].lun,
1597 -1, 0); 1660 -1, 0);
1598} 1661}
1599 1662
1600static void 1663static void
1601channel_handler(ipmi_smi_t intf, struct ipmi_smi_msg *msg) 1664channel_handler(ipmi_smi_t intf, struct ipmi_recv_msg *msg)
1602{ 1665{
1603 int rv = 0; 1666 int rv = 0;
1604 int chan; 1667 int chan;
1605 1668
1606 if ((msg->rsp[0] == (IPMI_NETFN_APP_RESPONSE << 2)) 1669 if ((msg->addr.addr_type == IPMI_SYSTEM_INTERFACE_ADDR_TYPE)
1607 && (msg->rsp[1] == IPMI_GET_CHANNEL_INFO_CMD)) 1670 && (msg->msg.netfn == IPMI_NETFN_APP_RESPONSE)
1671 && (msg->msg.cmd == IPMI_GET_CHANNEL_INFO_CMD))
1608 { 1672 {
1609 /* It's the one we want */ 1673 /* It's the one we want */
1610 if (msg->rsp[2] != 0) { 1674 if (msg->msg.data[0] != 0) {
1611 /* Got an error from the channel, just go on. */ 1675 /* Got an error from the channel, just go on. */
1612 1676
1613 if (msg->rsp[2] == IPMI_INVALID_COMMAND_ERR) { 1677 if (msg->msg.data[0] == IPMI_INVALID_COMMAND_ERR) {
1614 /* If the MC does not support this 1678 /* If the MC does not support this
1615 command, that is legal. We just 1679 command, that is legal. We just
1616 assume it has one IPMB at channel 1680 assume it has one IPMB at channel
@@ -1627,13 +1691,13 @@ channel_handler(ipmi_smi_t intf, struct ipmi_smi_msg *msg)
1627 } 1691 }
1628 goto next_channel; 1692 goto next_channel;
1629 } 1693 }
1630 if (msg->rsp_size < 6) { 1694 if (msg->msg.data_len < 4) {
1631 /* Message not big enough, just go on. */ 1695 /* Message not big enough, just go on. */
1632 goto next_channel; 1696 goto next_channel;
1633 } 1697 }
1634 chan = intf->curr_channel; 1698 chan = intf->curr_channel;
1635 intf->channels[chan].medium = msg->rsp[4] & 0x7f; 1699 intf->channels[chan].medium = msg->msg.data[2] & 0x7f;
1636 intf->channels[chan].protocol = msg->rsp[5] & 0x1f; 1700 intf->channels[chan].protocol = msg->msg.data[3] & 0x1f;
1637 1701
1638 next_channel: 1702 next_channel:
1639 intf->curr_channel++; 1703 intf->curr_channel++;
@@ -1691,22 +1755,24 @@ int ipmi_register_smi(struct ipmi_smi_handlers *handlers,
1691 rv = -ENOMEM; 1755 rv = -ENOMEM;
1692 1756
1693 down_write(&interfaces_sem); 1757 down_write(&interfaces_sem);
1694 for (i=0; i<MAX_IPMI_INTERFACES; i++) { 1758 for (i = 0; i < MAX_IPMI_INTERFACES; i++) {
1695 if (ipmi_interfaces[i] == NULL) { 1759 if (ipmi_interfaces[i] == NULL) {
1696 new_intf->intf_num = i; 1760 new_intf->intf_num = i;
1697 new_intf->version_major = version_major; 1761 new_intf->version_major = version_major;
1698 new_intf->version_minor = version_minor; 1762 new_intf->version_minor = version_minor;
1699 if (slave_addr == 0) 1763 for (j = 0; j < IPMI_MAX_CHANNELS; j++) {
1700 new_intf->my_address = IPMI_BMC_SLAVE_ADDR; 1764 new_intf->channels[j].address
1701 else 1765 = IPMI_BMC_SLAVE_ADDR;
1702 new_intf->my_address = slave_addr; 1766 new_intf->channels[j].lun = 2;
1703 new_intf->my_lun = 2; /* the SMS LUN. */ 1767 }
1768 if (slave_addr != 0)
1769 new_intf->channels[0].address = slave_addr;
1704 rwlock_init(&(new_intf->users_lock)); 1770 rwlock_init(&(new_intf->users_lock));
1705 INIT_LIST_HEAD(&(new_intf->users)); 1771 INIT_LIST_HEAD(&(new_intf->users));
1706 new_intf->handlers = handlers; 1772 new_intf->handlers = handlers;
1707 new_intf->send_info = send_info; 1773 new_intf->send_info = send_info;
1708 spin_lock_init(&(new_intf->seq_lock)); 1774 spin_lock_init(&(new_intf->seq_lock));
1709 for (j=0; j<IPMI_IPMB_NUM_SEQ; j++) { 1775 for (j = 0; j < IPMI_IPMB_NUM_SEQ; j++) {
1710 new_intf->seq_table[j].inuse = 0; 1776 new_intf->seq_table[j].inuse = 0;
1711 new_intf->seq_table[j].seqid = 0; 1777 new_intf->seq_table[j].seqid = 0;
1712 } 1778 }
@@ -1722,7 +1788,6 @@ int ipmi_register_smi(struct ipmi_smi_handlers *handlers,
1722 rwlock_init(&(new_intf->cmd_rcvr_lock)); 1788 rwlock_init(&(new_intf->cmd_rcvr_lock));
1723 init_waitqueue_head(&new_intf->waitq); 1789 init_waitqueue_head(&new_intf->waitq);
1724 INIT_LIST_HEAD(&(new_intf->cmd_rcvrs)); 1790 INIT_LIST_HEAD(&(new_intf->cmd_rcvrs));
1725 new_intf->all_cmd_rcvr = NULL;
1726 1791
1727 spin_lock_init(&(new_intf->counter_lock)); 1792 spin_lock_init(&(new_intf->counter_lock));
1728 1793
@@ -1814,7 +1879,7 @@ static void clean_up_interface_data(ipmi_smi_t intf)
1814 free_recv_msg_list(&(intf->waiting_events)); 1879 free_recv_msg_list(&(intf->waiting_events));
1815 free_cmd_rcvr_list(&(intf->cmd_rcvrs)); 1880 free_cmd_rcvr_list(&(intf->cmd_rcvrs));
1816 1881
1817 for (i=0; i<IPMI_IPMB_NUM_SEQ; i++) { 1882 for (i = 0; i < IPMI_IPMB_NUM_SEQ; i++) {
1818 if ((intf->seq_table[i].inuse) 1883 if ((intf->seq_table[i].inuse)
1819 && (intf->seq_table[i].recv_msg)) 1884 && (intf->seq_table[i].recv_msg))
1820 { 1885 {
@@ -1833,7 +1898,7 @@ int ipmi_unregister_smi(ipmi_smi_t intf)
1833 down_write(&interfaces_sem); 1898 down_write(&interfaces_sem);
1834 if (list_empty(&(intf->users))) 1899 if (list_empty(&(intf->users)))
1835 { 1900 {
1836 for (i=0; i<MAX_IPMI_INTERFACES; i++) { 1901 for (i = 0; i < MAX_IPMI_INTERFACES; i++) {
1837 if (ipmi_interfaces[i] == intf) { 1902 if (ipmi_interfaces[i] == intf) {
1838 remove_proc_entries(intf); 1903 remove_proc_entries(intf);
1839 spin_lock_irqsave(&interfaces_lock, flags); 1904 spin_lock_irqsave(&interfaces_lock, flags);
@@ -1960,15 +2025,11 @@ static int handle_ipmb_get_msg_cmd(ipmi_smi_t intf,
1960 2025
1961 read_lock(&(intf->cmd_rcvr_lock)); 2026 read_lock(&(intf->cmd_rcvr_lock));
1962 2027
1963 if (intf->all_cmd_rcvr) { 2028 /* Find the command/netfn. */
1964 user = intf->all_cmd_rcvr; 2029 list_for_each_entry(rcvr, &(intf->cmd_rcvrs), link) {
1965 } else { 2030 if ((rcvr->netfn == netfn) && (rcvr->cmd == cmd)) {
1966 /* Find the command/netfn. */ 2031 user = rcvr->user;
1967 list_for_each_entry(rcvr, &(intf->cmd_rcvrs), link) { 2032 break;
1968 if ((rcvr->netfn == netfn) && (rcvr->cmd == cmd)) {
1969 user = rcvr->user;
1970 break;
1971 }
1972 } 2033 }
1973 } 2034 }
1974 read_unlock(&(intf->cmd_rcvr_lock)); 2035 read_unlock(&(intf->cmd_rcvr_lock));
@@ -1985,7 +2046,7 @@ static int handle_ipmb_get_msg_cmd(ipmi_smi_t intf,
1985 msg->data[3] = msg->rsp[6]; 2046 msg->data[3] = msg->rsp[6];
1986 msg->data[4] = ((netfn + 1) << 2) | (msg->rsp[7] & 0x3); 2047 msg->data[4] = ((netfn + 1) << 2) | (msg->rsp[7] & 0x3);
1987 msg->data[5] = ipmb_checksum(&(msg->data[3]), 2); 2048 msg->data[5] = ipmb_checksum(&(msg->data[3]), 2);
1988 msg->data[6] = intf->my_address; 2049 msg->data[6] = intf->channels[msg->rsp[3] & 0xf].address;
1989 /* rqseq/lun */ 2050 /* rqseq/lun */
1990 msg->data[7] = (msg->rsp[7] & 0xfc) | (msg->rsp[4] & 0x3); 2051 msg->data[7] = (msg->rsp[7] & 0xfc) | (msg->rsp[4] & 0x3);
1991 msg->data[8] = msg->rsp[8]; /* cmd */ 2052 msg->data[8] = msg->rsp[8]; /* cmd */
@@ -1997,7 +2058,7 @@ static int handle_ipmb_get_msg_cmd(ipmi_smi_t intf,
1997 { 2058 {
1998 int m; 2059 int m;
1999 printk("Invalid command:"); 2060 printk("Invalid command:");
2000 for (m=0; m<msg->data_size; m++) 2061 for (m = 0; m < msg->data_size; m++)
2001 printk(" %2.2x", msg->data[m]); 2062 printk(" %2.2x", msg->data[m]);
2002 printk("\n"); 2063 printk("\n");
2003 } 2064 }
@@ -2145,15 +2206,11 @@ static int handle_lan_get_msg_cmd(ipmi_smi_t intf,
2145 2206
2146 read_lock(&(intf->cmd_rcvr_lock)); 2207 read_lock(&(intf->cmd_rcvr_lock));
2147 2208
2148 if (intf->all_cmd_rcvr) { 2209 /* Find the command/netfn. */
2149 user = intf->all_cmd_rcvr; 2210 list_for_each_entry(rcvr, &(intf->cmd_rcvrs), link) {
2150 } else { 2211 if ((rcvr->netfn == netfn) && (rcvr->cmd == cmd)) {
2151 /* Find the command/netfn. */ 2212 user = rcvr->user;
2152 list_for_each_entry(rcvr, &(intf->cmd_rcvrs), link) { 2213 break;
2153 if ((rcvr->netfn == netfn) && (rcvr->cmd == cmd)) {
2154 user = rcvr->user;
2155 break;
2156 }
2157 } 2214 }
2158 } 2215 }
2159 read_unlock(&(intf->cmd_rcvr_lock)); 2216 read_unlock(&(intf->cmd_rcvr_lock));
@@ -2330,6 +2387,14 @@ static int handle_bmc_rsp(ipmi_smi_t intf,
2330 unsigned long flags; 2387 unsigned long flags;
2331 2388
2332 recv_msg = (struct ipmi_recv_msg *) msg->user_data; 2389 recv_msg = (struct ipmi_recv_msg *) msg->user_data;
2390 if (recv_msg == NULL)
2391 {
 2392 printk(KERN_WARNING "IPMI message received with no owner. This\n"
 2393 "could be because of a malformed message, or\n"
 2394 "because of a hardware error. Contact your\n"
 2395 "hardware vendor for assistance\n");
2396 return 0;
2397 }
2333 2398
2334 /* Make sure the user still exists. */ 2399 /* Make sure the user still exists. */
2335 list_for_each_entry(user, &(intf->users), link) { 2400 list_for_each_entry(user, &(intf->users), link) {
@@ -2340,19 +2405,11 @@ static int handle_bmc_rsp(ipmi_smi_t intf,
2340 } 2405 }
2341 } 2406 }
2342 2407
2343 if (!found) { 2408 if ((! found) && recv_msg->user) {
2344 /* Special handling for NULL users. */ 2409 /* The user for the message went away, so give up. */
2345 if (!recv_msg->user && intf->null_user_handler){ 2410 spin_lock_irqsave(&intf->counter_lock, flags);
2346 intf->null_user_handler(intf, msg); 2411 intf->unhandled_local_responses++;
2347 spin_lock_irqsave(&intf->counter_lock, flags); 2412 spin_unlock_irqrestore(&intf->counter_lock, flags);
2348 intf->handled_local_responses++;
2349 spin_unlock_irqrestore(&intf->counter_lock, flags);
2350 }else{
2351 /* The user for the message went away, so give up. */
2352 spin_lock_irqsave(&intf->counter_lock, flags);
2353 intf->unhandled_local_responses++;
2354 spin_unlock_irqrestore(&intf->counter_lock, flags);
2355 }
2356 ipmi_free_recv_msg(recv_msg); 2413 ipmi_free_recv_msg(recv_msg);
2357 } else { 2414 } else {
2358 struct ipmi_system_interface_addr *smi_addr; 2415 struct ipmi_system_interface_addr *smi_addr;
@@ -2392,7 +2449,7 @@ static int handle_new_recv_msg(ipmi_smi_t intf,
2392#ifdef DEBUG_MSGING 2449#ifdef DEBUG_MSGING
2393 int m; 2450 int m;
2394 printk("Recv:"); 2451 printk("Recv:");
2395 for (m=0; m<msg->rsp_size; m++) 2452 for (m = 0; m < msg->rsp_size; m++)
2396 printk(" %2.2x", msg->rsp[m]); 2453 printk(" %2.2x", msg->rsp[m]);
2397 printk("\n"); 2454 printk("\n");
2398#endif 2455#endif
@@ -2626,7 +2683,7 @@ smi_from_recv_msg(ipmi_smi_t intf, struct ipmi_recv_msg *recv_msg,
2626 { 2683 {
2627 int m; 2684 int m;
2628 printk("Resend: "); 2685 printk("Resend: ");
2629 for (m=0; m<smi_msg->data_size; m++) 2686 for (m = 0; m < smi_msg->data_size; m++)
2630 printk(" %2.2x", smi_msg->data[m]); 2687 printk(" %2.2x", smi_msg->data[m]);
2631 printk("\n"); 2688 printk("\n");
2632 } 2689 }
@@ -2647,7 +2704,7 @@ ipmi_timeout_handler(long timeout_period)
2647 INIT_LIST_HEAD(&timeouts); 2704 INIT_LIST_HEAD(&timeouts);
2648 2705
2649 spin_lock(&interfaces_lock); 2706 spin_lock(&interfaces_lock);
2650 for (i=0; i<MAX_IPMI_INTERFACES; i++) { 2707 for (i = 0; i < MAX_IPMI_INTERFACES; i++) {
2651 intf = ipmi_interfaces[i]; 2708 intf = ipmi_interfaces[i];
2652 if (intf == NULL) 2709 if (intf == NULL)
2653 continue; 2710 continue;
@@ -2672,7 +2729,7 @@ ipmi_timeout_handler(long timeout_period)
2672 have timed out, putting them in the timeouts 2729 have timed out, putting them in the timeouts
2673 list. */ 2730 list. */
2674 spin_lock_irqsave(&(intf->seq_lock), flags); 2731 spin_lock_irqsave(&(intf->seq_lock), flags);
2675 for (j=0; j<IPMI_IPMB_NUM_SEQ; j++) { 2732 for (j = 0; j < IPMI_IPMB_NUM_SEQ; j++) {
2676 struct seq_table *ent = &(intf->seq_table[j]); 2733 struct seq_table *ent = &(intf->seq_table[j]);
2677 if (!ent->inuse) 2734 if (!ent->inuse)
2678 continue; 2735 continue;
@@ -2712,7 +2769,7 @@ ipmi_timeout_handler(long timeout_period)
2712 spin_unlock(&intf->counter_lock); 2769 spin_unlock(&intf->counter_lock);
2713 smi_msg = smi_from_recv_msg(intf, 2770 smi_msg = smi_from_recv_msg(intf,
2714 ent->recv_msg, j, ent->seqid); 2771 ent->recv_msg, j, ent->seqid);
2715 if(!smi_msg) 2772 if (! smi_msg)
2716 continue; 2773 continue;
2717 2774
2718 spin_unlock_irqrestore(&(intf->seq_lock),flags); 2775 spin_unlock_irqrestore(&(intf->seq_lock),flags);
@@ -2743,7 +2800,7 @@ static void ipmi_request_event(void)
2743 int i; 2800 int i;
2744 2801
2745 spin_lock(&interfaces_lock); 2802 spin_lock(&interfaces_lock);
2746 for (i=0; i<MAX_IPMI_INTERFACES; i++) { 2803 for (i = 0; i < MAX_IPMI_INTERFACES; i++) {
2747 intf = ipmi_interfaces[i]; 2804 intf = ipmi_interfaces[i];
2748 if (intf == NULL) 2805 if (intf == NULL)
2749 continue; 2806 continue;
@@ -2838,28 +2895,30 @@ static void dummy_recv_done_handler(struct ipmi_recv_msg *msg)
2838} 2895}
2839 2896
2840#ifdef CONFIG_IPMI_PANIC_STRING 2897#ifdef CONFIG_IPMI_PANIC_STRING
2841static void event_receiver_fetcher(ipmi_smi_t intf, struct ipmi_smi_msg *msg) 2898static void event_receiver_fetcher(ipmi_smi_t intf, struct ipmi_recv_msg *msg)
2842{ 2899{
2843 if ((msg->rsp[0] == (IPMI_NETFN_SENSOR_EVENT_RESPONSE << 2)) 2900 if ((msg->addr.addr_type == IPMI_SYSTEM_INTERFACE_ADDR_TYPE)
2844 && (msg->rsp[1] == IPMI_GET_EVENT_RECEIVER_CMD) 2901 && (msg->msg.netfn == IPMI_NETFN_SENSOR_EVENT_RESPONSE)
2845 && (msg->rsp[2] == IPMI_CC_NO_ERROR)) 2902 && (msg->msg.cmd == IPMI_GET_EVENT_RECEIVER_CMD)
2903 && (msg->msg.data[0] == IPMI_CC_NO_ERROR))
2846 { 2904 {
2847 /* A get event receiver command, save it. */ 2905 /* A get event receiver command, save it. */
2848 intf->event_receiver = msg->rsp[3]; 2906 intf->event_receiver = msg->msg.data[1];
2849 intf->event_receiver_lun = msg->rsp[4] & 0x3; 2907 intf->event_receiver_lun = msg->msg.data[2] & 0x3;
2850 } 2908 }
2851} 2909}
2852 2910
2853static void device_id_fetcher(ipmi_smi_t intf, struct ipmi_smi_msg *msg) 2911static void device_id_fetcher(ipmi_smi_t intf, struct ipmi_recv_msg *msg)
2854{ 2912{
2855 if ((msg->rsp[0] == (IPMI_NETFN_APP_RESPONSE << 2)) 2913 if ((msg->addr.addr_type == IPMI_SYSTEM_INTERFACE_ADDR_TYPE)
2856 && (msg->rsp[1] == IPMI_GET_DEVICE_ID_CMD) 2914 && (msg->msg.netfn == IPMI_NETFN_APP_RESPONSE)
2857 && (msg->rsp[2] == IPMI_CC_NO_ERROR)) 2915 && (msg->msg.cmd == IPMI_GET_DEVICE_ID_CMD)
2916 && (msg->msg.data[0] == IPMI_CC_NO_ERROR))
2858 { 2917 {
2859 /* A get device id command, save if we are an event 2918 /* A get device id command, save if we are an event
2860 receiver or generator. */ 2919 receiver or generator. */
2861 intf->local_sel_device = (msg->rsp[8] >> 2) & 1; 2920 intf->local_sel_device = (msg->msg.data[6] >> 2) & 1;
2862 intf->local_event_generator = (msg->rsp[8] >> 5) & 1; 2921 intf->local_event_generator = (msg->msg.data[6] >> 5) & 1;
2863 } 2922 }
2864} 2923}
2865#endif 2924#endif
@@ -2903,7 +2962,7 @@ static void send_panic_events(char *str)
2903 recv_msg.done = dummy_recv_done_handler; 2962 recv_msg.done = dummy_recv_done_handler;
2904 2963
2905 /* For every registered interface, send the event. */ 2964 /* For every registered interface, send the event. */
2906 for (i=0; i<MAX_IPMI_INTERFACES; i++) { 2965 for (i = 0; i < MAX_IPMI_INTERFACES; i++) {
2907 intf = ipmi_interfaces[i]; 2966 intf = ipmi_interfaces[i];
2908 if (intf == NULL) 2967 if (intf == NULL)
2909 continue; 2968 continue;
@@ -2915,12 +2974,12 @@ static void send_panic_events(char *str)
2915 &addr, 2974 &addr,
2916 0, 2975 0,
2917 &msg, 2976 &msg,
2918 NULL, 2977 intf,
2919 &smi_msg, 2978 &smi_msg,
2920 &recv_msg, 2979 &recv_msg,
2921 0, 2980 0,
2922 intf->my_address, 2981 intf->channels[0].address,
2923 intf->my_lun, 2982 intf->channels[0].lun,
2924 0, 1); /* Don't retry, and don't wait. */ 2983 0, 1); /* Don't retry, and don't wait. */
2925 } 2984 }
2926 2985
@@ -2930,7 +2989,7 @@ static void send_panic_events(char *str)
2930 if (!str) 2989 if (!str)
2931 return; 2990 return;
2932 2991
2933 for (i=0; i<MAX_IPMI_INTERFACES; i++) { 2992 for (i = 0; i < MAX_IPMI_INTERFACES; i++) {
2934 char *p = str; 2993 char *p = str;
2935 struct ipmi_ipmb_addr *ipmb; 2994 struct ipmi_ipmb_addr *ipmb;
2936 int j; 2995 int j;
@@ -2961,12 +3020,12 @@ static void send_panic_events(char *str)
2961 &addr, 3020 &addr,
2962 0, 3021 0,
2963 &msg, 3022 &msg,
2964 NULL, 3023 intf,
2965 &smi_msg, 3024 &smi_msg,
2966 &recv_msg, 3025 &recv_msg,
2967 0, 3026 0,
2968 intf->my_address, 3027 intf->channels[0].address,
2969 intf->my_lun, 3028 intf->channels[0].lun,
2970 0, 1); /* Don't retry, and don't wait. */ 3029 0, 1); /* Don't retry, and don't wait. */
2971 3030
2972 if (intf->local_event_generator) { 3031 if (intf->local_event_generator) {
@@ -2981,12 +3040,12 @@ static void send_panic_events(char *str)
2981 &addr, 3040 &addr,
2982 0, 3041 0,
2983 &msg, 3042 &msg,
2984 NULL, 3043 intf,
2985 &smi_msg, 3044 &smi_msg,
2986 &recv_msg, 3045 &recv_msg,
2987 0, 3046 0,
2988 intf->my_address, 3047 intf->channels[0].address,
2989 intf->my_lun, 3048 intf->channels[0].lun,
2990 0, 1); /* no retry, and no wait. */ 3049 0, 1); /* no retry, and no wait. */
2991 } 3050 }
2992 intf->null_user_handler = NULL; 3051 intf->null_user_handler = NULL;
@@ -2996,7 +3055,7 @@ static void send_panic_events(char *str)
2996 be zero, and it must not be my address. */ 3055 be zero, and it must not be my address. */
2997 if (((intf->event_receiver & 1) == 0) 3056 if (((intf->event_receiver & 1) == 0)
2998 && (intf->event_receiver != 0) 3057 && (intf->event_receiver != 0)
2999 && (intf->event_receiver != intf->my_address)) 3058 && (intf->event_receiver != intf->channels[0].address))
3000 { 3059 {
3001 /* The event receiver is valid, send an IPMB 3060 /* The event receiver is valid, send an IPMB
3002 message. */ 3061 message. */
@@ -3031,7 +3090,7 @@ static void send_panic_events(char *str)
3031 data[0] = 0; 3090 data[0] = 0;
3032 data[1] = 0; 3091 data[1] = 0;
3033 data[2] = 0xf0; /* OEM event without timestamp. */ 3092 data[2] = 0xf0; /* OEM event without timestamp. */
3034 data[3] = intf->my_address; 3093 data[3] = intf->channels[0].address;
3035 data[4] = j++; /* sequence # */ 3094 data[4] = j++; /* sequence # */
3036 /* Always give 11 bytes, so strncpy will fill 3095 /* Always give 11 bytes, so strncpy will fill
3037 it with zeroes for me. */ 3096 it with zeroes for me. */
@@ -3043,12 +3102,12 @@ static void send_panic_events(char *str)
3043 &addr, 3102 &addr,
3044 0, 3103 0,
3045 &msg, 3104 &msg,
3046 NULL, 3105 intf,
3047 &smi_msg, 3106 &smi_msg,
3048 &recv_msg, 3107 &recv_msg,
3049 0, 3108 0,
3050 intf->my_address, 3109 intf->channels[0].address,
3051 intf->my_lun, 3110 intf->channels[0].lun,
3052 0, 1); /* no retry, and no wait. */ 3111 0, 1); /* no retry, and no wait. */
3053 } 3112 }
3054 } 3113 }
@@ -3070,7 +3129,7 @@ static int panic_event(struct notifier_block *this,
3070 has_paniced = 1; 3129 has_paniced = 1;
3071 3130
3072 /* For every registered interface, set it to run to completion. */ 3131 /* For every registered interface, set it to run to completion. */
3073 for (i=0; i<MAX_IPMI_INTERFACES; i++) { 3132 for (i = 0; i < MAX_IPMI_INTERFACES; i++) {
3074 intf = ipmi_interfaces[i]; 3133 intf = ipmi_interfaces[i];
3075 if (intf == NULL) 3134 if (intf == NULL)
3076 continue; 3135 continue;
@@ -3099,9 +3158,9 @@ static int ipmi_init_msghandler(void)
3099 return 0; 3158 return 0;
3100 3159
3101 printk(KERN_INFO "ipmi message handler version " 3160 printk(KERN_INFO "ipmi message handler version "
3102 IPMI_MSGHANDLER_VERSION "\n"); 3161 IPMI_DRIVER_VERSION "\n");
3103 3162
3104 for (i=0; i<MAX_IPMI_INTERFACES; i++) { 3163 for (i = 0; i < MAX_IPMI_INTERFACES; i++) {
3105 ipmi_interfaces[i] = NULL; 3164 ipmi_interfaces[i] = NULL;
3106 } 3165 }
3107 3166
@@ -3171,6 +3230,9 @@ module_exit(cleanup_ipmi);
3171 3230
3172module_init(ipmi_init_msghandler_mod); 3231module_init(ipmi_init_msghandler_mod);
3173MODULE_LICENSE("GPL"); 3232MODULE_LICENSE("GPL");
3233MODULE_AUTHOR("Corey Minyard <minyard@mvista.com>");
3234MODULE_DESCRIPTION("Incoming and outgoing message routing for an IPMI interface.");
3235MODULE_VERSION(IPMI_DRIVER_VERSION);
3174 3236
3175EXPORT_SYMBOL(ipmi_create_user); 3237EXPORT_SYMBOL(ipmi_create_user);
3176EXPORT_SYMBOL(ipmi_destroy_user); 3238EXPORT_SYMBOL(ipmi_destroy_user);
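
For in-kernel users, the address/LUN accessors change from void or unsigned-char returns to int returns with an explicit channel argument, so callers now have to check for -EINVAL. A short sketch under the new signatures follows; the caller function itself is hypothetical.

    /* Hypothetical caller, shown only to illustrate the new
     * channel-aware signatures; error codes come straight from the
     * functions above. */
    static int example_configure_channel0(ipmi_user_t user)
    {
            unsigned char addr, lun;
            int rv;

            rv = ipmi_set_my_address(user, 0, 0x20);  /* was void before */
            if (rv)
                    return rv;              /* -EINVAL: bad channel */

            rv = ipmi_get_my_address(user, 0, &addr);
            if (rv)
                    return rv;

            rv = ipmi_get_my_LUN(user, 0, &lun);
            if (rv)
                    return rv;

            printk(KERN_INFO "channel 0: address 0x%2.2x, LUN %d\n",
                   addr, lun);
            return 0;
    }
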
diff --git a/drivers/char/ipmi/ipmi_poweroff.c b/drivers/char/ipmi/ipmi_poweroff.c
index f951c30236c9..e82a96ba396b 100644
--- a/drivers/char/ipmi/ipmi_poweroff.c
+++ b/drivers/char/ipmi/ipmi_poweroff.c
@@ -42,7 +42,6 @@
42#include <linux/ipmi_smi.h> 42#include <linux/ipmi_smi.h>
43 43
44#define PFX "IPMI poweroff: " 44#define PFX "IPMI poweroff: "
45#define IPMI_POWEROFF_VERSION "v33"
46 45
47/* Where to we insert our poweroff function? */ 46/* Where to we insert our poweroff function? */
48extern void (*pm_power_off)(void); 47extern void (*pm_power_off)(void);
@@ -53,16 +52,17 @@ extern void (*pm_power_off)(void);
53#define IPMI_CHASSIS_POWER_CYCLE 0x02 /* power cycle */ 52#define IPMI_CHASSIS_POWER_CYCLE 0x02 /* power cycle */
54 53
55/* the IPMI data command */ 54/* the IPMI data command */
56static int poweroff_control = IPMI_CHASSIS_POWER_DOWN; 55static int poweroff_powercycle;
57 56
58/* parameter definition to allow user to flag power cycle */ 57/* parameter definition to allow user to flag power cycle */
59module_param(poweroff_control, int, IPMI_CHASSIS_POWER_DOWN); 58module_param(poweroff_powercycle, int, 0);
60MODULE_PARM_DESC(poweroff_control, " Set to 2 to enable power cycle instead of power down. Power cycle is contingent on hardware support, otherwise it defaults back to power down."); 59MODULE_PARM_DESC(poweroff_powercycle, " Set to non-zero to enable power cycle instead of power down. Power cycle is contingent on hardware support, otherwise it defaults back to power down.");
61 60
62/* Stuff from the get device id command. */ 61/* Stuff from the get device id command. */
63static unsigned int mfg_id; 62static unsigned int mfg_id;
64static unsigned int prod_id; 63static unsigned int prod_id;
65static unsigned char capabilities; 64static unsigned char capabilities;
65static unsigned char ipmi_version;
66 66
67/* We use our own messages for this operation, we don't let the system 67/* We use our own messages for this operation, we don't let the system
68 allocate them, since we may be in a panic situation. The whole 68 allocate them, since we may be in a panic situation. The whole
@@ -338,6 +338,25 @@ static void ipmi_poweroff_cpi1 (ipmi_user_t user)
338} 338}
339 339
340/* 340/*
341 * ipmi_dell_chassis_detect()
342 * Dell systems with IPMI < 1.5 don't set the chassis capability bit
343 * but they can handle a chassis poweroff or powercycle command.
344 */
345
346#define DELL_IANA_MFR_ID {0xA2, 0x02, 0x00}
347static int ipmi_dell_chassis_detect (ipmi_user_t user)
348{
349 const char ipmi_version_major = ipmi_version & 0xF;
350 const char ipmi_version_minor = (ipmi_version >> 4) & 0xF;
351 const char mfr[3]=DELL_IANA_MFR_ID;
352 if (!memcmp(mfr, &mfg_id, sizeof(mfr)) &&
353 ipmi_version_major <= 1 &&
354 ipmi_version_minor < 5)
355 return 1;
356 return 0;
357}
358
359/*
341 * Standard chassis support 360 * Standard chassis support
342 */ 361 */
343 362
@@ -366,37 +385,34 @@ static void ipmi_poweroff_chassis (ipmi_user_t user)
366 385
367 powercyclefailed: 386 powercyclefailed:
368 printk(KERN_INFO PFX "Powering %s via IPMI chassis control command\n", 387 printk(KERN_INFO PFX "Powering %s via IPMI chassis control command\n",
369 ((poweroff_control != IPMI_CHASSIS_POWER_CYCLE) ? "down" : "cycle")); 388 (poweroff_powercycle ? "cycle" : "down"));
370 389
371 /* 390 /*
372 * Power down 391 * Power down
373 */ 392 */
374 send_msg.netfn = IPMI_NETFN_CHASSIS_REQUEST; 393 send_msg.netfn = IPMI_NETFN_CHASSIS_REQUEST;
375 send_msg.cmd = IPMI_CHASSIS_CONTROL_CMD; 394 send_msg.cmd = IPMI_CHASSIS_CONTROL_CMD;
376 data[0] = poweroff_control; 395 if (poweroff_powercycle)
396 data[0] = IPMI_CHASSIS_POWER_CYCLE;
397 else
398 data[0] = IPMI_CHASSIS_POWER_DOWN;
377 send_msg.data = data; 399 send_msg.data = data;
378 send_msg.data_len = sizeof(data); 400 send_msg.data_len = sizeof(data);
379 rv = ipmi_request_in_rc_mode(user, 401 rv = ipmi_request_in_rc_mode(user,
380 (struct ipmi_addr *) &smi_addr, 402 (struct ipmi_addr *) &smi_addr,
381 &send_msg); 403 &send_msg);
382 if (rv) { 404 if (rv) {
383 switch (poweroff_control) { 405 if (poweroff_powercycle) {
384 case IPMI_CHASSIS_POWER_CYCLE: 406 /* power cycle failed, default to power down */
385 /* power cycle failed, default to power down */ 407 printk(KERN_ERR PFX "Unable to send chassis power " \
386 printk(KERN_ERR PFX "Unable to send chassis power " \ 408 "cycle message, IPMI error 0x%x\n", rv);
387 "cycle message, IPMI error 0x%x\n", rv); 409 poweroff_powercycle = 0;
388 poweroff_control = IPMI_CHASSIS_POWER_DOWN; 410 goto powercyclefailed;
389 goto powercyclefailed;
390
391 case IPMI_CHASSIS_POWER_DOWN:
392 default:
393 printk(KERN_ERR PFX "Unable to send chassis power " \
394 "down message, IPMI error 0x%x\n", rv);
395 break;
396 } 411 }
397 }
398 412
399 return; 413 printk(KERN_ERR PFX "Unable to send chassis power " \
414 "down message, IPMI error 0x%x\n", rv);
415 }
400} 416}
401 417
402 418
@@ -414,6 +430,9 @@ static struct poweroff_function poweroff_functions[] = {
414 { .platform_type = "CPI1", 430 { .platform_type = "CPI1",
415 .detect = ipmi_cpi1_detect, 431 .detect = ipmi_cpi1_detect,
416 .poweroff_func = ipmi_poweroff_cpi1 }, 432 .poweroff_func = ipmi_poweroff_cpi1 },
433 { .platform_type = "chassis",
434 .detect = ipmi_dell_chassis_detect,
435 .poweroff_func = ipmi_poweroff_chassis },
417 /* Chassis should generally be last, other things should override 436 /* Chassis should generally be last, other things should override
418 it. */ 437 it. */
419 { .platform_type = "chassis", 438 { .platform_type = "chassis",
@@ -499,10 +518,11 @@ static void ipmi_po_new_smi(int if_num)
499 prod_id = (halt_recv_msg.msg.data[10] 518 prod_id = (halt_recv_msg.msg.data[10]
500 | (halt_recv_msg.msg.data[11] << 8)); 519 | (halt_recv_msg.msg.data[11] << 8));
501 capabilities = halt_recv_msg.msg.data[6]; 520 capabilities = halt_recv_msg.msg.data[6];
521 ipmi_version = halt_recv_msg.msg.data[5];
502 522
503 523
504 /* Scan for a poweroff method */ 524 /* Scan for a poweroff method */
505 for (i=0; i<NUM_PO_FUNCS; i++) { 525 for (i = 0; i < NUM_PO_FUNCS; i++) {
506 if (poweroff_functions[i].detect(ipmi_user)) 526 if (poweroff_functions[i].detect(ipmi_user))
507 goto found; 527 goto found;
508 } 528 }
@@ -538,39 +558,35 @@ static struct ipmi_smi_watcher smi_watcher =
538 558
539 559
540#ifdef CONFIG_PROC_FS 560#ifdef CONFIG_PROC_FS
541/* displays properties to proc */ 561#include <linux/sysctl.h>
542static int proc_read_chassctrl(char *page, char **start, off_t off, int count, 562
543 int *eof, void *data) 563static ctl_table ipmi_table[] = {
544{ 564 { .ctl_name = DEV_IPMI_POWEROFF_POWERCYCLE,
545 return sprintf(page, "%d\t[ 0=powerdown 2=powercycle ]\n", 565 .procname = "poweroff_powercycle",
546 poweroff_control); 566 .data = &poweroff_powercycle,
547} 567 .maxlen = sizeof(poweroff_powercycle),
568 .mode = 0644,
569 .proc_handler = &proc_dointvec },
570 { }
571};
548 572
549/* process property writes from proc */ 573static ctl_table ipmi_dir_table[] = {
550static int proc_write_chassctrl(struct file *file, const char *buffer, 574 { .ctl_name = DEV_IPMI,
551 unsigned long count, void *data) 575 .procname = "ipmi",
552{ 576 .mode = 0555,
553 int rv = count; 577 .child = ipmi_table },
554 unsigned int newval = 0; 578 { }
555 579};
556 sscanf(buffer, "%d", &newval);
557 switch (newval) {
558 case IPMI_CHASSIS_POWER_CYCLE:
559 printk(KERN_INFO PFX "power cycle is now enabled\n");
560 poweroff_control = newval;
561 break;
562
563 case IPMI_CHASSIS_POWER_DOWN:
564 poweroff_control = IPMI_CHASSIS_POWER_DOWN;
565 break;
566
567 default:
568 rv = -EINVAL;
569 break;
570 }
571 580
572 return rv; 581static ctl_table ipmi_root_table[] = {
573} 582 { .ctl_name = CTL_DEV,
583 .procname = "dev",
584 .mode = 0555,
585 .child = ipmi_dir_table },
586 { }
587};
588
589static struct ctl_table_header *ipmi_table_header;
574#endif /* CONFIG_PROC_FS */ 590#endif /* CONFIG_PROC_FS */
575 591
576/* 592/*
@@ -578,42 +594,32 @@ static int proc_write_chassctrl(struct file *file, const char *buffer,
578 */ 594 */
579static int ipmi_poweroff_init (void) 595static int ipmi_poweroff_init (void)
580{ 596{
581 int rv; 597 int rv;
582 struct proc_dir_entry *file;
583 598
584 printk ("Copyright (C) 2004 MontaVista Software -" 599 printk ("Copyright (C) 2004 MontaVista Software -"
585 " IPMI Powerdown via sys_reboot version " 600 " IPMI Powerdown via sys_reboot.\n");
586 IPMI_POWEROFF_VERSION ".\n"); 601
587 602 if (poweroff_powercycle)
588 switch (poweroff_control) { 603 printk(KERN_INFO PFX "Power cycle is enabled.\n");
589 case IPMI_CHASSIS_POWER_CYCLE: 604
590 printk(KERN_INFO PFX "Power cycle is enabled.\n"); 605#ifdef CONFIG_PROC_FS
591 break; 606 ipmi_table_header = register_sysctl_table(ipmi_root_table, 1);
592 607 if (!ipmi_table_header) {
593 case IPMI_CHASSIS_POWER_DOWN: 608 printk(KERN_ERR PFX "Unable to register powercycle sysctl\n");
594 default: 609 rv = -ENOMEM;
595 poweroff_control = IPMI_CHASSIS_POWER_DOWN; 610 goto out_err;
596 break;
597 } 611 }
612#endif
598 613
614#ifdef CONFIG_PROC_FS
599 rv = ipmi_smi_watcher_register(&smi_watcher); 615 rv = ipmi_smi_watcher_register(&smi_watcher);
616#endif
600 if (rv) { 617 if (rv) {
618 unregister_sysctl_table(ipmi_table_header);
601 printk(KERN_ERR PFX "Unable to register SMI watcher: %d\n", rv); 619 printk(KERN_ERR PFX "Unable to register SMI watcher: %d\n", rv);
602 goto out_err; 620 goto out_err;
603 } 621 }
604 622
605#ifdef CONFIG_PROC_FS
606 file = create_proc_entry("poweroff_control", 0, proc_ipmi_root);
607 if (!file) {
608 printk(KERN_ERR PFX "Unable to create proc power control\n");
609 } else {
610 file->nlink = 1;
611 file->read_proc = proc_read_chassctrl;
612 file->write_proc = proc_write_chassctrl;
613 file->owner = THIS_MODULE;
614 }
615#endif
616
617 out_err: 623 out_err:
618 return rv; 624 return rv;
619} 625}
@@ -624,7 +630,7 @@ static __exit void ipmi_poweroff_cleanup(void)
624 int rv; 630 int rv;
625 631
626#ifdef CONFIG_PROC_FS 632#ifdef CONFIG_PROC_FS
627 remove_proc_entry("poweroff_control", proc_ipmi_root); 633 unregister_sysctl_table(ipmi_table_header);
628#endif 634#endif
629 635
630 ipmi_smi_watcher_unregister(&smi_watcher); 636 ipmi_smi_watcher_unregister(&smi_watcher);
@@ -642,3 +648,5 @@ module_exit(ipmi_poweroff_cleanup);
642 648
643module_init(ipmi_poweroff_init); 649module_init(ipmi_poweroff_init);
644MODULE_LICENSE("GPL"); 650MODULE_LICENSE("GPL");
651MODULE_AUTHOR("Corey Minyard <minyard@mvista.com>");
652MODULE_DESCRIPTION("IPMI Poweroff extension to sys_reboot");
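
The old poweroff_control proc file is gone; the power-cycle preference now lives in a sysctl whose path follows the ctl_table names registered above, i.e. /proc/sys/dev/ipmi/poweroff_powercycle (it can also be set at load time via the module parameter). A small userspace sketch, assuming that path:

    /* Sketch: enable power cycle instead of power down at runtime.
     * The path is derived from the ctl_table registration above; the
     * DEV_IPMI* constants come from a sysctl.h change not shown here. */
    #include <stdio.h>

    int main(void)
    {
            FILE *f = fopen("/proc/sys/dev/ipmi/poweroff_powercycle", "w");

            if (!f) {
                    perror("fopen");
                    return 1;
            }
            fputs("1\n", f);    /* non-zero = power cycle, 0 = power down */
            return fclose(f) ? 1 : 0;
    }
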
diff --git a/drivers/char/ipmi/ipmi_si_intf.c b/drivers/char/ipmi/ipmi_si_intf.c
index a44b97304e95..1abec687865c 100644
--- a/drivers/char/ipmi/ipmi_si_intf.c
+++ b/drivers/char/ipmi/ipmi_si_intf.c
@@ -61,11 +61,11 @@
61# endif 61# endif
62static inline void add_usec_to_timer(struct timer_list *t, long v) 62static inline void add_usec_to_timer(struct timer_list *t, long v)
63{ 63{
64 t->sub_expires += nsec_to_arch_cycle(v * 1000); 64 t->arch_cycle_expires += nsec_to_arch_cycle(v * 1000);
65 while (t->sub_expires >= arch_cycles_per_jiffy) 65 while (t->arch_cycle_expires >= arch_cycles_per_jiffy)
66 { 66 {
67 t->expires++; 67 t->expires++;
68 t->sub_expires -= arch_cycles_per_jiffy; 68 t->arch_cycle_expires -= arch_cycles_per_jiffy;
69 } 69 }
70} 70}
71#endif 71#endif
@@ -75,8 +75,7 @@ static inline void add_usec_to_timer(struct timer_list *t, long v)
75#include <asm/io.h> 75#include <asm/io.h>
76#include "ipmi_si_sm.h" 76#include "ipmi_si_sm.h"
77#include <linux/init.h> 77#include <linux/init.h>
78 78#include <linux/dmi.h>
79#define IPMI_SI_VERSION "v33"
80 79
81/* Measure times between events in the driver. */ 80/* Measure times between events in the driver. */
82#undef DEBUG_TIMING 81#undef DEBUG_TIMING
@@ -109,6 +108,21 @@ enum si_type {
109 SI_KCS, SI_SMIC, SI_BT 108 SI_KCS, SI_SMIC, SI_BT
110}; 109};
111 110
111struct ipmi_device_id {
112 unsigned char device_id;
113 unsigned char device_revision;
114 unsigned char firmware_revision_1;
115 unsigned char firmware_revision_2;
116 unsigned char ipmi_version;
117 unsigned char additional_device_support;
118 unsigned char manufacturer_id[3];
119 unsigned char product_id[2];
120 unsigned char aux_firmware_revision[4];
121} __attribute__((packed));
122
123#define ipmi_version_major(v) ((v)->ipmi_version & 0xf)
124#define ipmi_version_minor(v) ((v)->ipmi_version >> 4)
125
112struct smi_info 126struct smi_info
113{ 127{
114 ipmi_smi_t intf; 128 ipmi_smi_t intf;
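
The scattered revision fields are consolidated into struct ipmi_device_id, which mirrors the Get Device ID response body byte for byte (hence the packed attribute), and the two macros split the BCD-encoded version byte. A brief sketch of the intended use, not taken from the patch itself:

    /* Sketch: map a Get Device ID response body (after the completion
     * code) onto the packed struct and report the IPMI version.  The
     * function and its arguments are hypothetical. */
    static void example_report_version(const unsigned char *resp,
                                       unsigned int resp_len)
    {
            struct ipmi_device_id id;

            if (resp_len < sizeof(id))
                    return;         /* response too short */

            memcpy(&id, resp, sizeof(id));
            printk(KERN_INFO "ipmi_si: IPMI version %d.%d\n",
                   ipmi_version_major(&id),
                   ipmi_version_minor(&id));
    }
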
@@ -131,12 +145,24 @@ struct smi_info
131 void (*irq_cleanup)(struct smi_info *info); 145 void (*irq_cleanup)(struct smi_info *info);
132 unsigned int io_size; 146 unsigned int io_size;
133 147
148 /* Per-OEM handler, called from handle_flags().
149 Returns 1 when handle_flags() needs to be re-run
150 or 0 indicating it set si_state itself.
151 */
152 int (*oem_data_avail_handler)(struct smi_info *smi_info);
153
134 /* Flags from the last GET_MSG_FLAGS command, used when an ATTN 154 /* Flags from the last GET_MSG_FLAGS command, used when an ATTN
135 is set to hold the flags until we are done handling everything 155 is set to hold the flags until we are done handling everything
136 from the flags. */ 156 from the flags. */
137#define RECEIVE_MSG_AVAIL 0x01 157#define RECEIVE_MSG_AVAIL 0x01
138#define EVENT_MSG_BUFFER_FULL 0x02 158#define EVENT_MSG_BUFFER_FULL 0x02
139#define WDT_PRE_TIMEOUT_INT 0x08 159#define WDT_PRE_TIMEOUT_INT 0x08
160#define OEM0_DATA_AVAIL 0x20
161#define OEM1_DATA_AVAIL 0x40
162#define OEM2_DATA_AVAIL 0x80
163#define OEM_DATA_AVAIL (OEM0_DATA_AVAIL | \
164 OEM1_DATA_AVAIL | \
165 OEM2_DATA_AVAIL)
140 unsigned char msg_flags; 166 unsigned char msg_flags;
141 167
142 /* If set to true, this will request events the next time the 168 /* If set to true, this will request events the next time the
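
The new OEM*_DATA_AVAIL bits and the oem_data_avail_handler hook let per-vendor code consume OEM flag bits reported by GET_MSG_FLAGS; handle_flags() calls the hook and re-runs itself when it returns 1 (see the retry label added further down). A hedged sketch of a handler honoring that contract; the body is invented, real handlers would start a transaction to drain the OEM data.

    /* Sketch of an oem_data_avail_handler obeying the contract in the
     * comment above.  Illustrative only. */
    static int example_oem_data_avail_handler(struct smi_info *smi_info)
    {
            if (smi_info->msg_flags & OEM0_DATA_AVAIL) {
                    /* Pretend the OEM0 data was consumed, then let
                       handle_flags() look at whatever flags remain. */
                    smi_info->msg_flags &= ~OEM0_DATA_AVAIL;
                    return 1;
            }

            /* Bits we do not understand: drop them and go idle, since
               returning 0 means we set si_state ourselves. */
            smi_info->msg_flags &= ~OEM_DATA_AVAIL;
            smi_info->si_state = SI_NORMAL;
            return 0;
    }
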
@@ -175,11 +201,7 @@ struct smi_info
175 interrupts. */ 201 interrupts. */
176 int interrupt_disabled; 202 int interrupt_disabled;
177 203
178 unsigned char ipmi_si_dev_rev; 204 struct ipmi_device_id device_id;
179 unsigned char ipmi_si_fw_rev_major;
180 unsigned char ipmi_si_fw_rev_minor;
181 unsigned char ipmi_version_major;
182 unsigned char ipmi_version_minor;
183 205
184 /* Slave address, could be reported from DMI. */ 206 /* Slave address, could be reported from DMI. */
185 unsigned char slave_addr; 207 unsigned char slave_addr;
@@ -245,7 +267,7 @@ static enum si_sm_result start_next_msg(struct smi_info *smi_info)
245 entry = smi_info->xmit_msgs.next; 267 entry = smi_info->xmit_msgs.next;
246 } 268 }
247 269
248 if (!entry) { 270 if (! entry) {
249 smi_info->curr_msg = NULL; 271 smi_info->curr_msg = NULL;
250 rv = SI_SM_IDLE; 272 rv = SI_SM_IDLE;
251 } else { 273 } else {
@@ -306,7 +328,7 @@ static void start_clear_flags(struct smi_info *smi_info)
306 memory, we will re-enable the interrupt. */ 328 memory, we will re-enable the interrupt. */
307static inline void disable_si_irq(struct smi_info *smi_info) 329static inline void disable_si_irq(struct smi_info *smi_info)
308{ 330{
309 if ((smi_info->irq) && (!smi_info->interrupt_disabled)) { 331 if ((smi_info->irq) && (! smi_info->interrupt_disabled)) {
310 disable_irq_nosync(smi_info->irq); 332 disable_irq_nosync(smi_info->irq);
311 smi_info->interrupt_disabled = 1; 333 smi_info->interrupt_disabled = 1;
312 } 334 }
@@ -322,6 +344,7 @@ static inline void enable_si_irq(struct smi_info *smi_info)
322 344
323static void handle_flags(struct smi_info *smi_info) 345static void handle_flags(struct smi_info *smi_info)
324{ 346{
347 retry:
325 if (smi_info->msg_flags & WDT_PRE_TIMEOUT_INT) { 348 if (smi_info->msg_flags & WDT_PRE_TIMEOUT_INT) {
326 /* Watchdog pre-timeout */ 349 /* Watchdog pre-timeout */
327 spin_lock(&smi_info->count_lock); 350 spin_lock(&smi_info->count_lock);
@@ -336,7 +359,7 @@ static void handle_flags(struct smi_info *smi_info)
336 } else if (smi_info->msg_flags & RECEIVE_MSG_AVAIL) { 359 } else if (smi_info->msg_flags & RECEIVE_MSG_AVAIL) {
337 /* Messages available. */ 360 /* Messages available. */
338 smi_info->curr_msg = ipmi_alloc_smi_msg(); 361 smi_info->curr_msg = ipmi_alloc_smi_msg();
339 if (!smi_info->curr_msg) { 362 if (! smi_info->curr_msg) {
340 disable_si_irq(smi_info); 363 disable_si_irq(smi_info);
341 smi_info->si_state = SI_NORMAL; 364 smi_info->si_state = SI_NORMAL;
342 return; 365 return;
@@ -355,7 +378,7 @@ static void handle_flags(struct smi_info *smi_info)
355 } else if (smi_info->msg_flags & EVENT_MSG_BUFFER_FULL) { 378 } else if (smi_info->msg_flags & EVENT_MSG_BUFFER_FULL) {
356 /* Events available. */ 379 /* Events available. */
357 smi_info->curr_msg = ipmi_alloc_smi_msg(); 380 smi_info->curr_msg = ipmi_alloc_smi_msg();
358 if (!smi_info->curr_msg) { 381 if (! smi_info->curr_msg) {
359 disable_si_irq(smi_info); 382 disable_si_irq(smi_info);
360 smi_info->si_state = SI_NORMAL; 383 smi_info->si_state = SI_NORMAL;
361 return; 384 return;
@@ -371,6 +394,10 @@ static void handle_flags(struct smi_info *smi_info)
371 smi_info->curr_msg->data, 394 smi_info->curr_msg->data,
372 smi_info->curr_msg->data_size); 395 smi_info->curr_msg->data_size);
373 smi_info->si_state = SI_GETTING_EVENTS; 396 smi_info->si_state = SI_GETTING_EVENTS;
397 } else if (smi_info->msg_flags & OEM_DATA_AVAIL) {
398 if (smi_info->oem_data_avail_handler)
399 if (smi_info->oem_data_avail_handler(smi_info))
400 goto retry;
374 } else { 401 } else {
375 smi_info->si_state = SI_NORMAL; 402 smi_info->si_state = SI_NORMAL;
376 } 403 }
@@ -387,7 +414,7 @@ static void handle_transaction_done(struct smi_info *smi_info)
387#endif 414#endif
388 switch (smi_info->si_state) { 415 switch (smi_info->si_state) {
389 case SI_NORMAL: 416 case SI_NORMAL:
390 if (!smi_info->curr_msg) 417 if (! smi_info->curr_msg)
391 break; 418 break;
392 419
393 smi_info->curr_msg->rsp_size 420 smi_info->curr_msg->rsp_size
@@ -761,18 +788,20 @@ static void si_restart_short_timer(struct smi_info *smi_info)
761#if defined(CONFIG_HIGH_RES_TIMERS) 788#if defined(CONFIG_HIGH_RES_TIMERS)
762 unsigned long flags; 789 unsigned long flags;
763 unsigned long jiffies_now; 790 unsigned long jiffies_now;
791 unsigned long seq;
764 792
765 if (del_timer(&(smi_info->si_timer))) { 793 if (del_timer(&(smi_info->si_timer))) {
766 /* If we don't delete the timer, then it will go off 794 /* If we don't delete the timer, then it will go off
767 immediately, anyway. So we only process if we 795 immediately, anyway. So we only process if we
768 actually delete the timer. */ 796 actually delete the timer. */
769 797
770 /* We already have irqsave on, so no need for it 798 do {
771 here. */ 799 seq = read_seqbegin_irqsave(&xtime_lock, flags);
772 read_lock(&xtime_lock); 800 jiffies_now = jiffies;
773 jiffies_now = jiffies; 801 smi_info->si_timer.expires = jiffies_now;
774 smi_info->si_timer.expires = jiffies_now; 802 smi_info->si_timer.arch_cycle_expires
775 smi_info->si_timer.sub_expires = get_arch_cycles(jiffies_now); 803 = get_arch_cycles(jiffies_now);
804 } while (read_seqretry_irqrestore(&xtime_lock, seq, flags));
776 805
777 add_usec_to_timer(&smi_info->si_timer, SI_SHORT_TIMEOUT_USEC); 806 add_usec_to_timer(&smi_info->si_timer, SI_SHORT_TIMEOUT_USEC);
778 807
@@ -826,15 +855,19 @@ static void smi_timeout(unsigned long data)
826 /* If the state machine asks for a short delay, then shorten 855 /* If the state machine asks for a short delay, then shorten
827 the timer timeout. */ 856 the timer timeout. */
828 if (smi_result == SI_SM_CALL_WITH_DELAY) { 857 if (smi_result == SI_SM_CALL_WITH_DELAY) {
858#if defined(CONFIG_HIGH_RES_TIMERS)
859 unsigned long seq;
860#endif
829 spin_lock_irqsave(&smi_info->count_lock, flags); 861 spin_lock_irqsave(&smi_info->count_lock, flags);
830 smi_info->short_timeouts++; 862 smi_info->short_timeouts++;
831 spin_unlock_irqrestore(&smi_info->count_lock, flags); 863 spin_unlock_irqrestore(&smi_info->count_lock, flags);
832#if defined(CONFIG_HIGH_RES_TIMERS) 864#if defined(CONFIG_HIGH_RES_TIMERS)
833 read_lock(&xtime_lock); 865 do {
834 smi_info->si_timer.expires = jiffies; 866 seq = read_seqbegin_irqsave(&xtime_lock, flags);
835 smi_info->si_timer.sub_expires 867 smi_info->si_timer.expires = jiffies;
836 = get_arch_cycles(smi_info->si_timer.expires); 868 smi_info->si_timer.arch_cycle_expires
837 read_unlock(&xtime_lock); 869 = get_arch_cycles(smi_info->si_timer.expires);
870 } while (read_seqretry_irqrestore(&xtime_lock, seq, flags));
838 add_usec_to_timer(&smi_info->si_timer, SI_SHORT_TIMEOUT_USEC); 871 add_usec_to_timer(&smi_info->si_timer, SI_SHORT_TIMEOUT_USEC);
839#else 872#else
840 smi_info->si_timer.expires = jiffies + 1; 873 smi_info->si_timer.expires = jiffies + 1;
@@ -845,7 +878,7 @@ static void smi_timeout(unsigned long data)
845 spin_unlock_irqrestore(&smi_info->count_lock, flags); 878 spin_unlock_irqrestore(&smi_info->count_lock, flags);
846 smi_info->si_timer.expires = jiffies + SI_TIMEOUT_JIFFIES; 879 smi_info->si_timer.expires = jiffies + SI_TIMEOUT_JIFFIES;
847#if defined(CONFIG_HIGH_RES_TIMERS) 880#if defined(CONFIG_HIGH_RES_TIMERS)
848 smi_info->si_timer.sub_expires = 0; 881 smi_info->si_timer.arch_cycle_expires = 0;
849#endif 882#endif
850 } 883 }
851 884
@@ -1014,7 +1047,7 @@ static int std_irq_setup(struct smi_info *info)
1014{ 1047{
1015 int rv; 1048 int rv;
1016 1049
1017 if (!info->irq) 1050 if (! info->irq)
1018 return 0; 1051 return 0;
1019 1052
1020 if (info->si_type == SI_BT) { 1053 if (info->si_type == SI_BT) {
@@ -1023,7 +1056,7 @@ static int std_irq_setup(struct smi_info *info)
1023 SA_INTERRUPT, 1056 SA_INTERRUPT,
1024 DEVICE_NAME, 1057 DEVICE_NAME,
1025 info); 1058 info);
1026 if (!rv) 1059 if (! rv)
1027 /* Enable the interrupt in the BT interface. */ 1060 /* Enable the interrupt in the BT interface. */
1028 info->io.outputb(&info->io, IPMI_BT_INTMASK_REG, 1061 info->io.outputb(&info->io, IPMI_BT_INTMASK_REG,
1029 IPMI_BT_INTMASK_ENABLE_IRQ_BIT); 1062 IPMI_BT_INTMASK_ENABLE_IRQ_BIT);
@@ -1048,7 +1081,7 @@ static int std_irq_setup(struct smi_info *info)
1048 1081
1049static void std_irq_cleanup(struct smi_info *info) 1082static void std_irq_cleanup(struct smi_info *info)
1050{ 1083{
1051 if (!info->irq) 1084 if (! info->irq)
1052 return; 1085 return;
1053 1086
1054 if (info->si_type == SI_BT) 1087 if (info->si_type == SI_BT)
@@ -1121,7 +1154,7 @@ static int port_setup(struct smi_info *info)
1121 unsigned int *addr = info->io.info; 1154 unsigned int *addr = info->io.info;
1122 int mapsize; 1155 int mapsize;
1123 1156
1124 if (!addr || (!*addr)) 1157 if (! addr || (! *addr))
1125 return -ENODEV; 1158 return -ENODEV;
1126 1159
1127 info->io_cleanup = port_cleanup; 1160 info->io_cleanup = port_cleanup;
@@ -1164,15 +1197,15 @@ static int try_init_port(int intf_num, struct smi_info **new_info)
1164{ 1197{
1165 struct smi_info *info; 1198 struct smi_info *info;
1166 1199
1167 if (!ports[intf_num]) 1200 if (! ports[intf_num])
1168 return -ENODEV; 1201 return -ENODEV;
1169 1202
1170 if (!is_new_interface(intf_num, IPMI_IO_ADDR_SPACE, 1203 if (! is_new_interface(intf_num, IPMI_IO_ADDR_SPACE,
1171 ports[intf_num])) 1204 ports[intf_num]))
1172 return -ENODEV; 1205 return -ENODEV;
1173 1206
1174 info = kmalloc(sizeof(*info), GFP_KERNEL); 1207 info = kmalloc(sizeof(*info), GFP_KERNEL);
1175 if (!info) { 1208 if (! info) {
1176 printk(KERN_ERR "ipmi_si: Could not allocate SI data (1)\n"); 1209 printk(KERN_ERR "ipmi_si: Could not allocate SI data (1)\n");
1177 return -ENOMEM; 1210 return -ENOMEM;
1178 } 1211 }
@@ -1182,10 +1215,10 @@ static int try_init_port(int intf_num, struct smi_info **new_info)
1182 info->io.info = &(ports[intf_num]); 1215 info->io.info = &(ports[intf_num]);
1183 info->io.addr = NULL; 1216 info->io.addr = NULL;
1184 info->io.regspacing = regspacings[intf_num]; 1217 info->io.regspacing = regspacings[intf_num];
1185 if (!info->io.regspacing) 1218 if (! info->io.regspacing)
1186 info->io.regspacing = DEFAULT_REGSPACING; 1219 info->io.regspacing = DEFAULT_REGSPACING;
1187 info->io.regsize = regsizes[intf_num]; 1220 info->io.regsize = regsizes[intf_num];
1188 if (!info->io.regsize) 1221 if (! info->io.regsize)
1189 info->io.regsize = DEFAULT_REGSPACING; 1222 info->io.regsize = DEFAULT_REGSPACING;
1190 info->io.regshift = regshifts[intf_num]; 1223 info->io.regshift = regshifts[intf_num];
1191 info->irq = 0; 1224 info->irq = 0;
@@ -1270,7 +1303,7 @@ static int mem_setup(struct smi_info *info)
1270 unsigned long *addr = info->io.info; 1303 unsigned long *addr = info->io.info;
1271 int mapsize; 1304 int mapsize;
1272 1305
1273 if (!addr || (!*addr)) 1306 if (! addr || (! *addr))
1274 return -ENODEV; 1307 return -ENODEV;
1275 1308
1276 info->io_cleanup = mem_cleanup; 1309 info->io_cleanup = mem_cleanup;
@@ -1325,15 +1358,15 @@ static int try_init_mem(int intf_num, struct smi_info **new_info)
1325{ 1358{
1326 struct smi_info *info; 1359 struct smi_info *info;
1327 1360
1328 if (!addrs[intf_num]) 1361 if (! addrs[intf_num])
1329 return -ENODEV; 1362 return -ENODEV;
1330 1363
1331 if (!is_new_interface(intf_num, IPMI_MEM_ADDR_SPACE, 1364 if (! is_new_interface(intf_num, IPMI_MEM_ADDR_SPACE,
1332 addrs[intf_num])) 1365 addrs[intf_num]))
1333 return -ENODEV; 1366 return -ENODEV;
1334 1367
1335 info = kmalloc(sizeof(*info), GFP_KERNEL); 1368 info = kmalloc(sizeof(*info), GFP_KERNEL);
1336 if (!info) { 1369 if (! info) {
1337 printk(KERN_ERR "ipmi_si: Could not allocate SI data (2)\n"); 1370 printk(KERN_ERR "ipmi_si: Could not allocate SI data (2)\n");
1338 return -ENOMEM; 1371 return -ENOMEM;
1339 } 1372 }
@@ -1343,10 +1376,10 @@ static int try_init_mem(int intf_num, struct smi_info **new_info)
1343 info->io.info = &addrs[intf_num]; 1376 info->io.info = &addrs[intf_num];
1344 info->io.addr = NULL; 1377 info->io.addr = NULL;
1345 info->io.regspacing = regspacings[intf_num]; 1378 info->io.regspacing = regspacings[intf_num];
1346 if (!info->io.regspacing) 1379 if (! info->io.regspacing)
1347 info->io.regspacing = DEFAULT_REGSPACING; 1380 info->io.regspacing = DEFAULT_REGSPACING;
1348 info->io.regsize = regsizes[intf_num]; 1381 info->io.regsize = regsizes[intf_num];
1349 if (!info->io.regsize) 1382 if (! info->io.regsize)
1350 info->io.regsize = DEFAULT_REGSPACING; 1383 info->io.regsize = DEFAULT_REGSPACING;
1351 info->io.regshift = regshifts[intf_num]; 1384 info->io.regshift = regshifts[intf_num];
1352 info->irq = 0; 1385 info->irq = 0;
@@ -1404,7 +1437,7 @@ static int acpi_gpe_irq_setup(struct smi_info *info)
1404{ 1437{
1405 acpi_status status; 1438 acpi_status status;
1406 1439
1407 if (!info->irq) 1440 if (! info->irq)
1408 return 0; 1441 return 0;
1409 1442
1410 /* FIXME - is level triggered right? */ 1443 /* FIXME - is level triggered right? */
@@ -1428,7 +1461,7 @@ static int acpi_gpe_irq_setup(struct smi_info *info)
1428 1461
1429static void acpi_gpe_irq_cleanup(struct smi_info *info) 1462static void acpi_gpe_irq_cleanup(struct smi_info *info)
1430{ 1463{
1431 if (!info->irq) 1464 if (! info->irq)
1432 return; 1465 return;
1433 1466
1434 acpi_remove_gpe_handler(NULL, info->irq, &ipmi_acpi_gpe); 1467 acpi_remove_gpe_handler(NULL, info->irq, &ipmi_acpi_gpe);
@@ -1504,10 +1537,10 @@ static int try_init_acpi(int intf_num, struct smi_info **new_info)
1504 addr_space = IPMI_MEM_ADDR_SPACE; 1537 addr_space = IPMI_MEM_ADDR_SPACE;
1505 else 1538 else
1506 addr_space = IPMI_IO_ADDR_SPACE; 1539 addr_space = IPMI_IO_ADDR_SPACE;
1507 if (!is_new_interface(-1, addr_space, spmi->addr.address)) 1540 if (! is_new_interface(-1, addr_space, spmi->addr.address))
1508 return -ENODEV; 1541 return -ENODEV;
1509 1542
1510 if (!spmi->addr.register_bit_width) { 1543 if (! spmi->addr.register_bit_width) {
1511 acpi_failure = 1; 1544 acpi_failure = 1;
1512 return -ENODEV; 1545 return -ENODEV;
1513 } 1546 }
@@ -1534,7 +1567,7 @@ static int try_init_acpi(int intf_num, struct smi_info **new_info)
1534 } 1567 }
1535 1568
1536 info = kmalloc(sizeof(*info), GFP_KERNEL); 1569 info = kmalloc(sizeof(*info), GFP_KERNEL);
1537 if (!info) { 1570 if (! info) {
1538 printk(KERN_ERR "ipmi_si: Could not allocate SI data (3)\n"); 1571 printk(KERN_ERR "ipmi_si: Could not allocate SI data (3)\n");
1539 return -ENOMEM; 1572 return -ENOMEM;
1540 } 1573 }
@@ -1610,22 +1643,15 @@ typedef struct dmi_ipmi_data
1610static dmi_ipmi_data_t dmi_data[SI_MAX_DRIVERS]; 1643static dmi_ipmi_data_t dmi_data[SI_MAX_DRIVERS];
1611static int dmi_data_entries; 1644static int dmi_data_entries;
1612 1645
1613typedef struct dmi_header 1646static int __init decode_dmi(struct dmi_header *dm, int intf_num)
1614{
1615 u8 type;
1616 u8 length;
1617 u16 handle;
1618} dmi_header_t;
1619
1620static int decode_dmi(dmi_header_t __iomem *dm, int intf_num)
1621{ 1647{
1622 u8 __iomem *data = (u8 __iomem *)dm; 1648 u8 *data = (u8 *)dm;
1623 unsigned long base_addr; 1649 unsigned long base_addr;
1624 u8 reg_spacing; 1650 u8 reg_spacing;
1625 u8 len = readb(&dm->length); 1651 u8 len = dm->length;
1626 dmi_ipmi_data_t *ipmi_data = dmi_data+intf_num; 1652 dmi_ipmi_data_t *ipmi_data = dmi_data+intf_num;
1627 1653
1628 ipmi_data->type = readb(&data[4]); 1654 ipmi_data->type = data[4];
1629 1655
1630 memcpy(&base_addr, data+8, sizeof(unsigned long)); 1656 memcpy(&base_addr, data+8, sizeof(unsigned long));
1631 if (len >= 0x11) { 1657 if (len >= 0x11) {
@@ -1640,12 +1666,12 @@ static int decode_dmi(dmi_header_t __iomem *dm, int intf_num)
1640 } 1666 }
1641 /* If bit 4 of byte 0x10 is set, then the lsb for the address 1667 /* If bit 4 of byte 0x10 is set, then the lsb for the address
1642 is odd. */ 1668 is odd. */
1643 ipmi_data->base_addr = base_addr | ((readb(&data[0x10]) & 0x10) >> 4); 1669 ipmi_data->base_addr = base_addr | ((data[0x10] & 0x10) >> 4);
1644 1670
1645 ipmi_data->irq = readb(&data[0x11]); 1671 ipmi_data->irq = data[0x11];
1646 1672
1647 /* The top two bits of byte 0x10 hold the register spacing. */ 1673 /* The top two bits of byte 0x10 hold the register spacing. */
1648 reg_spacing = (readb(&data[0x10]) & 0xC0) >> 6; 1674 reg_spacing = (data[0x10] & 0xC0) >> 6;
1649 switch(reg_spacing){ 1675 switch(reg_spacing){
1650 case 0x00: /* Byte boundaries */ 1676 case 0x00: /* Byte boundaries */
1651 ipmi_data->offset = 1; 1677 ipmi_data->offset = 1;
@@ -1673,7 +1699,7 @@ static int decode_dmi(dmi_header_t __iomem *dm, int intf_num)
1673 ipmi_data->offset = 1; 1699 ipmi_data->offset = 1;
1674 } 1700 }
1675 1701
1676 ipmi_data->slave_addr = readb(&data[6]); 1702 ipmi_data->slave_addr = data[6];
1677 1703
1678 if (is_new_interface(-1, ipmi_data->addr_space,ipmi_data->base_addr)) { 1704 if (is_new_interface(-1, ipmi_data->addr_space,ipmi_data->base_addr)) {
1679 dmi_data_entries++; 1705 dmi_data_entries++;
@@ -1685,94 +1711,29 @@ static int decode_dmi(dmi_header_t __iomem *dm, int intf_num)
1685 return -1; 1711 return -1;
1686} 1712}
1687 1713
1688static int dmi_table(u32 base, int len, int num) 1714static void __init dmi_find_bmc(void)
1689{ 1715{
1690 u8 __iomem *buf; 1716 struct dmi_device *dev = NULL;
1691 struct dmi_header __iomem *dm;
1692 u8 __iomem *data;
1693 int i=1;
1694 int status=-1;
1695 int intf_num = 0; 1717 int intf_num = 0;
1696 1718
1697 buf = ioremap(base, len); 1719 while ((dev = dmi_find_device(DMI_DEV_TYPE_IPMI, NULL, dev))) {
1698 if(buf==NULL) 1720 if (intf_num >= SI_MAX_DRIVERS)
1699 return -1; 1721 break;
1700
1701 data = buf;
1702
1703 while(i<num && (data - buf) < len)
1704 {
1705 dm=(dmi_header_t __iomem *)data;
1706
1707 if((data-buf+readb(&dm->length)) >= len)
1708 break;
1709
1710 if (readb(&dm->type) == 38) {
1711 if (decode_dmi(dm, intf_num) == 0) {
1712 intf_num++;
1713 if (intf_num >= SI_MAX_DRIVERS)
1714 break;
1715 }
1716 }
1717
1718 data+=readb(&dm->length);
1719 while((data-buf) < len && (readb(data)||readb(data+1)))
1720 data++;
1721 data+=2;
1722 i++;
1723 }
1724 iounmap(buf);
1725
1726 return status;
1727}
1728
1729static inline int dmi_checksum(u8 *buf)
1730{
1731 u8 sum=0;
1732 int a;
1733
1734 for(a=0; a<15; a++)
1735 sum+=buf[a];
1736 return (sum==0);
1737}
1738
1739static int dmi_decode(void)
1740{
1741 u8 buf[15];
1742 u32 fp=0xF0000;
1743
1744#ifdef CONFIG_SIMNOW
1745 return -1;
1746#endif
1747
1748 while(fp < 0xFFFFF)
1749 {
1750 isa_memcpy_fromio(buf, fp, 15);
1751 if(memcmp(buf, "_DMI_", 5)==0 && dmi_checksum(buf))
1752 {
1753 u16 num=buf[13]<<8|buf[12];
1754 u16 len=buf[7]<<8|buf[6];
1755 u32 base=buf[11]<<24|buf[10]<<16|buf[9]<<8|buf[8];
1756 1722
1757 if(dmi_table(base, len, num) == 0) 1723 decode_dmi((struct dmi_header *) dev->device_data, intf_num++);
1758 return 0;
1759 }
1760 fp+=16;
1761 } 1724 }
1762
1763 return -1;
1764} 1725}
1765 1726
1766static int try_init_smbios(int intf_num, struct smi_info **new_info) 1727static int try_init_smbios(int intf_num, struct smi_info **new_info)
1767{ 1728{
1768 struct smi_info *info; 1729 struct smi_info *info;
1769 dmi_ipmi_data_t *ipmi_data = dmi_data+intf_num; 1730 dmi_ipmi_data_t *ipmi_data = dmi_data+intf_num;
1770 char *io_type; 1731 char *io_type;
1771 1732
1772 if (intf_num >= dmi_data_entries) 1733 if (intf_num >= dmi_data_entries)
1773 return -ENODEV; 1734 return -ENODEV;
1774 1735
1775 switch(ipmi_data->type) { 1736 switch (ipmi_data->type) {
1776 case 0x01: /* KCS */ 1737 case 0x01: /* KCS */
1777 si_type[intf_num] = "kcs"; 1738 si_type[intf_num] = "kcs";
1778 break; 1739 break;
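With the dmi_find_bmc() conversion above, the driver no longer scans 0xF0000-0xFFFFF for the "_DMI_" anchor itself; it asks the generic DMI layer for devices of type 38 (IPMI) and walks the matches with dmi_find_device(), passing the previous hit back in to get the next one. A small user-space model of that cursor-style lookup (the table and types here are illustrative only):

#include <stddef.h>

struct dev_sketch { int type; const char *name; };

static const struct dev_sketch table[] = {
	{ 38, "ipmi0" }, { 1, "other" }, { 38, "ipmi1" },
};

/* return the next entry of the given type after 'from', or NULL */
static const struct dev_sketch *find_device(int type, const struct dev_sketch *from)
{
	size_t i = from ? (size_t)(from - table) + 1 : 0;

	for (; i < sizeof(table) / sizeof(table[0]); i++)
		if (table[i].type == type)
			return &table[i];
	return NULL;
}

Usage mirrors dmi_find_bmc(): start with from == NULL and keep feeding the last match back in until NULL comes back or SI_MAX_DRIVERS interfaces have been recorded.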
@@ -1787,7 +1748,7 @@ static int try_init_smbios(int intf_num, struct smi_info **new_info)
1787 } 1748 }
1788 1749
1789 info = kmalloc(sizeof(*info), GFP_KERNEL); 1750 info = kmalloc(sizeof(*info), GFP_KERNEL);
1790 if (!info) { 1751 if (! info) {
1791 printk(KERN_ERR "ipmi_si: Could not allocate SI data (4)\n"); 1752 printk(KERN_ERR "ipmi_si: Could not allocate SI data (4)\n");
1792 return -ENOMEM; 1753 return -ENOMEM;
1793 } 1754 }
@@ -1811,7 +1772,7 @@ static int try_init_smbios(int intf_num, struct smi_info **new_info)
1811 1772
1812 regspacings[intf_num] = ipmi_data->offset; 1773 regspacings[intf_num] = ipmi_data->offset;
1813 info->io.regspacing = regspacings[intf_num]; 1774 info->io.regspacing = regspacings[intf_num];
1814 if (!info->io.regspacing) 1775 if (! info->io.regspacing)
1815 info->io.regspacing = DEFAULT_REGSPACING; 1776 info->io.regspacing = DEFAULT_REGSPACING;
1816 info->io.regsize = DEFAULT_REGSPACING; 1777 info->io.regsize = DEFAULT_REGSPACING;
1817 info->io.regshift = regshifts[intf_num]; 1778 info->io.regshift = regshifts[intf_num];
@@ -1853,14 +1814,14 @@ static int find_pci_smic(int intf_num, struct smi_info **new_info)
1853 1814
1854 pci_smic_checked = 1; 1815 pci_smic_checked = 1;
1855 1816
1856 if ((pci_dev = pci_get_device(PCI_HP_VENDOR_ID, PCI_MMC_DEVICE_ID, 1817 pci_dev = pci_get_device(PCI_HP_VENDOR_ID, PCI_MMC_DEVICE_ID, NULL);
1857 NULL))) 1818 if (! pci_dev) {
1858 ; 1819 pci_dev = pci_get_class(PCI_ERMC_CLASSCODE, NULL);
1859 else if ((pci_dev = pci_get_class(PCI_ERMC_CLASSCODE, NULL)) && 1820 if (pci_dev && (pci_dev->subsystem_vendor == PCI_HP_VENDOR_ID))
1860 pci_dev->subsystem_vendor == PCI_HP_VENDOR_ID) 1821 fe_rmc = 1;
1861 fe_rmc = 1; 1822 else
1862 else 1823 return -ENODEV;
1863 return -ENODEV; 1824 }
1864 1825
1865 error = pci_read_config_word(pci_dev, PCI_MMC_ADDR_CW, &base_addr); 1826 error = pci_read_config_word(pci_dev, PCI_MMC_ADDR_CW, &base_addr);
1866 if (error) 1827 if (error)
@@ -1873,7 +1834,7 @@ static int find_pci_smic(int intf_num, struct smi_info **new_info)
1873 } 1834 }
1874 1835
1875 /* Bit 0: 1 specifies programmed I/O, 0 specifies memory mapped I/O */ 1836 /* Bit 0: 1 specifies programmed I/O, 0 specifies memory mapped I/O */
1876 if (!(base_addr & 0x0001)) 1837 if (! (base_addr & 0x0001))
1877 { 1838 {
1878 pci_dev_put(pci_dev); 1839 pci_dev_put(pci_dev);
1879 printk(KERN_ERR 1840 printk(KERN_ERR
@@ -1883,17 +1844,17 @@ static int find_pci_smic(int intf_num, struct smi_info **new_info)
1883 } 1844 }
1884 1845
1885 base_addr &= 0xFFFE; 1846 base_addr &= 0xFFFE;
1886 if (!fe_rmc) 1847 if (! fe_rmc)
1887 /* Data register starts at base address + 1 in eRMC */ 1848 /* Data register starts at base address + 1 in eRMC */
1888 ++base_addr; 1849 ++base_addr;
1889 1850
1890 if (!is_new_interface(-1, IPMI_IO_ADDR_SPACE, base_addr)) { 1851 if (! is_new_interface(-1, IPMI_IO_ADDR_SPACE, base_addr)) {
1891 pci_dev_put(pci_dev); 1852 pci_dev_put(pci_dev);
1892 return -ENODEV; 1853 return -ENODEV;
1893 } 1854 }
1894 1855
1895 info = kmalloc(sizeof(*info), GFP_KERNEL); 1856 info = kmalloc(sizeof(*info), GFP_KERNEL);
1896 if (!info) { 1857 if (! info) {
1897 pci_dev_put(pci_dev); 1858 pci_dev_put(pci_dev);
1898 printk(KERN_ERR "ipmi_si: Could not allocate SI data (5)\n"); 1859 printk(KERN_ERR "ipmi_si: Could not allocate SI data (5)\n");
1899 return -ENOMEM; 1860 return -ENOMEM;
@@ -1904,7 +1865,7 @@ static int find_pci_smic(int intf_num, struct smi_info **new_info)
1904 ports[intf_num] = base_addr; 1865 ports[intf_num] = base_addr;
1905 info->io.info = &(ports[intf_num]); 1866 info->io.info = &(ports[intf_num]);
1906 info->io.regspacing = regspacings[intf_num]; 1867 info->io.regspacing = regspacings[intf_num];
1907 if (!info->io.regspacing) 1868 if (! info->io.regspacing)
1908 info->io.regspacing = DEFAULT_REGSPACING; 1869 info->io.regspacing = DEFAULT_REGSPACING;
1909 info->io.regsize = DEFAULT_REGSPACING; 1870 info->io.regsize = DEFAULT_REGSPACING;
1910 info->io.regshift = regshifts[intf_num]; 1871 info->io.regshift = regshifts[intf_num];
@@ -1925,7 +1886,7 @@ static int find_pci_smic(int intf_num, struct smi_info **new_info)
1925static int try_init_plug_and_play(int intf_num, struct smi_info **new_info) 1886static int try_init_plug_and_play(int intf_num, struct smi_info **new_info)
1926{ 1887{
1927#ifdef CONFIG_PCI 1888#ifdef CONFIG_PCI
1928 if (find_pci_smic(intf_num, new_info)==0) 1889 if (find_pci_smic(intf_num, new_info) == 0)
1929 return 0; 1890 return 0;
1930#endif 1891#endif
1931 /* Include other methods here. */ 1892 /* Include other methods here. */
@@ -1943,7 +1904,7 @@ static int try_get_dev_id(struct smi_info *smi_info)
1943 int rv = 0; 1904 int rv = 0;
1944 1905
1945 resp = kmalloc(IPMI_MAX_MSG_LENGTH, GFP_KERNEL); 1906 resp = kmalloc(IPMI_MAX_MSG_LENGTH, GFP_KERNEL);
1946 if (!resp) 1907 if (! resp)
1947 return -ENOMEM; 1908 return -ENOMEM;
1948 1909
1949 /* Do a Get Device ID command, since it comes back with some 1910 /* Do a Get Device ID command, since it comes back with some
@@ -1992,11 +1953,8 @@ static int try_get_dev_id(struct smi_info *smi_info)
1992 } 1953 }
1993 1954
1994 /* Record info from the get device id, in case we need it. */ 1955 /* Record info from the get device id, in case we need it. */
1995 smi_info->ipmi_si_dev_rev = resp[4] & 0xf; 1956 memcpy(&smi_info->device_id, &resp[3],
1996 smi_info->ipmi_si_fw_rev_major = resp[5] & 0x7f; 1957 min_t(unsigned long, resp_len-3, sizeof(smi_info->device_id)));
1997 smi_info->ipmi_si_fw_rev_minor = resp[6];
1998 smi_info->ipmi_version_major = resp[7] & 0xf;
1999 smi_info->ipmi_version_minor = resp[7] >> 4;
2000 1958
2001 out: 1959 out:
2002 kfree(resp); 1960 kfree(resp);
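The replacement above keeps the whole Get Device ID response in smi_info->device_id instead of hand-picking individual bytes, clamping the copy with min_t() so neither a short response nor a small destination can overflow. A plain-C model of that clamp (the struct size is illustrative; the driver has already validated resp_len):

#include <string.h>

struct device_id_sketch { unsigned char raw[11]; };

static void record_device_id(struct device_id_sketch *dst,
                             const unsigned char *resp, size_t resp_len)
{
	size_t n;

	if (resp_len < 3)
		return;                 /* nothing past the header to record */

	n = resp_len - 3;               /* payload starts at resp[3] */
	if (n > sizeof(*dst))
		n = sizeof(*dst);       /* the min_t() in the driver */
	memcpy(dst, &resp[3], n);
}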
@@ -2028,7 +1986,7 @@ static int stat_file_read_proc(char *page, char **start, off_t off,
2028 struct smi_info *smi = data; 1986 struct smi_info *smi = data;
2029 1987
2030 out += sprintf(out, "interrupts_enabled: %d\n", 1988 out += sprintf(out, "interrupts_enabled: %d\n",
2031 smi->irq && !smi->interrupt_disabled); 1989 smi->irq && ! smi->interrupt_disabled);
2032 out += sprintf(out, "short_timeouts: %ld\n", 1990 out += sprintf(out, "short_timeouts: %ld\n",
2033 smi->short_timeouts); 1991 smi->short_timeouts);
2034 out += sprintf(out, "long_timeouts: %ld\n", 1992 out += sprintf(out, "long_timeouts: %ld\n",
@@ -2057,6 +2015,73 @@ static int stat_file_read_proc(char *page, char **start, off_t off,
2057 return (out - ((char *) page)); 2015 return (out - ((char *) page));
2058} 2016}
2059 2017
2018/*
2019 * oem_data_avail_to_receive_msg_avail
2020 * @info - smi_info structure with msg_flags set
2021 *
2022 * Converts flags from OEM_DATA_AVAIL to RECEIVE_MSG_AVAIL
2023 * Returns 1 indicating need to re-run handle_flags().
2024 */
2025static int oem_data_avail_to_receive_msg_avail(struct smi_info *smi_info)
2026{
2027 smi_info->msg_flags = ((smi_info->msg_flags & ~OEM_DATA_AVAIL) |
2028 RECEIVE_MSG_AVAIL);
2029 return 1;
2030}
2031
2032/*
2033 * setup_dell_poweredge_oem_data_handler
2034 * @info - smi_info.device_id must be populated
2035 *
2036 * Systems that match, but have firmware version < 1.40 may assert
2037 * OEM0_DATA_AVAIL on their own, without being told via Set Flags that
2038 * it's safe to do so. Such systems will de-assert OEM1_DATA_AVAIL
2039 * upon receipt of IPMI_GET_MSG_CMD, so we should treat these flags
2040 * as RECEIVE_MSG_AVAIL instead.
2041 *
2042 * As Dell has no plans to release IPMI 1.5 firmware that *ever*
2043 * asserts the OEM[012] bits, and if it did, the driver would have to
2044 * change to handle that properly, we don't actually check for the
2045 * firmware version.
2046 * Device ID = 0x20 BMC on PowerEdge 8G servers
2047 * Device Revision = 0x80
2048 * Firmware Revision1 = 0x01 BMC version 1.40
2049 * Firmware Revision2 = 0x40 BCD encoded
2050 * IPMI Version = 0x51 IPMI 1.5
2051 * Manufacturer ID = A2 02 00 Dell IANA
2052 *
2053 */
2054#define DELL_POWEREDGE_8G_BMC_DEVICE_ID 0x20
2055#define DELL_POWEREDGE_8G_BMC_DEVICE_REV 0x80
2056#define DELL_POWEREDGE_8G_BMC_IPMI_VERSION 0x51
2057#define DELL_IANA_MFR_ID {0xA2, 0x02, 0x00}
2058static void setup_dell_poweredge_oem_data_handler(struct smi_info *smi_info)
2059{
2060 struct ipmi_device_id *id = &smi_info->device_id;
2061 const char mfr[3]=DELL_IANA_MFR_ID;
2062 if (! memcmp(mfr, id->manufacturer_id, sizeof(mfr))
2063 && (id->device_id == DELL_POWEREDGE_8G_BMC_DEVICE_ID)
2064 && (id->device_revision == DELL_POWEREDGE_8G_BMC_DEVICE_REV)
2065 && (id->ipmi_version == DELL_POWEREDGE_8G_BMC_IPMI_VERSION))
2066 {
2067 smi_info->oem_data_avail_handler =
2068 oem_data_avail_to_receive_msg_avail;
2069 }
2070}
2071
2072/*
2073 * setup_oem_data_handler
2074 * @info - smi_info.device_id must be filled in already
2075 *
2076 * Fills in smi_info.oem_data_avail_handler
2077 * when we know what function to use there.
2078 */
2079
2080static void setup_oem_data_handler(struct smi_info *smi_info)
2081{
2082 setup_dell_poweredge_oem_data_handler(smi_info);
2083}
2084
2060/* Returns 0 if initialized, or negative on an error. */ 2085/* Returns 0 if initialized, or negative on an error. */
2061static int init_one_smi(int intf_num, struct smi_info **smi) 2086static int init_one_smi(int intf_num, struct smi_info **smi)
2062{ 2087{
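The new setup_dell_poweredge_oem_data_handler() only installs the OEM-flag workaround when the Get Device ID data matches the PowerEdge 8G BMC constants listed in the comment. The same match modelled in plain C (the struct is an illustrative stand-in for struct ipmi_device_id):

#include <string.h>

struct id_sketch {
	unsigned char device_id;
	unsigned char device_revision;
	unsigned char ipmi_version;
	unsigned char manufacturer_id[3];
};

static int is_dell_poweredge_8g_bmc(const struct id_sketch *id)
{
	static const unsigned char dell_iana[3] = { 0xA2, 0x02, 0x00 };

	return memcmp(dell_iana, id->manufacturer_id, sizeof(dell_iana)) == 0
	       && id->device_id == 0x20          /* PowerEdge 8G BMC */
	       && id->device_revision == 0x80
	       && id->ipmi_version == 0x51;      /* IPMI 1.5 */
}

When this returns true, the driver points oem_data_avail_handler at oem_data_avail_to_receive_msg_avail(), which rewrites OEM_DATA_AVAIL into RECEIVE_MSG_AVAIL and asks handle_flags() to run again.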
@@ -2068,19 +2093,15 @@ static int init_one_smi(int intf_num, struct smi_info **smi)
2068 if (rv) 2093 if (rv)
2069 rv = try_init_port(intf_num, &new_smi); 2094 rv = try_init_port(intf_num, &new_smi);
2070#ifdef CONFIG_ACPI_INTERPRETER 2095#ifdef CONFIG_ACPI_INTERPRETER
2071 if ((rv) && (si_trydefaults)) { 2096 if (rv && si_trydefaults)
2072 rv = try_init_acpi(intf_num, &new_smi); 2097 rv = try_init_acpi(intf_num, &new_smi);
2073 }
2074#endif 2098#endif
2075#ifdef CONFIG_X86 2099#ifdef CONFIG_X86
2076 if ((rv) && (si_trydefaults)) { 2100 if (rv && si_trydefaults)
2077 rv = try_init_smbios(intf_num, &new_smi); 2101 rv = try_init_smbios(intf_num, &new_smi);
2078 }
2079#endif 2102#endif
2080 if ((rv) && (si_trydefaults)) { 2103 if (rv && si_trydefaults)
2081 rv = try_init_plug_and_play(intf_num, &new_smi); 2104 rv = try_init_plug_and_play(intf_num, &new_smi);
2082 }
2083
2084 2105
2085 if (rv) 2106 if (rv)
2086 return rv; 2107 return rv;
@@ -2090,7 +2111,7 @@ static int init_one_smi(int intf_num, struct smi_info **smi)
2090 new_smi->si_sm = NULL; 2111 new_smi->si_sm = NULL;
2091 new_smi->handlers = NULL; 2112 new_smi->handlers = NULL;
2092 2113
2093 if (!new_smi->irq_setup) { 2114 if (! new_smi->irq_setup) {
2094 new_smi->irq = irqs[intf_num]; 2115 new_smi->irq = irqs[intf_num];
2095 new_smi->irq_setup = std_irq_setup; 2116 new_smi->irq_setup = std_irq_setup;
2096 new_smi->irq_cleanup = std_irq_cleanup; 2117 new_smi->irq_cleanup = std_irq_cleanup;
@@ -2124,7 +2145,7 @@ static int init_one_smi(int intf_num, struct smi_info **smi)
2124 2145
2125 /* Allocate the state machine's data and initialize it. */ 2146 /* Allocate the state machine's data and initialize it. */
2126 new_smi->si_sm = kmalloc(new_smi->handlers->size(), GFP_KERNEL); 2147 new_smi->si_sm = kmalloc(new_smi->handlers->size(), GFP_KERNEL);
2127 if (!new_smi->si_sm) { 2148 if (! new_smi->si_sm) {
2128 printk(" Could not allocate state machine memory\n"); 2149 printk(" Could not allocate state machine memory\n");
2129 rv = -ENOMEM; 2150 rv = -ENOMEM;
2130 goto out_err; 2151 goto out_err;
@@ -2155,6 +2176,8 @@ static int init_one_smi(int intf_num, struct smi_info **smi)
2155 if (rv) 2176 if (rv)
2156 goto out_err; 2177 goto out_err;
2157 2178
2179 setup_oem_data_handler(new_smi);
2180
2158 /* Try to claim any interrupts. */ 2181 /* Try to claim any interrupts. */
2159 new_smi->irq_setup(new_smi); 2182 new_smi->irq_setup(new_smi);
2160 2183
@@ -2188,8 +2211,8 @@ static int init_one_smi(int intf_num, struct smi_info **smi)
2188 2211
2189 rv = ipmi_register_smi(&handlers, 2212 rv = ipmi_register_smi(&handlers,
2190 new_smi, 2213 new_smi,
2191 new_smi->ipmi_version_major, 2214 ipmi_version_major(&new_smi->device_id),
2192 new_smi->ipmi_version_minor, 2215 ipmi_version_minor(&new_smi->device_id),
2193 new_smi->slave_addr, 2216 new_smi->slave_addr,
2194 &(new_smi->intf)); 2217 &(new_smi->intf));
2195 if (rv) { 2218 if (rv) {
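ipmi_register_smi() now takes its version numbers from the captured device_id via the ipmi_version_major()/ipmi_version_minor() helpers rather than from the fields the old code parsed by hand. Judging from the byte layout that code used (major in the low nibble of resp[7], minor in the high nibble), the helpers presumably reduce to something like this sketch; the real macros live in the IPMI headers:

#define ipmi_version_major(id)  ((id)->ipmi_version & 0x0f)
#define ipmi_version_minor(id)  ((id)->ipmi_version >> 4)

With that reading, an ipmi_version byte of 0x51 decodes to IPMI 1.5, which matches the Dell table above.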
@@ -2230,7 +2253,7 @@ static int init_one_smi(int intf_num, struct smi_info **smi)
2230 2253
2231 /* Wait for the timer to stop. This avoids problems with race 2254 /* Wait for the timer to stop. This avoids problems with race
2232 conditions removing the timer here. */ 2255 conditions removing the timer here. */
2233 while (!new_smi->timer_stopped) { 2256 while (! new_smi->timer_stopped) {
2234 set_current_state(TASK_UNINTERRUPTIBLE); 2257 set_current_state(TASK_UNINTERRUPTIBLE);
2235 schedule_timeout(1); 2258 schedule_timeout(1);
2236 } 2259 }
@@ -2270,7 +2293,7 @@ static __init int init_ipmi_si(void)
2270 /* Parse out the si_type string into its components. */ 2293 /* Parse out the si_type string into its components. */
2271 str = si_type_str; 2294 str = si_type_str;
2272 if (*str != '\0') { 2295 if (*str != '\0') {
2273 for (i=0; (i<SI_MAX_PARMS) && (*str != '\0'); i++) { 2296 for (i = 0; (i < SI_MAX_PARMS) && (*str != '\0'); i++) {
2274 si_type[i] = str; 2297 si_type[i] = str;
2275 str = strchr(str, ','); 2298 str = strchr(str, ',');
2276 if (str) { 2299 if (str) {
@@ -2282,22 +2305,14 @@ static __init int init_ipmi_si(void)
2282 } 2305 }
2283 } 2306 }
2284 2307
2285 printk(KERN_INFO "IPMI System Interface driver version " 2308 printk(KERN_INFO "IPMI System Interface driver.\n");
2286 IPMI_SI_VERSION);
2287 if (kcs_smi_handlers.version)
2288 printk(", KCS version %s", kcs_smi_handlers.version);
2289 if (smic_smi_handlers.version)
2290 printk(", SMIC version %s", smic_smi_handlers.version);
2291 if (bt_smi_handlers.version)
2292 printk(", BT version %s", bt_smi_handlers.version);
2293 printk("\n");
2294 2309
2295#ifdef CONFIG_X86 2310#ifdef CONFIG_X86
2296 dmi_decode(); 2311 dmi_find_bmc();
2297#endif 2312#endif
2298 2313
2299 rv = init_one_smi(0, &(smi_infos[pos])); 2314 rv = init_one_smi(0, &(smi_infos[pos]));
2300 if (rv && !ports[0] && si_trydefaults) { 2315 if (rv && ! ports[0] && si_trydefaults) {
2301 /* If we are trying defaults and the initial port is 2316 /* If we are trying defaults and the initial port is
2302 not set, then set it. */ 2317 not set, then set it. */
2303 si_type[0] = "kcs"; 2318 si_type[0] = "kcs";
@@ -2319,7 +2334,7 @@ static __init int init_ipmi_si(void)
2319 if (rv == 0) 2334 if (rv == 0)
2320 pos++; 2335 pos++;
2321 2336
2322 for (i=1; i < SI_MAX_PARMS; i++) { 2337 for (i = 1; i < SI_MAX_PARMS; i++) {
2323 rv = init_one_smi(i, &(smi_infos[pos])); 2338 rv = init_one_smi(i, &(smi_infos[pos]));
2324 if (rv == 0) 2339 if (rv == 0)
2325 pos++; 2340 pos++;
@@ -2361,14 +2376,14 @@ static void __exit cleanup_one_si(struct smi_info *to_clean)
2361 2376
2362 /* Wait for the timer to stop. This avoids problems with race 2377 /* Wait for the timer to stop. This avoids problems with race
2363 conditions removing the timer here. */ 2378 conditions removing the timer here. */
2364 while (!to_clean->timer_stopped) { 2379 while (! to_clean->timer_stopped) {
2365 set_current_state(TASK_UNINTERRUPTIBLE); 2380 set_current_state(TASK_UNINTERRUPTIBLE);
2366 schedule_timeout(1); 2381 schedule_timeout(1);
2367 } 2382 }
2368 2383
2369 /* Interrupts and timeouts are stopped, now make sure the 2384 /* Interrupts and timeouts are stopped, now make sure the
2370 interface is in a clean state. */ 2385 interface is in a clean state. */
2371 while ((to_clean->curr_msg) || (to_clean->si_state != SI_NORMAL)) { 2386 while (to_clean->curr_msg || (to_clean->si_state != SI_NORMAL)) {
2372 poll(to_clean); 2387 poll(to_clean);
2373 set_current_state(TASK_UNINTERRUPTIBLE); 2388 set_current_state(TASK_UNINTERRUPTIBLE);
2374 schedule_timeout(1); 2389 schedule_timeout(1);
@@ -2392,13 +2407,15 @@ static __exit void cleanup_ipmi_si(void)
2392{ 2407{
2393 int i; 2408 int i;
2394 2409
2395 if (!initialized) 2410 if (! initialized)
2396 return; 2411 return;
2397 2412
2398 for (i=0; i<SI_MAX_DRIVERS; i++) { 2413 for (i = 0; i < SI_MAX_DRIVERS; i++) {
2399 cleanup_one_si(smi_infos[i]); 2414 cleanup_one_si(smi_infos[i]);
2400 } 2415 }
2401} 2416}
2402module_exit(cleanup_ipmi_si); 2417module_exit(cleanup_ipmi_si);
2403 2418
2404MODULE_LICENSE("GPL"); 2419MODULE_LICENSE("GPL");
2420MODULE_AUTHOR("Corey Minyard <minyard@mvista.com>");
2421MODULE_DESCRIPTION("Interface to the IPMI driver for the KCS, SMIC, and BT system interfaces.");
diff --git a/drivers/char/ipmi/ipmi_smic_sm.c b/drivers/char/ipmi/ipmi_smic_sm.c
index ae18747e670b..add2aa2732f0 100644
--- a/drivers/char/ipmi/ipmi_smic_sm.c
+++ b/drivers/char/ipmi/ipmi_smic_sm.c
@@ -46,8 +46,6 @@
46#include <linux/ipmi_msgdefs.h> /* for completion codes */ 46#include <linux/ipmi_msgdefs.h> /* for completion codes */
47#include "ipmi_si_sm.h" 47#include "ipmi_si_sm.h"
48 48
49#define IPMI_SMIC_VERSION "v33"
50
51/* smic_debug is a bit-field 49/* smic_debug is a bit-field
52 * SMIC_DEBUG_ENABLE - turned on for now 50 * SMIC_DEBUG_ENABLE - turned on for now
53 * SMIC_DEBUG_MSG - commands and their responses 51 * SMIC_DEBUG_MSG - commands and their responses
@@ -588,7 +586,6 @@ static int smic_size(void)
588 586
589struct si_sm_handlers smic_smi_handlers = 587struct si_sm_handlers smic_smi_handlers =
590{ 588{
591 .version = IPMI_SMIC_VERSION,
592 .init_data = init_smic_data, 589 .init_data = init_smic_data,
593 .start_transaction = start_smic_transaction, 590 .start_transaction = start_smic_transaction,
594 .get_result = smic_get_result, 591 .get_result = smic_get_result,
diff --git a/drivers/char/ipmi/ipmi_watchdog.c b/drivers/char/ipmi/ipmi_watchdog.c
index d35a953961cb..e71aaae855ad 100644
--- a/drivers/char/ipmi/ipmi_watchdog.c
+++ b/drivers/char/ipmi/ipmi_watchdog.c
@@ -53,8 +53,6 @@
53 53
54#define PFX "IPMI Watchdog: " 54#define PFX "IPMI Watchdog: "
55 55
56#define IPMI_WATCHDOG_VERSION "v33"
57
58/* 56/*
59 * The IPMI command/response information for the watchdog timer. 57 * The IPMI command/response information for the watchdog timer.
60 */ 58 */
@@ -259,7 +257,7 @@ static int i_ipmi_set_timeout(struct ipmi_smi_msg *smi_msg,
259 257
260 data[1] = 0; 258 data[1] = 0;
261 WDOG_SET_TIMEOUT_ACT(data[1], ipmi_watchdog_state); 259 WDOG_SET_TIMEOUT_ACT(data[1], ipmi_watchdog_state);
262 if (pretimeout > 0) { 260 if ((pretimeout > 0) && (ipmi_watchdog_state != WDOG_TIMEOUT_NONE)) {
263 WDOG_SET_PRETIMEOUT_ACT(data[1], preaction_val); 261 WDOG_SET_PRETIMEOUT_ACT(data[1], preaction_val);
264 data[2] = pretimeout; 262 data[2] = pretimeout;
265 } else { 263 } else {
@@ -659,19 +657,18 @@ static ssize_t ipmi_read(struct file *file,
659 657
660static int ipmi_open(struct inode *ino, struct file *filep) 658static int ipmi_open(struct inode *ino, struct file *filep)
661{ 659{
662 switch (iminor(ino)) 660 switch (iminor(ino)) {
663 { 661 case WATCHDOG_MINOR:
664 case WATCHDOG_MINOR: 662 if (test_and_set_bit(0, &ipmi_wdog_open))
665 if(test_and_set_bit(0, &ipmi_wdog_open))
666 return -EBUSY; 663 return -EBUSY;
667 664
668 /* Don't start the timer now, let it start on the 665 /* Don't start the timer now, let it start on the
669 first heartbeat. */ 666 first heartbeat. */
670 ipmi_start_timer_on_heartbeat = 1; 667 ipmi_start_timer_on_heartbeat = 1;
671 return nonseekable_open(ino, filep); 668 return nonseekable_open(ino, filep);
672 669
673 default: 670 default:
674 return (-ENODEV); 671 return (-ENODEV);
675 } 672 }
676} 673}
677 674
@@ -817,15 +814,19 @@ static void ipmi_register_watchdog(int ipmi_intf)
817static int 814static int
818ipmi_nmi(void *dev_id, struct pt_regs *regs, int cpu, int handled) 815ipmi_nmi(void *dev_id, struct pt_regs *regs, int cpu, int handled)
819{ 816{
817 /* If we are not expecting a timeout, ignore it. */
818 if (ipmi_watchdog_state == WDOG_TIMEOUT_NONE)
819 return NOTIFY_DONE;
820
820 /* If no one else handled the NMI, we assume it was the IPMI 821 /* If no one else handled the NMI, we assume it was the IPMI
821 watchdog. */ 822 watchdog. */
822 if ((!handled) && (preop_val == WDOG_PREOP_PANIC)) 823 if ((!handled) && (preop_val == WDOG_PREOP_PANIC)) {
824 /* On some machines, the heartbeat will give
825 an error and not work unless we re-enable
826 the timer. So do so. */
827 pretimeout_since_last_heartbeat = 1;
823 panic(PFX "pre-timeout"); 828 panic(PFX "pre-timeout");
824 829 }
825 /* On some machines, the heartbeat will give
826 an error and not work unless we re-enable
827 the timer. So do so. */
828 pretimeout_since_last_heartbeat = 1;
829 830
830 return NOTIFY_DONE; 831 return NOTIFY_DONE;
831} 832}
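The reordering above makes the NMI handler bail out when no watchdog timeout is armed, and only touches pretimeout_since_last_heartbeat on the path that is actually about to panic. The resulting control flow, sketched with illustrative names:

static int nmi_pretimeout_sketch(int timeout_armed, int handled_elsewhere,
                                 int preop_is_panic, int *rearm_on_heartbeat)
{
	if (!timeout_armed)
		return 0;                 /* not expecting a timeout: ignore the NMI */

	if (!handled_elsewhere && preop_is_panic) {
		/* some BMCs refuse the next heartbeat unless it is retried */
		*rearm_on_heartbeat = 1;
		/* panic("pre-timeout") follows in the real driver */
	}
	return 0;
}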
@@ -924,9 +925,6 @@ static int __init ipmi_wdog_init(void)
924{ 925{
925 int rv; 926 int rv;
926 927
927 printk(KERN_INFO PFX "driver version "
928 IPMI_WATCHDOG_VERSION "\n");
929
930 if (strcmp(action, "reset") == 0) { 928 if (strcmp(action, "reset") == 0) {
931 action_val = WDOG_TIMEOUT_RESET; 929 action_val = WDOG_TIMEOUT_RESET;
932 } else if (strcmp(action, "none") == 0) { 930 } else if (strcmp(action, "none") == 0) {
@@ -1011,6 +1009,8 @@ static int __init ipmi_wdog_init(void)
1011 register_reboot_notifier(&wdog_reboot_notifier); 1009 register_reboot_notifier(&wdog_reboot_notifier);
1012 notifier_chain_register(&panic_notifier_list, &wdog_panic_notifier); 1010 notifier_chain_register(&panic_notifier_list, &wdog_panic_notifier);
1013 1011
1012 printk(KERN_INFO PFX "driver initialized\n");
1013
1014 return 0; 1014 return 0;
1015} 1015}
1016 1016
@@ -1062,3 +1062,5 @@ static void __exit ipmi_wdog_exit(void)
1062module_exit(ipmi_wdog_exit); 1062module_exit(ipmi_wdog_exit);
1063module_init(ipmi_wdog_init); 1063module_init(ipmi_wdog_init);
1064MODULE_LICENSE("GPL"); 1064MODULE_LICENSE("GPL");
1065MODULE_AUTHOR("Corey Minyard <minyard@mvista.com>");
1066MODULE_DESCRIPTION("watchdog timer based upon the IPMI interface.");
diff --git a/drivers/char/mbcs.c b/drivers/char/mbcs.c
index 115dbb35334b..3fa64c631108 100644
--- a/drivers/char/mbcs.c
+++ b/drivers/char/mbcs.c
@@ -750,7 +750,7 @@ static int mbcs_probe(struct cx_dev *dev, const struct cx_device_id *id)
750 750
751 dev->soft = NULL; 751 dev->soft = NULL;
752 752
753 soft = kcalloc(1, sizeof(struct mbcs_soft), GFP_KERNEL); 753 soft = kzalloc(sizeof(struct mbcs_soft), GFP_KERNEL);
754 if (soft == NULL) 754 if (soft == NULL)
755 return -ENOMEM; 755 return -ENOMEM;
756 756
diff --git a/drivers/char/mem.c b/drivers/char/mem.c
index 850a78c9c4bc..f182752fe918 100644
--- a/drivers/char/mem.c
+++ b/drivers/char/mem.c
@@ -35,10 +35,6 @@
35# include <linux/efi.h> 35# include <linux/efi.h>
36#endif 36#endif
37 37
38#if defined(CONFIG_S390_TAPE) && defined(CONFIG_S390_TAPE_CHAR)
39extern void tapechar_init(void);
40#endif
41
42/* 38/*
43 * Architectures vary in how they handle caching for addresses 39 * Architectures vary in how they handle caching for addresses
44 * outside of main memory. 40 * outside of main memory.
diff --git a/drivers/char/misc.c b/drivers/char/misc.c
index 931efd58f87a..0c8375165e29 100644
--- a/drivers/char/misc.c
+++ b/drivers/char/misc.c
@@ -63,8 +63,6 @@ static DECLARE_MUTEX(misc_sem);
63#define DYNAMIC_MINORS 64 /* like dynamic majors */ 63#define DYNAMIC_MINORS 64 /* like dynamic majors */
64static unsigned char misc_minors[DYNAMIC_MINORS / 8]; 64static unsigned char misc_minors[DYNAMIC_MINORS / 8];
65 65
66extern int rtc_DP8570A_init(void);
67extern int rtc_MK48T08_init(void);
68extern int pmu_device_init(void); 66extern int pmu_device_init(void);
69 67
70#ifdef CONFIG_PROC_FS 68#ifdef CONFIG_PROC_FS
@@ -303,12 +301,7 @@ static int __init misc_init(void)
303 misc_class = class_create(THIS_MODULE, "misc"); 301 misc_class = class_create(THIS_MODULE, "misc");
304 if (IS_ERR(misc_class)) 302 if (IS_ERR(misc_class))
305 return PTR_ERR(misc_class); 303 return PTR_ERR(misc_class);
306#ifdef CONFIG_MVME16x 304
307 rtc_MK48T08_init();
308#endif
309#ifdef CONFIG_BVME6000
310 rtc_DP8570A_init();
311#endif
312 if (register_chrdev(MISC_MAJOR,"misc",&misc_fops)) { 305 if (register_chrdev(MISC_MAJOR,"misc",&misc_fops)) {
313 printk("unable to get major %d for misc devices\n", 306 printk("unable to get major %d for misc devices\n",
314 MISC_MAJOR); 307 MISC_MAJOR);
diff --git a/drivers/char/sonypi.c b/drivers/char/sonypi.c
index cefbe985e55c..36ae9ad2598c 100644
--- a/drivers/char/sonypi.c
+++ b/drivers/char/sonypi.c
@@ -98,12 +98,13 @@ MODULE_PARM_DESC(useinput,
98 98
99#define SONYPI_DEVICE_MODEL_TYPE1 1 99#define SONYPI_DEVICE_MODEL_TYPE1 1
100#define SONYPI_DEVICE_MODEL_TYPE2 2 100#define SONYPI_DEVICE_MODEL_TYPE2 2
101#define SONYPI_DEVICE_MODEL_TYPE3 3
101 102
102/* type1 models use those */ 103/* type1 models use those */
103#define SONYPI_IRQ_PORT 0x8034 104#define SONYPI_IRQ_PORT 0x8034
104#define SONYPI_IRQ_SHIFT 22 105#define SONYPI_IRQ_SHIFT 22
105#define SONYPI_BASE 0x50 106#define SONYPI_TYPE1_BASE 0x50
106#define SONYPI_G10A (SONYPI_BASE+0x14) 107#define SONYPI_G10A (SONYPI_TYPE1_BASE+0x14)
107#define SONYPI_TYPE1_REGION_SIZE 0x08 108#define SONYPI_TYPE1_REGION_SIZE 0x08
108#define SONYPI_TYPE1_EVTYPE_OFFSET 0x04 109#define SONYPI_TYPE1_EVTYPE_OFFSET 0x04
109 110
@@ -114,6 +115,13 @@ MODULE_PARM_DESC(useinput,
114#define SONYPI_TYPE2_REGION_SIZE 0x20 115#define SONYPI_TYPE2_REGION_SIZE 0x20
115#define SONYPI_TYPE2_EVTYPE_OFFSET 0x12 116#define SONYPI_TYPE2_EVTYPE_OFFSET 0x12
116 117
118/* type3 series specifics */
119#define SONYPI_TYPE3_BASE 0x40
120#define SONYPI_TYPE3_GID2 (SONYPI_TYPE3_BASE+0x48) /* 16 bits */
121#define SONYPI_TYPE3_MISC (SONYPI_TYPE3_BASE+0x6d) /* 8 bits */
122#define SONYPI_TYPE3_REGION_SIZE 0x20
123#define SONYPI_TYPE3_EVTYPE_OFFSET 0x12
124
117/* battery / brightness addresses */ 125/* battery / brightness addresses */
118#define SONYPI_BAT_FLAGS 0x81 126#define SONYPI_BAT_FLAGS 0x81
119#define SONYPI_LCD_LIGHT 0x96 127#define SONYPI_LCD_LIGHT 0x96
@@ -159,6 +167,10 @@ static struct sonypi_ioport_list sonypi_type2_ioport_list[] = {
159 { 0x0, 0x0 } 167 { 0x0, 0x0 }
160}; 168};
161 169
170/* same as in type 2 models */
171static struct sonypi_ioport_list *sonypi_type3_ioport_list =
172 sonypi_type2_ioport_list;
173
162/* The set of possible interrupts */ 174/* The set of possible interrupts */
163struct sonypi_irq_list { 175struct sonypi_irq_list {
164 u16 irq; 176 u16 irq;
@@ -180,6 +192,9 @@ static struct sonypi_irq_list sonypi_type2_irq_list[] = {
180 { 0, 0x00 } /* no IRQ, 0x00 in SIRQ in AML */ 192 { 0, 0x00 } /* no IRQ, 0x00 in SIRQ in AML */
181}; 193};
182 194
195/* same as in type2 models */
196static struct sonypi_irq_list *sonypi_type3_irq_list = sonypi_type2_irq_list;
197
183#define SONYPI_CAMERA_BRIGHTNESS 0 198#define SONYPI_CAMERA_BRIGHTNESS 0
184#define SONYPI_CAMERA_CONTRAST 1 199#define SONYPI_CAMERA_CONTRAST 1
185#define SONYPI_CAMERA_HUE 2 200#define SONYPI_CAMERA_HUE 2
@@ -223,6 +238,7 @@ static struct sonypi_irq_list sonypi_type2_irq_list[] = {
223#define SONYPI_MEYE_MASK 0x00000400 238#define SONYPI_MEYE_MASK 0x00000400
224#define SONYPI_MEMORYSTICK_MASK 0x00000800 239#define SONYPI_MEMORYSTICK_MASK 0x00000800
225#define SONYPI_BATTERY_MASK 0x00001000 240#define SONYPI_BATTERY_MASK 0x00001000
241#define SONYPI_WIRELESS_MASK 0x00002000
226 242
227struct sonypi_event { 243struct sonypi_event {
228 u8 data; 244 u8 data;
@@ -305,6 +321,13 @@ static struct sonypi_event sonypi_blueev[] = {
305 { 0, 0 } 321 { 0, 0 }
306}; 322};
307 323
324/* The set of possible wireless events */
325static struct sonypi_event sonypi_wlessev[] = {
326 { 0x59, SONYPI_EVENT_WIRELESS_ON },
327 { 0x5a, SONYPI_EVENT_WIRELESS_OFF },
328 { 0, 0 }
329};
330
308/* The set of possible back button events */ 331/* The set of possible back button events */
309static struct sonypi_event sonypi_backev[] = { 332static struct sonypi_event sonypi_backev[] = {
310 { 0x20, SONYPI_EVENT_BACK_PRESSED }, 333 { 0x20, SONYPI_EVENT_BACK_PRESSED },
@@ -383,7 +406,6 @@ static struct sonypi_eventtypes {
383 { SONYPI_DEVICE_MODEL_TYPE2, 0x31, SONYPI_BLUETOOTH_MASK, sonypi_blueev }, 406 { SONYPI_DEVICE_MODEL_TYPE2, 0x31, SONYPI_BLUETOOTH_MASK, sonypi_blueev },
384 { SONYPI_DEVICE_MODEL_TYPE2, 0x08, SONYPI_PKEY_MASK, sonypi_pkeyev }, 407 { SONYPI_DEVICE_MODEL_TYPE2, 0x08, SONYPI_PKEY_MASK, sonypi_pkeyev },
385 { SONYPI_DEVICE_MODEL_TYPE2, 0x11, SONYPI_BACK_MASK, sonypi_backev }, 408 { SONYPI_DEVICE_MODEL_TYPE2, 0x11, SONYPI_BACK_MASK, sonypi_backev },
386 { SONYPI_DEVICE_MODEL_TYPE2, 0x08, SONYPI_HELP_MASK, sonypi_helpev },
387 { SONYPI_DEVICE_MODEL_TYPE2, 0x21, SONYPI_HELP_MASK, sonypi_helpev }, 409 { SONYPI_DEVICE_MODEL_TYPE2, 0x21, SONYPI_HELP_MASK, sonypi_helpev },
388 { SONYPI_DEVICE_MODEL_TYPE2, 0x21, SONYPI_ZOOM_MASK, sonypi_zoomev }, 410 { SONYPI_DEVICE_MODEL_TYPE2, 0x21, SONYPI_ZOOM_MASK, sonypi_zoomev },
389 { SONYPI_DEVICE_MODEL_TYPE2, 0x20, SONYPI_THUMBPHRASE_MASK, sonypi_thumbphraseev }, 411 { SONYPI_DEVICE_MODEL_TYPE2, 0x20, SONYPI_THUMBPHRASE_MASK, sonypi_thumbphraseev },
@@ -391,6 +413,12 @@ static struct sonypi_eventtypes {
391 { SONYPI_DEVICE_MODEL_TYPE2, 0x41, SONYPI_BATTERY_MASK, sonypi_batteryev }, 413 { SONYPI_DEVICE_MODEL_TYPE2, 0x41, SONYPI_BATTERY_MASK, sonypi_batteryev },
392 { SONYPI_DEVICE_MODEL_TYPE2, 0x31, SONYPI_PKEY_MASK, sonypi_pkeyev }, 414 { SONYPI_DEVICE_MODEL_TYPE2, 0x31, SONYPI_PKEY_MASK, sonypi_pkeyev },
393 415
416 { SONYPI_DEVICE_MODEL_TYPE3, 0, 0xffffffff, sonypi_releaseev },
417 { SONYPI_DEVICE_MODEL_TYPE3, 0x21, SONYPI_FNKEY_MASK, sonypi_fnkeyev },
418 { SONYPI_DEVICE_MODEL_TYPE3, 0x31, SONYPI_WIRELESS_MASK, sonypi_wlessev },
419 { SONYPI_DEVICE_MODEL_TYPE3, 0x31, SONYPI_MEMORYSTICK_MASK, sonypi_memorystickev },
420 { SONYPI_DEVICE_MODEL_TYPE3, 0x41, SONYPI_BATTERY_MASK, sonypi_batteryev },
421 { SONYPI_DEVICE_MODEL_TYPE3, 0x31, SONYPI_PKEY_MASK, sonypi_pkeyev },
394 { 0 } 422 { 0 }
395}; 423};
396 424
@@ -563,6 +591,23 @@ static void sonypi_type2_srs(void)
563 udelay(10); 591 udelay(10);
564} 592}
565 593
594static void sonypi_type3_srs(void)
595{
596 u16 v16;
597 u8 v8;
598
599 /* This model type uses the same initialization of
600 * the embedded controller as the type2 models. */
601 sonypi_type2_srs();
602
603 /* Initialization of PCI config space of the LPC interface bridge. */
604 v16 = (sonypi_device.ioport1 & 0xFFF0) | 0x01;
605 pci_write_config_word(sonypi_device.dev, SONYPI_TYPE3_GID2, v16);
606 pci_read_config_byte(sonypi_device.dev, SONYPI_TYPE3_MISC, &v8);
607 v8 = (v8 & 0xCF) | 0x10;
608 pci_write_config_byte(sonypi_device.dev, SONYPI_TYPE3_MISC, v8);
609}
610
566/* Disables the device - this comes from the AML code in the ACPI bios */ 611/* Disables the device - this comes from the AML code in the ACPI bios */
567static void sonypi_type1_dis(void) 612static void sonypi_type1_dis(void)
568{ 613{
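Type3 setup reuses the type2 embedded-controller initialization and then programs the LPC bridge through PCI config space: a 16-bit decode window at SONYPI_TYPE3_GID2, followed by a read-modify-write of the MISC byte that clears bits 4-5 and forces bit 4 on. The read-modify-write step, modelled with illustrative accessors in place of the PCI config helpers:

static void set_type3_misc(unsigned char (*read8)(int reg),
                           void (*write8)(int reg, unsigned char val))
{
	int reg = 0x40 + 0x6d;            /* SONYPI_TYPE3_MISC */
	unsigned char v = read8(reg);

	v = (v & 0xCF) | 0x10;            /* keep the other bits, force bit 4 */
	write8(reg, v);
}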
@@ -587,6 +632,13 @@ static void sonypi_type2_dis(void)
587 printk(KERN_WARNING "ec_write failed\n"); 632 printk(KERN_WARNING "ec_write failed\n");
588} 633}
589 634
635static void sonypi_type3_dis(void)
636{
637 sonypi_type2_dis();
638 udelay(10);
639 pci_write_config_word(sonypi_device.dev, SONYPI_TYPE3_GID2, 0);
640}
641
590static u8 sonypi_call1(u8 dev) 642static u8 sonypi_call1(u8 dev)
591{ 643{
592 u8 v1, v2; 644 u8 v1, v2;
@@ -1067,10 +1119,17 @@ static struct miscdevice sonypi_misc_device = {
1067 1119
1068static void sonypi_enable(unsigned int camera_on) 1120static void sonypi_enable(unsigned int camera_on)
1069{ 1121{
1070 if (sonypi_device.model == SONYPI_DEVICE_MODEL_TYPE2) 1122 switch (sonypi_device.model) {
1071 sonypi_type2_srs(); 1123 case SONYPI_DEVICE_MODEL_TYPE1:
1072 else
1073 sonypi_type1_srs(); 1124 sonypi_type1_srs();
1125 break;
1126 case SONYPI_DEVICE_MODEL_TYPE2:
1127 sonypi_type2_srs();
1128 break;
1129 case SONYPI_DEVICE_MODEL_TYPE3:
1130 sonypi_type3_srs();
1131 break;
1132 }
1074 1133
1075 sonypi_call1(0x82); 1134 sonypi_call1(0x82);
1076 sonypi_call2(0x81, 0xff); 1135 sonypi_call2(0x81, 0xff);
@@ -1094,10 +1153,18 @@ static int sonypi_disable(void)
1094 if (!SONYPI_ACPI_ACTIVE && fnkeyinit) 1153 if (!SONYPI_ACPI_ACTIVE && fnkeyinit)
1095 outb(0xf1, 0xb2); 1154 outb(0xf1, 0xb2);
1096 1155
1097 if (sonypi_device.model == SONYPI_DEVICE_MODEL_TYPE2) 1156 switch (sonypi_device.model) {
1098 sonypi_type2_dis(); 1157 case SONYPI_DEVICE_MODEL_TYPE1:
1099 else
1100 sonypi_type1_dis(); 1158 sonypi_type1_dis();
1159 break;
1160 case SONYPI_DEVICE_MODEL_TYPE2:
1161 sonypi_type2_dis();
1162 break;
1163 case SONYPI_DEVICE_MODEL_TYPE3:
1164 sonypi_type3_dis();
1165 break;
1166 }
1167
1101 return 0; 1168 return 0;
1102} 1169}
1103 1170
@@ -1143,12 +1210,16 @@ static int __devinit sonypi_probe(void)
1143 struct sonypi_irq_list *irq_list; 1210 struct sonypi_irq_list *irq_list;
1144 struct pci_dev *pcidev; 1211 struct pci_dev *pcidev;
1145 1212
1146 pcidev = pci_get_device(PCI_VENDOR_ID_INTEL, 1213 if ((pcidev = pci_get_device(PCI_VENDOR_ID_INTEL,
1147 PCI_DEVICE_ID_INTEL_82371AB_3, NULL); 1214 PCI_DEVICE_ID_INTEL_82371AB_3, NULL)))
1215 sonypi_device.model = SONYPI_DEVICE_MODEL_TYPE1;
1216 else if ((pcidev = pci_get_device(PCI_VENDOR_ID_INTEL,
1217 PCI_DEVICE_ID_INTEL_ICH6_1, NULL)))
1218 sonypi_device.model = SONYPI_DEVICE_MODEL_TYPE3;
1219 else
1220 sonypi_device.model = SONYPI_DEVICE_MODEL_TYPE2;
1148 1221
1149 sonypi_device.dev = pcidev; 1222 sonypi_device.dev = pcidev;
1150 sonypi_device.model = pcidev ?
1151 SONYPI_DEVICE_MODEL_TYPE1 : SONYPI_DEVICE_MODEL_TYPE2;
1152 1223
1153 spin_lock_init(&sonypi_device.fifo_lock); 1224 spin_lock_init(&sonypi_device.fifo_lock);
1154 sonypi_device.fifo = kfifo_alloc(SONYPI_BUF_SIZE, GFP_KERNEL, 1225 sonypi_device.fifo = kfifo_alloc(SONYPI_BUF_SIZE, GFP_KERNEL,
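Model selection in sonypi_probe() is now a fall-through probe: if the PIIX4-era bridge (82371AB_3) is present the hardware is type1, if the ICH6_1 bridge is present it is the new type3, and anything else keeps the old type2 default. A sketch of that decision, with an illustrative probe helper standing in for pci_get_device():

enum sonypi_model_sketch { MODEL_TYPE1 = 1, MODEL_TYPE2 = 2, MODEL_TYPE3 = 3 };

static enum sonypi_model_sketch detect_model(int (*have_bridge)(const char *name))
{
	if (have_bridge("82371AB_3"))
		return MODEL_TYPE1;
	if (have_bridge("ICH6_1"))
		return MODEL_TYPE3;
	return MODEL_TYPE2;               /* default when neither bridge is found */
}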
@@ -1176,16 +1247,22 @@ static int __devinit sonypi_probe(void)
1176 goto out_miscreg; 1247 goto out_miscreg;
1177 } 1248 }
1178 1249
1179 if (sonypi_device.model == SONYPI_DEVICE_MODEL_TYPE2) { 1250
1251 if (sonypi_device.model == SONYPI_DEVICE_MODEL_TYPE1) {
1252 ioport_list = sonypi_type1_ioport_list;
1253 sonypi_device.region_size = SONYPI_TYPE1_REGION_SIZE;
1254 sonypi_device.evtype_offset = SONYPI_TYPE1_EVTYPE_OFFSET;
1255 irq_list = sonypi_type1_irq_list;
1256 } else if (sonypi_device.model == SONYPI_DEVICE_MODEL_TYPE2) {
1180 ioport_list = sonypi_type2_ioport_list; 1257 ioport_list = sonypi_type2_ioport_list;
1181 sonypi_device.region_size = SONYPI_TYPE2_REGION_SIZE; 1258 sonypi_device.region_size = SONYPI_TYPE2_REGION_SIZE;
1182 sonypi_device.evtype_offset = SONYPI_TYPE2_EVTYPE_OFFSET; 1259 sonypi_device.evtype_offset = SONYPI_TYPE2_EVTYPE_OFFSET;
1183 irq_list = sonypi_type2_irq_list; 1260 irq_list = sonypi_type2_irq_list;
1184 } else { 1261 } else {
1185 ioport_list = sonypi_type1_ioport_list; 1262 ioport_list = sonypi_type3_ioport_list;
1186 sonypi_device.region_size = SONYPI_TYPE1_REGION_SIZE; 1263 sonypi_device.region_size = SONYPI_TYPE3_REGION_SIZE;
1187 sonypi_device.evtype_offset = SONYPI_TYPE1_EVTYPE_OFFSET; 1264 sonypi_device.evtype_offset = SONYPI_TYPE3_EVTYPE_OFFSET;
1188 irq_list = sonypi_type1_irq_list; 1265 irq_list = sonypi_type3_irq_list;
1189 } 1266 }
1190 1267
1191 for (i = 0; ioport_list[i].port1; i++) { 1268 for (i = 0; ioport_list[i].port1; i++) {
@@ -1274,11 +1351,10 @@ static int __devinit sonypi_probe(void)
1274 1351
1275 printk(KERN_INFO "sonypi: Sony Programmable I/O Controller Driver" 1352 printk(KERN_INFO "sonypi: Sony Programmable I/O Controller Driver"
1276 "v%s.\n", SONYPI_DRIVER_VERSION); 1353 "v%s.\n", SONYPI_DRIVER_VERSION);
1277 printk(KERN_INFO "sonypi: detected %s model, " 1354 printk(KERN_INFO "sonypi: detected type%d model, "
1278 "verbose = %d, fnkeyinit = %s, camera = %s, " 1355 "verbose = %d, fnkeyinit = %s, camera = %s, "
1279 "compat = %s, mask = 0x%08lx, useinput = %s, acpi = %s\n", 1356 "compat = %s, mask = 0x%08lx, useinput = %s, acpi = %s\n",
1280 (sonypi_device.model == SONYPI_DEVICE_MODEL_TYPE1) ? 1357 sonypi_device.model,
1281 "type1" : "type2",
1282 verbose, 1358 verbose,
1283 fnkeyinit ? "on" : "off", 1359 fnkeyinit ? "on" : "off",
1284 camera ? "on" : "off", 1360 camera ? "on" : "off",
diff --git a/drivers/char/tpm/tpm_atmel.c b/drivers/char/tpm/tpm_atmel.c
index cc2cc77fd174..c0d64914595f 100644
--- a/drivers/char/tpm/tpm_atmel.c
+++ b/drivers/char/tpm/tpm_atmel.c
@@ -206,6 +206,9 @@ static struct pci_device_id tpm_pci_tbl[] __devinitdata = {
206 {PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801DB_0)}, 206 {PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801DB_0)},
207 {PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801DB_12)}, 207 {PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801DB_12)},
208 {PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801EB_0)}, 208 {PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801EB_0)},
209 {PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_ICH6_0)},
210 {PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_ICH6_1)},
211 {PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_ICH7_0)},
209 {PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_8111_LPC)}, 212 {PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_8111_LPC)},
210 {PCI_DEVICE(PCI_VENDOR_ID_SERVERWORKS, PCI_DEVICE_ID_SERVERWORKS_CSB6LPC)}, 213 {PCI_DEVICE(PCI_VENDOR_ID_SERVERWORKS, PCI_DEVICE_ID_SERVERWORKS_CSB6LPC)},
211 {0,} 214 {0,}
diff --git a/drivers/char/tty_io.c b/drivers/char/tty_io.c
index 6e4be3bb2d89..9d657127f313 100644
--- a/drivers/char/tty_io.c
+++ b/drivers/char/tty_io.c
@@ -153,7 +153,6 @@ static int tty_release(struct inode *, struct file *);
153int tty_ioctl(struct inode * inode, struct file * file, 153int tty_ioctl(struct inode * inode, struct file * file,
154 unsigned int cmd, unsigned long arg); 154 unsigned int cmd, unsigned long arg);
155static int tty_fasync(int fd, struct file * filp, int on); 155static int tty_fasync(int fd, struct file * filp, int on);
156extern void rs_360_init(void);
157static void release_mem(struct tty_struct *tty, int idx); 156static void release_mem(struct tty_struct *tty, int idx);
158 157
159 158
@@ -2911,11 +2910,6 @@ void __init console_init(void)
2911#ifdef CONFIG_EARLY_PRINTK 2910#ifdef CONFIG_EARLY_PRINTK
2912 disable_early_printk(); 2911 disable_early_printk();
2913#endif 2912#endif
2914#ifdef CONFIG_SERIAL_68360
2915 /* This is not a console initcall. I know not what it's doing here.
2916 So I haven't moved it. dwmw2 */
2917 rs_360_init();
2918#endif
2919 call = __con_initcall_start; 2913 call = __con_initcall_start;
2920 while (call < __con_initcall_end) { 2914 while (call < __con_initcall_end) {
2921 (*call)(); 2915 (*call)();
diff --git a/drivers/char/vt.c b/drivers/char/vt.c
index 665103ccaee8..b8d0c290b0db 100644
--- a/drivers/char/vt.c
+++ b/drivers/char/vt.c
@@ -434,21 +434,25 @@ void invert_screen(struct vc_data *vc, int offset, int count, int viewed)
434/* used by selection: complement pointer position */ 434/* used by selection: complement pointer position */
435void complement_pos(struct vc_data *vc, int offset) 435void complement_pos(struct vc_data *vc, int offset)
436{ 436{
437 static unsigned short *p; 437 static int old_offset = -1;
438 static unsigned short old; 438 static unsigned short old;
439 static unsigned short oldx, oldy; 439 static unsigned short oldx, oldy;
440 440
441 WARN_CONSOLE_UNLOCKED(); 441 WARN_CONSOLE_UNLOCKED();
442 442
443 if (p) { 443 if (old_offset != -1 && old_offset >= 0 &&
444 scr_writew(old, p); 444 old_offset < vc->vc_screenbuf_size) {
445 scr_writew(old, screenpos(vc, old_offset, 1));
445 if (DO_UPDATE(vc)) 446 if (DO_UPDATE(vc))
446 vc->vc_sw->con_putc(vc, old, oldy, oldx); 447 vc->vc_sw->con_putc(vc, old, oldy, oldx);
447 } 448 }
448 if (offset == -1) 449
449 p = NULL; 450 old_offset = offset;
450 else { 451
452 if (offset != -1 && offset >= 0 &&
453 offset < vc->vc_screenbuf_size) {
451 unsigned short new; 454 unsigned short new;
455 unsigned short *p;
452 p = screenpos(vc, offset, 1); 456 p = screenpos(vc, offset, 1);
453 old = scr_readw(p); 457 old = scr_readw(p);
454 new = old ^ vc->vc_complement_mask; 458 new = old ^ vc->vc_complement_mask;
@@ -459,6 +463,7 @@ void complement_pos(struct vc_data *vc, int offset)
459 vc->vc_sw->con_putc(vc, new, oldy, oldx); 463 vc->vc_sw->con_putc(vc, new, oldy, oldx);
460 } 464 }
461 } 465 }
466
462} 467}
463 468
464static void insert_char(struct vc_data *vc, unsigned int nr) 469static void insert_char(struct vc_data *vc, unsigned int nr)
@@ -2272,7 +2277,9 @@ int tioclinux(struct tty_struct *tty, unsigned long arg)
2272 ret = paste_selection(tty); 2277 ret = paste_selection(tty);
2273 break; 2278 break;
2274 case TIOCL_UNBLANKSCREEN: 2279 case TIOCL_UNBLANKSCREEN:
2280 acquire_console_sem();
2275 unblank_screen(); 2281 unblank_screen();
2282 release_console_sem();
2276 break; 2283 break;
2277 case TIOCL_SELLOADLUT: 2284 case TIOCL_SELLOADLUT:
2278 ret = sel_loadlut(p); 2285 ret = sel_loadlut(p);
@@ -2317,8 +2324,10 @@ int tioclinux(struct tty_struct *tty, unsigned long arg)
2317 } 2324 }
2318 break; 2325 break;
2319 case TIOCL_BLANKSCREEN: /* until explicitly unblanked, not only poked */ 2326 case TIOCL_BLANKSCREEN: /* until explicitly unblanked, not only poked */
2327 acquire_console_sem();
2320 ignore_poke = 1; 2328 ignore_poke = 1;
2321 do_blank_screen(0); 2329 do_blank_screen(0);
2330 release_console_sem();
2322 break; 2331 break;
2323 case TIOCL_BLANKEDSCREEN: 2332 case TIOCL_BLANKEDSCREEN:
2324 ret = console_blanked; 2333 ret = console_blanked;
diff --git a/drivers/firmware/Kconfig b/drivers/firmware/Kconfig
index 5b29c3b2a331..327b58e64875 100644
--- a/drivers/firmware/Kconfig
+++ b/drivers/firmware/Kconfig
@@ -58,4 +58,31 @@ config EFI_PCDP
58 58
59 See <http://www.dig64.org/specifications/DIG64_HCDPv20_042804.pdf> 59 See <http://www.dig64.org/specifications/DIG64_HCDPv20_042804.pdf>
60 60
61config DELL_RBU
62 tristate "BIOS update support for DELL systems via sysfs"
63 select FW_LOADER
64 help
65 Say m if you want to have the option of updating the BIOS for your
66 DELL system. Note you need a Dell OpenManage or Dell Update package (DUP)
67 supporting application to communicate with the BIOS regarding the new
68 image for the image update to take effect.
69 See <file:Documentation/dell_rbu.txt> for more details on the driver.
70
71config DCDBAS
72 tristate "Dell Systems Management Base Driver"
73 depends on X86 || X86_64
74 default m
75 help
76 The Dell Systems Management Base Driver provides a sysfs interface
77 for systems management software to perform System Management
78 Interrupts (SMIs) and Host Control Actions (system power cycle or
79 power off after OS shutdown) on certain Dell systems.
80
81 See <file:Documentation/dcdbas.txt> for more details on the driver
82 and the Dell systems on which Dell systems management software makes
83 use of this driver.
84
85 Say Y or M here to enable the driver for use by Dell systems
86 management software such as Dell OpenManage.
87
61endmenu 88endmenu
diff --git a/drivers/firmware/Makefile b/drivers/firmware/Makefile
index 90fd0b26db8b..85429979d0db 100644
--- a/drivers/firmware/Makefile
+++ b/drivers/firmware/Makefile
@@ -4,3 +4,5 @@
4obj-$(CONFIG_EDD) += edd.o 4obj-$(CONFIG_EDD) += edd.o
5obj-$(CONFIG_EFI_VARS) += efivars.o 5obj-$(CONFIG_EFI_VARS) += efivars.o
6obj-$(CONFIG_EFI_PCDP) += pcdp.o 6obj-$(CONFIG_EFI_PCDP) += pcdp.o
7obj-$(CONFIG_DELL_RBU) += dell_rbu.o
8obj-$(CONFIG_DCDBAS) += dcdbas.o
diff --git a/drivers/firmware/dcdbas.c b/drivers/firmware/dcdbas.c
new file mode 100644
index 000000000000..955537fe9958
--- /dev/null
+++ b/drivers/firmware/dcdbas.c
@@ -0,0 +1,596 @@
1/*
2 * dcdbas.c: Dell Systems Management Base Driver
3 *
4 * The Dell Systems Management Base Driver provides a sysfs interface for
5 * systems management software to perform System Management Interrupts (SMIs)
6 * and Host Control Actions (power cycle or power off after OS shutdown) on
7 * Dell systems.
8 *
9 * See Documentation/dcdbas.txt for more information.
10 *
11 * Copyright (C) 1995-2005 Dell Inc.
12 *
13 * This program is free software; you can redistribute it and/or modify
14 * it under the terms of the GNU General Public License v2.0 as published by
15 * the Free Software Foundation.
16 *
17 * This program is distributed in the hope that it will be useful,
18 * but WITHOUT ANY WARRANTY; without even the implied warranty of
19 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
20 * GNU General Public License for more details.
21 */
22
23#include <linux/device.h>
24#include <linux/dma-mapping.h>
25#include <linux/errno.h>
26#include <linux/init.h>
27#include <linux/kernel.h>
28#include <linux/mc146818rtc.h>
29#include <linux/module.h>
30#include <linux/reboot.h>
31#include <linux/sched.h>
32#include <linux/smp.h>
33#include <linux/spinlock.h>
34#include <linux/string.h>
35#include <linux/types.h>
36#include <asm/io.h>
37#include <asm/semaphore.h>
38
39#include "dcdbas.h"
40
41#define DRIVER_NAME "dcdbas"
42#define DRIVER_VERSION "5.6.0-1"
43#define DRIVER_DESCRIPTION "Dell Systems Management Base Driver"
44
45static struct platform_device *dcdbas_pdev;
46
47static u8 *smi_data_buf;
48static dma_addr_t smi_data_buf_handle;
49static unsigned long smi_data_buf_size;
50static u32 smi_data_buf_phys_addr;
51static DECLARE_MUTEX(smi_data_lock);
52
53static unsigned int host_control_action;
54static unsigned int host_control_smi_type;
55static unsigned int host_control_on_shutdown;
56
57/**
58 * smi_data_buf_free: free SMI data buffer
59 */
60static void smi_data_buf_free(void)
61{
62 if (!smi_data_buf)
63 return;
64
65 dev_dbg(&dcdbas_pdev->dev, "%s: phys: %x size: %lu\n",
66 __FUNCTION__, smi_data_buf_phys_addr, smi_data_buf_size);
67
68 dma_free_coherent(&dcdbas_pdev->dev, smi_data_buf_size, smi_data_buf,
69 smi_data_buf_handle);
70 smi_data_buf = NULL;
71 smi_data_buf_handle = 0;
72 smi_data_buf_phys_addr = 0;
73 smi_data_buf_size = 0;
74}
75
76/**
77 * smi_data_buf_realloc: grow SMI data buffer if needed
78 */
79static int smi_data_buf_realloc(unsigned long size)
80{
81 void *buf;
82 dma_addr_t handle;
83
84 if (smi_data_buf_size >= size)
85 return 0;
86
87 if (size > MAX_SMI_DATA_BUF_SIZE)
88 return -EINVAL;
89
90 /* new buffer is needed */
91 buf = dma_alloc_coherent(&dcdbas_pdev->dev, size, &handle, GFP_KERNEL);
92 if (!buf) {
93 dev_dbg(&dcdbas_pdev->dev,
94 "%s: failed to allocate memory size %lu\n",
95 __FUNCTION__, size);
96 return -ENOMEM;
97 }
98 /* memory zeroed by dma_alloc_coherent */
99
100 if (smi_data_buf)
101 memcpy(buf, smi_data_buf, smi_data_buf_size);
102
103 /* free any existing buffer */
104 smi_data_buf_free();
105
106 /* set up new buffer for use */
107 smi_data_buf = buf;
108 smi_data_buf_handle = handle;
109 smi_data_buf_phys_addr = (u32) virt_to_phys(buf);
110 smi_data_buf_size = size;
111
112 dev_dbg(&dcdbas_pdev->dev, "%s: phys: %x size: %lu\n",
113 __FUNCTION__, smi_data_buf_phys_addr, smi_data_buf_size);
114
115 return 0;
116}
117
118static ssize_t smi_data_buf_phys_addr_show(struct device *dev,
119 struct device_attribute *attr,
120 char *buf)
121{
122 return sprintf(buf, "%x\n", smi_data_buf_phys_addr);
123}
124
125static ssize_t smi_data_buf_size_show(struct device *dev,
126 struct device_attribute *attr,
127 char *buf)
128{
129 return sprintf(buf, "%lu\n", smi_data_buf_size);
130}
131
132static ssize_t smi_data_buf_size_store(struct device *dev,
133 struct device_attribute *attr,
134 const char *buf, size_t count)
135{
136 unsigned long buf_size;
137 ssize_t ret;
138
139 buf_size = simple_strtoul(buf, NULL, 10);
140
141 /* make sure SMI data buffer is at least buf_size */
142 down(&smi_data_lock);
143 ret = smi_data_buf_realloc(buf_size);
144 up(&smi_data_lock);
145 if (ret)
146 return ret;
147
148 return count;
149}
150
151static ssize_t smi_data_read(struct kobject *kobj, char *buf, loff_t pos,
152 size_t count)
153{
154 size_t max_read;
155 ssize_t ret;
156
157 down(&smi_data_lock);
158
159 if (pos >= smi_data_buf_size) {
160 ret = 0;
161 goto out;
162 }
163
164 max_read = smi_data_buf_size - pos;
165 ret = min(max_read, count);
166 memcpy(buf, smi_data_buf + pos, ret);
167out:
168 up(&smi_data_lock);
169 return ret;
170}
171
172static ssize_t smi_data_write(struct kobject *kobj, char *buf, loff_t pos,
173 size_t count)
174{
175 ssize_t ret;
176
177 down(&smi_data_lock);
178
179 ret = smi_data_buf_realloc(pos + count);
180 if (ret)
181 goto out;
182
183 memcpy(smi_data_buf + pos, buf, count);
184 ret = count;
185out:
186 up(&smi_data_lock);
187 return ret;
188}
189
190static ssize_t host_control_action_show(struct device *dev,
191 struct device_attribute *attr,
192 char *buf)
193{
194 return sprintf(buf, "%u\n", host_control_action);
195}
196
197static ssize_t host_control_action_store(struct device *dev,
198 struct device_attribute *attr,
199 const char *buf, size_t count)
200{
201 ssize_t ret;
202
203 /* make sure buffer is available for host control command */
204 down(&smi_data_lock);
205 ret = smi_data_buf_realloc(sizeof(struct apm_cmd));
206 up(&smi_data_lock);
207 if (ret)
208 return ret;
209
210 host_control_action = simple_strtoul(buf, NULL, 10);
211 return count;
212}
213
214static ssize_t host_control_smi_type_show(struct device *dev,
215 struct device_attribute *attr,
216 char *buf)
217{
218 return sprintf(buf, "%u\n", host_control_smi_type);
219}
220
221static ssize_t host_control_smi_type_store(struct device *dev,
222 struct device_attribute *attr,
223 const char *buf, size_t count)
224{
225 host_control_smi_type = simple_strtoul(buf, NULL, 10);
226 return count;
227}
228
229static ssize_t host_control_on_shutdown_show(struct device *dev,
230 struct device_attribute *attr,
231 char *buf)
232{
233 return sprintf(buf, "%u\n", host_control_on_shutdown);
234}
235
236static ssize_t host_control_on_shutdown_store(struct device *dev,
237 struct device_attribute *attr,
238 const char *buf, size_t count)
239{
240 host_control_on_shutdown = simple_strtoul(buf, NULL, 10);
241 return count;
242}
243
244/**
245 * smi_request: generate SMI request
246 *
247 * Called with smi_data_lock.
248 */
249static int smi_request(struct smi_cmd *smi_cmd)
250{
251 cpumask_t old_mask;
252 int ret = 0;
253
254 if (smi_cmd->magic != SMI_CMD_MAGIC) {
255 dev_info(&dcdbas_pdev->dev, "%s: invalid magic value\n",
256 __FUNCTION__);
257 return -EBADR;
258 }
259
260 /* SMI requires CPU 0 */
261 old_mask = current->cpus_allowed;
262 set_cpus_allowed(current, cpumask_of_cpu(0));
263 if (smp_processor_id() != 0) {
264 dev_dbg(&dcdbas_pdev->dev, "%s: failed to get CPU 0\n",
265 __FUNCTION__);
266 ret = -EBUSY;
267 goto out;
268 }
269
270 /* generate SMI */
271 asm volatile (
272 "outb %b0,%w1"
273 : /* no output args */
274 : "a" (smi_cmd->command_code),
275 "d" (smi_cmd->command_address),
276 "b" (smi_cmd->ebx),
277 "c" (smi_cmd->ecx)
278 : "memory"
279 );
280
281out:
282 set_cpus_allowed(current, old_mask);
283 return ret;
284}
285
286/**
287 * smi_request_store:
288 *
289 * The valid values are:
290 * 0: zero SMI data buffer
291 * 1: generate calling interface SMI
292 * 2: generate raw SMI
293 *
294 * User application writes smi_cmd to smi_data before telling driver
295 * to generate SMI.
296 */
297static ssize_t smi_request_store(struct device *dev,
298 struct device_attribute *attr,
299 const char *buf, size_t count)
300{
301 struct smi_cmd *smi_cmd;
302 unsigned long val = simple_strtoul(buf, NULL, 10);
303 ssize_t ret;
304
305 down(&smi_data_lock);
306
307 if (smi_data_buf_size < sizeof(struct smi_cmd)) {
308 ret = -ENODEV;
309 goto out;
310 }
311 smi_cmd = (struct smi_cmd *)smi_data_buf;
312
313 switch (val) {
314 case 2:
315 /* Raw SMI */
316 ret = smi_request(smi_cmd);
317 if (!ret)
318 ret = count;
319 break;
320 case 1:
321 /* Calling Interface SMI */
322 smi_cmd->ebx = (u32) virt_to_phys(smi_cmd->command_buffer);
323 ret = smi_request(smi_cmd);
324 if (!ret)
325 ret = count;
326 break;
327 case 0:
328 memset(smi_data_buf, 0, smi_data_buf_size);
329 ret = count;
330 break;
331 default:
332 ret = -EINVAL;
333 break;
334 }
335
336out:
337 up(&smi_data_lock);
338 return ret;
339}
340
341/**
342 * host_control_smi: generate host control SMI
343 *
344 * Caller must set up the host control command in smi_data_buf.
345 */
346static int host_control_smi(void)
347{
348 struct apm_cmd *apm_cmd;
349 u8 *data;
350 unsigned long flags;
351 u32 num_ticks;
352 s8 cmd_status;
353 u8 index;
354
355 apm_cmd = (struct apm_cmd *)smi_data_buf;
356 apm_cmd->status = ESM_STATUS_CMD_UNSUCCESSFUL;
357
358 switch (host_control_smi_type) {
359 case HC_SMITYPE_TYPE1:
360 spin_lock_irqsave(&rtc_lock, flags);
361 /* write SMI data buffer physical address */
362 data = (u8 *)&smi_data_buf_phys_addr;
363 for (index = PE1300_CMOS_CMD_STRUCT_PTR;
364 index < (PE1300_CMOS_CMD_STRUCT_PTR + 4);
365 index++, data++) {
366 outb(index,
367 (CMOS_BASE_PORT + CMOS_PAGE2_INDEX_PORT_PIIX4));
368 outb(*data,
369 (CMOS_BASE_PORT + CMOS_PAGE2_DATA_PORT_PIIX4));
370 }
371
 372		/* first set status to -1 as called for by the spec */
373 cmd_status = ESM_STATUS_CMD_UNSUCCESSFUL;
374 outb((u8) cmd_status, PCAT_APM_STATUS_PORT);
375
376 /* generate SMM call */
377 outb(ESM_APM_CMD, PCAT_APM_CONTROL_PORT);
378 spin_unlock_irqrestore(&rtc_lock, flags);
379
380 /* wait a few to see if it executed */
381 num_ticks = TIMEOUT_USEC_SHORT_SEMA_BLOCKING;
382 while ((cmd_status = inb(PCAT_APM_STATUS_PORT))
383 == ESM_STATUS_CMD_UNSUCCESSFUL) {
384 num_ticks--;
385 if (num_ticks == EXPIRED_TIMER)
386 return -ETIME;
387 }
388 break;
389
390 case HC_SMITYPE_TYPE2:
391 case HC_SMITYPE_TYPE3:
392 spin_lock_irqsave(&rtc_lock, flags);
393 /* write SMI data buffer physical address */
394 data = (u8 *)&smi_data_buf_phys_addr;
395 for (index = PE1400_CMOS_CMD_STRUCT_PTR;
396 index < (PE1400_CMOS_CMD_STRUCT_PTR + 4);
397 index++, data++) {
398 outb(index, (CMOS_BASE_PORT + CMOS_PAGE1_INDEX_PORT));
399 outb(*data, (CMOS_BASE_PORT + CMOS_PAGE1_DATA_PORT));
400 }
401
402 /* generate SMM call */
403 if (host_control_smi_type == HC_SMITYPE_TYPE3)
404 outb(ESM_APM_CMD, PCAT_APM_CONTROL_PORT);
405 else
406 outb(ESM_APM_CMD, PE1400_APM_CONTROL_PORT);
407
408 /* restore RTC index pointer since it was written to above */
409 CMOS_READ(RTC_REG_C);
410 spin_unlock_irqrestore(&rtc_lock, flags);
411
412 /* read control port back to serialize write */
413 cmd_status = inb(PE1400_APM_CONTROL_PORT);
414
415 /* wait a few to see if it executed */
416 num_ticks = TIMEOUT_USEC_SHORT_SEMA_BLOCKING;
417 while (apm_cmd->status == ESM_STATUS_CMD_UNSUCCESSFUL) {
418 num_ticks--;
419 if (num_ticks == EXPIRED_TIMER)
420 return -ETIME;
421 }
422 break;
423
424 default:
425 dev_dbg(&dcdbas_pdev->dev, "%s: invalid SMI type %u\n",
426 __FUNCTION__, host_control_smi_type);
427 return -ENOSYS;
428 }
429
430 return 0;
431}
432
433/**
434 * dcdbas_host_control: initiate host control
435 *
436 * This function is called by the driver after the system has
437 * finished shutting down if the user application specified a
438 * host control action to perform on shutdown. It is safe to
439 * use smi_data_buf at this point because the system has finished
440 * shutting down and no userspace apps are running.
441 */
442static void dcdbas_host_control(void)
443{
444 struct apm_cmd *apm_cmd;
445 u8 action;
446
447 if (host_control_action == HC_ACTION_NONE)
448 return;
449
450 action = host_control_action;
451 host_control_action = HC_ACTION_NONE;
452
453 if (!smi_data_buf) {
454 dev_dbg(&dcdbas_pdev->dev, "%s: no SMI buffer\n", __FUNCTION__);
455 return;
456 }
457
458 if (smi_data_buf_size < sizeof(struct apm_cmd)) {
459 dev_dbg(&dcdbas_pdev->dev, "%s: SMI buffer too small\n",
460 __FUNCTION__);
461 return;
462 }
463
464 apm_cmd = (struct apm_cmd *)smi_data_buf;
465
466 /* power off takes precedence */
467 if (action & HC_ACTION_HOST_CONTROL_POWEROFF) {
468 apm_cmd->command = ESM_APM_POWER_CYCLE;
469 apm_cmd->reserved = 0;
470 *((s16 *)&apm_cmd->parameters.shortreq.parm[0]) = (s16) 0;
471 host_control_smi();
472 } else if (action & HC_ACTION_HOST_CONTROL_POWERCYCLE) {
473 apm_cmd->command = ESM_APM_POWER_CYCLE;
474 apm_cmd->reserved = 0;
475 *((s16 *)&apm_cmd->parameters.shortreq.parm[0]) = (s16) 20;
476 host_control_smi();
477 }
478}
479
480/**
481 * dcdbas_reboot_notify: handle reboot notification for host control
482 */
483static int dcdbas_reboot_notify(struct notifier_block *nb, unsigned long code,
484 void *unused)
485{
486 static unsigned int notify_cnt = 0;
487
488 switch (code) {
489 case SYS_DOWN:
490 case SYS_HALT:
491 case SYS_POWER_OFF:
492 if (host_control_on_shutdown) {
493 /* firmware is going to perform host control action */
494 if (++notify_cnt == 2) {
495 printk(KERN_WARNING
496 "Please wait for shutdown "
497 "action to complete...\n");
498 dcdbas_host_control();
499 }
500 /*
501 * register again and initiate the host control
502 * action on the second notification to allow
503 * everyone that registered to be notified
504 */
505 register_reboot_notifier(nb);
506 }
507 break;
508 }
509
510 return NOTIFY_DONE;
511}
512
513static struct notifier_block dcdbas_reboot_nb = {
514 .notifier_call = dcdbas_reboot_notify,
515 .next = NULL,
516 .priority = 0
517};
518
519static DCDBAS_BIN_ATTR_RW(smi_data);
520
521static struct bin_attribute *dcdbas_bin_attrs[] = {
522 &bin_attr_smi_data,
523 NULL
524};
525
526static DCDBAS_DEV_ATTR_RW(smi_data_buf_size);
527static DCDBAS_DEV_ATTR_RO(smi_data_buf_phys_addr);
528static DCDBAS_DEV_ATTR_WO(smi_request);
529static DCDBAS_DEV_ATTR_RW(host_control_action);
530static DCDBAS_DEV_ATTR_RW(host_control_smi_type);
531static DCDBAS_DEV_ATTR_RW(host_control_on_shutdown);
532
533static struct device_attribute *dcdbas_dev_attrs[] = {
534 &dev_attr_smi_data_buf_size,
535 &dev_attr_smi_data_buf_phys_addr,
536 &dev_attr_smi_request,
537 &dev_attr_host_control_action,
538 &dev_attr_host_control_smi_type,
539 &dev_attr_host_control_on_shutdown,
540 NULL
541};
542
543/**
544 * dcdbas_init: initialize driver
545 */
546static int __init dcdbas_init(void)
547{
548 int i;
549
550 host_control_action = HC_ACTION_NONE;
551 host_control_smi_type = HC_SMITYPE_NONE;
552
553 dcdbas_pdev = platform_device_register_simple(DRIVER_NAME, -1, NULL, 0);
554 if (IS_ERR(dcdbas_pdev))
555 return PTR_ERR(dcdbas_pdev);
556
557 /*
558 * BIOS SMI calls require buffer addresses be in 32-bit address space.
559 * This is done by setting the DMA mask below.
560 */
561 dcdbas_pdev->dev.coherent_dma_mask = DMA_32BIT_MASK;
562 dcdbas_pdev->dev.dma_mask = &dcdbas_pdev->dev.coherent_dma_mask;
563
564 register_reboot_notifier(&dcdbas_reboot_nb);
565
566 for (i = 0; dcdbas_bin_attrs[i]; i++)
567 sysfs_create_bin_file(&dcdbas_pdev->dev.kobj,
568 dcdbas_bin_attrs[i]);
569
570 for (i = 0; dcdbas_dev_attrs[i]; i++)
571 device_create_file(&dcdbas_pdev->dev, dcdbas_dev_attrs[i]);
572
573 dev_info(&dcdbas_pdev->dev, "%s (version %s)\n",
574 DRIVER_DESCRIPTION, DRIVER_VERSION);
575
576 return 0;
577}
578
579/**
580 * dcdbas_exit: perform driver cleanup
581 */
582static void __exit dcdbas_exit(void)
583{
584 platform_device_unregister(dcdbas_pdev);
585 unregister_reboot_notifier(&dcdbas_reboot_nb);
586 smi_data_buf_free();
587}
588
589module_init(dcdbas_init);
590module_exit(dcdbas_exit);
591
592MODULE_DESCRIPTION(DRIVER_DESCRIPTION " (version " DRIVER_VERSION ")");
593MODULE_VERSION(DRIVER_VERSION);
594MODULE_AUTHOR("Dell Inc.");
595MODULE_LICENSE("GPL");
596
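
The userspace flow that smi_request_store expects can be pieced together from the code above: grow the buffer through smi_data_buf_size, write a struct smi_cmd into the smi_data binary attribute, then write 1 to smi_request for a calling-interface SMI (2 requests a raw SMI, 0 zeroes the buffer). The C sketch below only illustrates that sequence; the sysfs path and the command_address/command_code values are placeholders rather than anything this patch defines, and on a real system they come from Dell-specific platform data (see Documentation/dcdbas.txt).

/*
 * Hypothetical userspace sketch: issue a calling-interface SMI through
 * the dcdbas sysfs files.  The path and command values are placeholders.
 */
#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define DCDBAS_SYSFS "/sys/devices/platform/dcdbas"	/* assumed path */

/* mirrors struct smi_cmd from dcdbas.h */
struct smi_cmd {
	uint32_t magic;
	uint32_t ebx;
	uint32_t ecx;
	uint16_t command_address;
	uint8_t  command_code;
	uint8_t  reserved;
	uint8_t  command_buffer[1];
} __attribute__ ((packed));

static int write_str(const char *file, const char *val)
{
	char path[256];
	FILE *f;

	snprintf(path, sizeof(path), DCDBAS_SYSFS "/%s", file);
	f = fopen(path, "w");
	if (!f)
		return -1;
	fputs(val, f);
	return fclose(f);
}

int main(void)
{
	struct smi_cmd cmd;
	FILE *f;

	memset(&cmd, 0, sizeof(cmd));
	cmd.magic = 0x534D4931;		/* SMI_CMD_MAGIC ("SMI1") */
	cmd.command_address = 0xB2;	/* placeholder SMI command port */
	cmd.command_code = 0xE0;	/* placeholder command code */

	/* 1. make sure the driver's SMI data buffer is large enough */
	if (write_str("smi_data_buf_size", "4096"))
		return 1;

	/* 2. write the command into the smi_data binary attribute */
	f = fopen(DCDBAS_SYSFS "/smi_data", "w");
	if (!f)
		return 1;
	fwrite(&cmd, sizeof(cmd), 1, f);
	fclose(f);

	/* 3. value 1 = calling-interface SMI, per smi_request_store */
	return write_str("smi_request", "1") ? 1 : 0;
}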
diff --git a/drivers/firmware/dcdbas.h b/drivers/firmware/dcdbas.h
new file mode 100644
index 000000000000..58a85182b3e8
--- /dev/null
+++ b/drivers/firmware/dcdbas.h
@@ -0,0 +1,107 @@
1/*
2 * dcdbas.h: Definitions for Dell Systems Management Base driver
3 *
4 * Copyright (C) 1995-2005 Dell Inc.
5 *
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License v2.0 as published by
8 * the Free Software Foundation.
9 *
10 * This program is distributed in the hope that it will be useful,
11 * but WITHOUT ANY WARRANTY; without even the implied warranty of
12 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
13 * GNU General Public License for more details.
14 */
15
16#ifndef _DCDBAS_H_
17#define _DCDBAS_H_
18
19#include <linux/device.h>
20#include <linux/input.h>
21#include <linux/sysfs.h>
22#include <linux/types.h>
23
24#define MAX_SMI_DATA_BUF_SIZE (256 * 1024)
25
26#define HC_ACTION_NONE (0)
27#define HC_ACTION_HOST_CONTROL_POWEROFF BIT(1)
28#define HC_ACTION_HOST_CONTROL_POWERCYCLE BIT(2)
29
30#define HC_SMITYPE_NONE (0)
31#define HC_SMITYPE_TYPE1 (1)
32#define HC_SMITYPE_TYPE2 (2)
33#define HC_SMITYPE_TYPE3 (3)
34
35#define ESM_APM_CMD (0x0A0)
36#define ESM_APM_POWER_CYCLE (0x10)
37#define ESM_STATUS_CMD_UNSUCCESSFUL (-1)
38
39#define CMOS_BASE_PORT (0x070)
40#define CMOS_PAGE1_INDEX_PORT (0)
41#define CMOS_PAGE1_DATA_PORT (1)
42#define CMOS_PAGE2_INDEX_PORT_PIIX4 (2)
43#define CMOS_PAGE2_DATA_PORT_PIIX4 (3)
44#define PE1400_APM_CONTROL_PORT (0x0B0)
45#define PCAT_APM_CONTROL_PORT (0x0B2)
46#define PCAT_APM_STATUS_PORT (0x0B3)
47#define PE1300_CMOS_CMD_STRUCT_PTR (0x38)
48#define PE1400_CMOS_CMD_STRUCT_PTR (0x70)
49
50#define MAX_SYSMGMT_SHORTCMD_PARMBUF_LEN (14)
51#define MAX_SYSMGMT_LONGCMD_SGENTRY_NUM (16)
52
53#define TIMEOUT_USEC_SHORT_SEMA_BLOCKING (10000)
54#define EXPIRED_TIMER (0)
55
56#define SMI_CMD_MAGIC (0x534D4931)
57
58#define DCDBAS_DEV_ATTR_RW(_name) \
59 DEVICE_ATTR(_name,0600,_name##_show,_name##_store);
60
61#define DCDBAS_DEV_ATTR_RO(_name) \
62 DEVICE_ATTR(_name,0400,_name##_show,NULL);
63
64#define DCDBAS_DEV_ATTR_WO(_name) \
65 DEVICE_ATTR(_name,0200,NULL,_name##_store);
66
67#define DCDBAS_BIN_ATTR_RW(_name) \
68struct bin_attribute bin_attr_##_name = { \
69 .attr = { .name = __stringify(_name), \
70 .mode = 0600, \
71 .owner = THIS_MODULE }, \
72 .read = _name##_read, \
73 .write = _name##_write, \
74}
75
76struct smi_cmd {
77 __u32 magic;
78 __u32 ebx;
79 __u32 ecx;
80 __u16 command_address;
81 __u8 command_code;
82 __u8 reserved;
83 __u8 command_buffer[1];
84} __attribute__ ((packed));
85
86struct apm_cmd {
87 __u8 command;
88 __s8 status;
89 __u16 reserved;
90 union {
91 struct {
92 __u8 parm[MAX_SYSMGMT_SHORTCMD_PARMBUF_LEN];
93 } __attribute__ ((packed)) shortreq;
94
95 struct {
96 __u16 num_sg_entries;
97 struct {
98 __u32 size;
99 __u64 addr;
100 } __attribute__ ((packed))
101 sglist[MAX_SYSMGMT_LONGCMD_SGENTRY_NUM];
102 } __attribute__ ((packed)) longreq;
103 } __attribute__ ((packed)) parameters;
104} __attribute__ ((packed));
105
106#endif /* _DCDBAS_H_ */
107
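
Because the BIOS consumes struct smi_cmd and struct apm_cmd straight out of the coherent buffer at smi_data_buf_phys_addr, the packed attributes above are part of the firmware ABI rather than a style choice. The standalone check below (an illustration, not part of the patch) confirms the intended layout of the SMI command header:

/* Standalone layout check for the packed smi_cmd structure in dcdbas.h. */
#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

struct smi_cmd {
	uint32_t magic;
	uint32_t ebx;
	uint32_t ecx;
	uint16_t command_address;
	uint8_t  command_code;
	uint8_t  reserved;
	uint8_t  command_buffer[1];
} __attribute__ ((packed));

int main(void)
{
	/* command_buffer must start right after the 16-byte header */
	printf("command_buffer offset: %zu (expected 16)\n",
	       offsetof(struct smi_cmd, command_buffer));
	printf("sizeof(struct smi_cmd): %zu (expected 17)\n",
	       sizeof(struct smi_cmd));
	return 0;
}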
diff --git a/drivers/firmware/dell_rbu.c b/drivers/firmware/dell_rbu.c
new file mode 100644
index 000000000000..3b865f34a095
--- /dev/null
+++ b/drivers/firmware/dell_rbu.c
@@ -0,0 +1,634 @@
1/*
2 * dell_rbu.c
3 * Bios Update driver for Dell systems
4 * Author: Dell Inc
5 * Abhay Salunke <abhay_salunke@dell.com>
6 *
7 * Copyright (C) 2005 Dell Inc.
8 *
9 * Remote BIOS Update (rbu) driver is used for updating DELL BIOS by
 10 * creating entries in the /sys file system on Linux 2.6 and higher
 11 * kernels. The driver supports two mechanisms to update the BIOS, namely
 12 * contiguous and packetized. Both methods still require an
 13 * application to set the CMOS bit instructing the BIOS to update itself
14 * after a reboot.
15 *
16 * Contiguous method:
17 * This driver writes the incoming data in a monolithic image by allocating
18 * contiguous physical pages large enough to accommodate the incoming BIOS
19 * image size.
20 *
21 * Packetized method:
 22 * The driver writes the incoming packet image by allocating a new packet
 23 * every time packet data is written. This driver requires an
 24 * application to break the BIOS image into fixed-size packet chunks.
25 *
26 * See Documentation/dell_rbu.txt for more info.
27 *
28 * This program is free software; you can redistribute it and/or modify
29 * it under the terms of the GNU General Public License v2.0 as published by
30 * the Free Software Foundation
31 *
32 * This program is distributed in the hope that it will be useful,
33 * but WITHOUT ANY WARRANTY; without even the implied warranty of
34 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
35 * GNU General Public License for more details.
36 */
37#include <linux/version.h>
38#include <linux/config.h>
39#include <linux/init.h>
40#include <linux/module.h>
41#include <linux/string.h>
42#include <linux/errno.h>
43#include <linux/blkdev.h>
44#include <linux/device.h>
45#include <linux/spinlock.h>
46#include <linux/moduleparam.h>
47#include <linux/firmware.h>
48#include <linux/dma-mapping.h>
49
50MODULE_AUTHOR("Abhay Salunke <abhay_salunke@dell.com>");
51MODULE_DESCRIPTION("Driver for updating BIOS image on DELL systems");
52MODULE_LICENSE("GPL");
53MODULE_VERSION("1.0");
54
55#define BIOS_SCAN_LIMIT 0xffffffff
56#define MAX_IMAGE_LENGTH 16
57static struct _rbu_data {
58 void *image_update_buffer;
59 unsigned long image_update_buffer_size;
60 unsigned long bios_image_size;
61 int image_update_ordernum;
62 int dma_alloc;
63 spinlock_t lock;
64 unsigned long packet_read_count;
65 unsigned long packet_write_count;
66 unsigned long num_packets;
67 unsigned long packetsize;
68} rbu_data;
69
70static char image_type[MAX_IMAGE_LENGTH] = "mono";
71module_param_string(image_type, image_type, sizeof(image_type), 0);
72MODULE_PARM_DESC(image_type, "BIOS image type. choose- mono or packet");
73
74struct packet_data {
75 struct list_head list;
76 size_t length;
77 void *data;
78 int ordernum;
79};
80
81static struct packet_data packet_data_head;
82
83static struct platform_device *rbu_device;
84static int context;
85static dma_addr_t dell_rbu_dmaaddr;
86
87static void init_packet_head(void)
88{
89 INIT_LIST_HEAD(&packet_data_head.list);
90 rbu_data.packet_write_count = 0;
91 rbu_data.packet_read_count = 0;
92 rbu_data.num_packets = 0;
93 rbu_data.packetsize = 0;
94}
95
96static int fill_last_packet(void *data, size_t length)
97{
98 struct list_head *ptemp_list;
99 struct packet_data *packet = NULL;
100 int packet_count = 0;
101
102 pr_debug("fill_last_packet: entry \n");
103
104 if (!rbu_data.num_packets) {
105 pr_debug("fill_last_packet: num_packets=0\n");
106 return -ENOMEM;
107 }
108
109 packet_count = rbu_data.num_packets;
110
111 ptemp_list = (&packet_data_head.list)->prev;
112
113 packet = list_entry(ptemp_list, struct packet_data, list);
114
115 if ((rbu_data.packet_write_count + length) > rbu_data.packetsize) {
116 pr_debug("dell_rbu:%s: packet size data "
117 "overrun\n", __FUNCTION__);
118 return -EINVAL;
119 }
120
121 pr_debug("fill_last_packet : buffer = %p\n", packet->data);
122
123 memcpy((packet->data + rbu_data.packet_write_count), data, length);
124
125 if ((rbu_data.packet_write_count + length) == rbu_data.packetsize) {
126 /*
127 * this was the last data chunk in the packet
128 * so reinitialize the packet data counter to zero
129 */
130 rbu_data.packet_write_count = 0;
131 } else
132 rbu_data.packet_write_count += length;
133
134 pr_debug("fill_last_packet: exit \n");
135 return 0;
136}
137
138static int create_packet(size_t length)
139{
140 struct packet_data *newpacket;
141 int ordernum = 0;
142
143 pr_debug("create_packet: entry \n");
144
145 if (!rbu_data.packetsize) {
146 pr_debug("create_packet: packetsize not specified\n");
147 return -EINVAL;
148 }
149
150 newpacket = kmalloc(sizeof(struct packet_data), GFP_KERNEL);
151 if (!newpacket) {
152 printk(KERN_WARNING
153 "dell_rbu:%s: failed to allocate new "
154 "packet\n", __FUNCTION__);
155 return -ENOMEM;
156 }
157
158 ordernum = get_order(length);
159 /*
160 * there is no upper limit on memory
161 * address for packetized mechanism
162 */
163 newpacket->data = (unsigned char *)__get_free_pages(GFP_KERNEL,
164 ordernum);
165
166 pr_debug("create_packet: newpacket %p\n", newpacket->data);
167
168 if (!newpacket->data) {
169 printk(KERN_WARNING
170 "dell_rbu:%s: failed to allocate new "
171 "packet\n", __FUNCTION__);
172 kfree(newpacket);
173 return -ENOMEM;
174 }
175
176 newpacket->ordernum = ordernum;
177 ++rbu_data.num_packets;
178 /*
179 * initialize the newly created packet headers
180 */
181 INIT_LIST_HEAD(&newpacket->list);
182 list_add_tail(&newpacket->list, &packet_data_head.list);
183 /*
184 * packets have fixed size
185 */
186 newpacket->length = rbu_data.packetsize;
187
188 pr_debug("create_packet: exit \n");
189
190 return 0;
191}
192
193static int packetize_data(void *data, size_t length)
194{
195 int rc = 0;
196
197 if (!rbu_data.packet_write_count) {
198 if ((rc = create_packet(length)))
199 return rc;
200 }
201 if ((rc = fill_last_packet(data, length)))
202 return rc;
203
204 return rc;
205}
206
207static int
208do_packet_read(char *data, struct list_head *ptemp_list,
209 int length, int bytes_read, int *list_read_count)
210{
211 void *ptemp_buf;
212 struct packet_data *newpacket = NULL;
213 int bytes_copied = 0;
214 int j = 0;
215
216 newpacket = list_entry(ptemp_list, struct packet_data, list);
217 *list_read_count += newpacket->length;
218
219 if (*list_read_count > bytes_read) {
220 /* point to the start of unread data */
221 j = newpacket->length - (*list_read_count - bytes_read);
222 /* point to the offset in the packet buffer */
223 ptemp_buf = (u8 *) newpacket->data + j;
224 /*
225 * check if there is enough room in
 226		 * the incoming buffer
227 */
228 if (length > (*list_read_count - bytes_read))
229 /*
230 * copy what ever is there in this
231 * packet and move on
232 */
233 bytes_copied = (*list_read_count - bytes_read);
234 else
235 /* copy the remaining */
236 bytes_copied = length;
237 memcpy(data, ptemp_buf, bytes_copied);
238 }
239 return bytes_copied;
240}
241
242static int packet_read_list(char *data, size_t * pread_length)
243{
244 struct list_head *ptemp_list;
245 int temp_count = 0;
246 int bytes_copied = 0;
247 int bytes_read = 0;
248 int remaining_bytes = 0;
249 char *pdest = data;
250
251 /* check if we have any packets */
252 if (0 == rbu_data.num_packets)
253 return -ENOMEM;
254
255 remaining_bytes = *pread_length;
256 bytes_read = rbu_data.packet_read_count;
257
258 ptemp_list = (&packet_data_head.list)->next;
259 while (!list_empty(ptemp_list)) {
260 bytes_copied = do_packet_read(pdest, ptemp_list,
261 remaining_bytes, bytes_read,
262 &temp_count);
263 remaining_bytes -= bytes_copied;
264 bytes_read += bytes_copied;
265 pdest += bytes_copied;
266 /*
267 * check if we reached end of buffer before reaching the
268 * last packet
269 */
270 if (remaining_bytes == 0)
271 break;
272
273 ptemp_list = ptemp_list->next;
274 }
275 /*finally set the bytes read */
276 *pread_length = bytes_read - rbu_data.packet_read_count;
277 rbu_data.packet_read_count = bytes_read;
278 return 0;
279}
280
281static void packet_empty_list(void)
282{
283 struct list_head *ptemp_list;
284 struct list_head *pnext_list;
285 struct packet_data *newpacket;
286
287 ptemp_list = (&packet_data_head.list)->next;
288 while (!list_empty(ptemp_list)) {
289 newpacket =
290 list_entry(ptemp_list, struct packet_data, list);
291 pnext_list = ptemp_list->next;
292 list_del(ptemp_list);
293 ptemp_list = pnext_list;
294 /*
295 * zero out the RBU packet memory before freeing
296 * to make sure there are no stale RBU packets left in memory
297 */
298 memset(newpacket->data, 0, rbu_data.packetsize);
299 free_pages((unsigned long)newpacket->data,
300 newpacket->ordernum);
301 kfree(newpacket);
302 }
303 rbu_data.packet_write_count = 0;
304 rbu_data.packet_read_count = 0;
305 rbu_data.num_packets = 0;
306 rbu_data.packetsize = 0;
307}
308
309/*
310 * img_update_free: Frees the buffer allocated for storing BIOS image
 311 * Always called with the lock held and returns with the lock held
312 */
313static void img_update_free(void)
314{
315 if (!rbu_data.image_update_buffer)
316 return;
317 /*
318 * zero out this buffer before freeing it to get rid of any stale
319 * BIOS image copied in memory.
320 */
321 memset(rbu_data.image_update_buffer, 0,
322 rbu_data.image_update_buffer_size);
323 if (rbu_data.dma_alloc == 1)
324 dma_free_coherent(NULL, rbu_data.bios_image_size,
325 rbu_data.image_update_buffer,
326 dell_rbu_dmaaddr);
327 else
328 free_pages((unsigned long)rbu_data.image_update_buffer,
329 rbu_data.image_update_ordernum);
330
331 /*
332 * Re-initialize the rbu_data variables after a free
333 */
334 rbu_data.image_update_ordernum = -1;
335 rbu_data.image_update_buffer = NULL;
336 rbu_data.image_update_buffer_size = 0;
337 rbu_data.bios_image_size = 0;
338 rbu_data.dma_alloc = 0;
339}
340
341/*
342 * img_update_realloc: This function allocates the contiguous pages to
343 * accommodate the requested size of data. The memory address and size
344 * values are stored globally and on every call to this function the new
345 * size is checked to see if more data is required than the existing size.
346 * If true the previous memory is freed and new allocation is done to
 347 * accommodate the new size. If the incoming size is less than the
348 * already allocated size, then that memory is reused. This function is
349 * called with lock held and returns with lock held.
350 */
351static int img_update_realloc(unsigned long size)
352{
353 unsigned char *image_update_buffer = NULL;
354 unsigned long rc;
355 unsigned long img_buf_phys_addr;
356 int ordernum;
357 int dma_alloc = 0;
358
359 /*
360 * check if the buffer of sufficient size has been
361 * already allocated
362 */
363 if (rbu_data.image_update_buffer_size >= size) {
364 /*
365 * check for corruption
366 */
367 if ((size != 0) && (rbu_data.image_update_buffer == NULL)) {
368 printk(KERN_ERR "dell_rbu:%s: corruption "
369 "check failed\n", __FUNCTION__);
370 return -EINVAL;
371 }
372 /*
373 * we have a valid pre-allocated buffer with
374 * sufficient size
375 */
376 return 0;
377 }
378
379 /*
380 * free any previously allocated buffer
381 */
382 img_update_free();
383
384 spin_unlock(&rbu_data.lock);
385
386 ordernum = get_order(size);
387 image_update_buffer =
388 (unsigned char *)__get_free_pages(GFP_KERNEL, ordernum);
389
390 img_buf_phys_addr =
391 (unsigned long)virt_to_phys(image_update_buffer);
392
393 if (img_buf_phys_addr > BIOS_SCAN_LIMIT) {
394 free_pages((unsigned long)image_update_buffer, ordernum);
395 ordernum = -1;
396 image_update_buffer = dma_alloc_coherent(NULL, size,
397 &dell_rbu_dmaaddr,
398 GFP_KERNEL);
399 dma_alloc = 1;
400 }
401
402 spin_lock(&rbu_data.lock);
403
404 if (image_update_buffer != NULL) {
405 rbu_data.image_update_buffer = image_update_buffer;
406 rbu_data.image_update_buffer_size = size;
407 rbu_data.bios_image_size =
408 rbu_data.image_update_buffer_size;
409 rbu_data.image_update_ordernum = ordernum;
410 rbu_data.dma_alloc = dma_alloc;
411 rc = 0;
412 } else {
 413		pr_debug("Not enough memory for image update: "
414 "size = %ld\n", size);
415 rc = -ENOMEM;
416 }
417
418 return rc;
419}
420
421static ssize_t read_packet_data(char *buffer, loff_t pos, size_t count)
422{
423 int retval;
424 size_t bytes_left;
425 size_t data_length;
426 char *ptempBuf = buffer;
427 unsigned long imagesize;
428
429 /* check to see if we have something to return */
430 if (rbu_data.num_packets == 0) {
431 pr_debug("read_packet_data: no packets written\n");
432 retval = -ENOMEM;
433 goto read_rbu_data_exit;
434 }
435
436 imagesize = rbu_data.num_packets * rbu_data.packetsize;
437
438 if (pos > imagesize) {
439 retval = 0;
440 printk(KERN_WARNING "dell_rbu:read_packet_data: "
441 "data underrun\n");
442 goto read_rbu_data_exit;
443 }
444
445 bytes_left = imagesize - pos;
446 data_length = min(bytes_left, count);
447
448 if ((retval = packet_read_list(ptempBuf, &data_length)) < 0)
449 goto read_rbu_data_exit;
450
451 if ((pos + count) > imagesize) {
452 rbu_data.packet_read_count = 0;
453 /* this was the last copy */
454 retval = bytes_left;
455 } else
456 retval = count;
457
458 read_rbu_data_exit:
459 return retval;
460}
461
462static ssize_t read_rbu_mono_data(char *buffer, loff_t pos, size_t count)
463{
464 unsigned char *ptemp = NULL;
465 size_t bytes_left = 0;
466 size_t data_length = 0;
467 ssize_t ret_count = 0;
468
469 /* check to see if we have something to return */
470 if ((rbu_data.image_update_buffer == NULL) ||
471 (rbu_data.bios_image_size == 0)) {
472 pr_debug("read_rbu_data_mono: image_update_buffer %p ,"
473 "bios_image_size %lu\n",
474 rbu_data.image_update_buffer,
475 rbu_data.bios_image_size);
476 ret_count = -ENOMEM;
477 goto read_rbu_data_exit;
478 }
479
480 if (pos > rbu_data.bios_image_size) {
481 ret_count = 0;
482 goto read_rbu_data_exit;
483 }
484
485 bytes_left = rbu_data.bios_image_size - pos;
486 data_length = min(bytes_left, count);
487
488 ptemp = rbu_data.image_update_buffer;
489 memcpy(buffer, (ptemp + pos), data_length);
490
491 if ((pos + count) > rbu_data.bios_image_size)
492 /* this was the last copy */
493 ret_count = bytes_left;
494 else
495 ret_count = count;
496 read_rbu_data_exit:
497 return ret_count;
498}
499
500static ssize_t
501read_rbu_data(struct kobject *kobj, char *buffer, loff_t pos, size_t count)
502{
503 ssize_t ret_count = 0;
504
505 spin_lock(&rbu_data.lock);
506
507 if (!strcmp(image_type, "mono"))
508 ret_count = read_rbu_mono_data(buffer, pos, count);
509 else if (!strcmp(image_type, "packet"))
510 ret_count = read_packet_data(buffer, pos, count);
511 else
512 pr_debug("read_rbu_data: invalid image type specified\n");
513
514 spin_unlock(&rbu_data.lock);
515 return ret_count;
516}
517
518static ssize_t
519read_rbu_image_type(struct kobject *kobj, char *buffer, loff_t pos,
520 size_t count)
521{
522 int size = 0;
523 if (!pos)
524 size = sprintf(buffer, "%s\n", image_type);
525 return size;
526}
527
528static ssize_t
529write_rbu_image_type(struct kobject *kobj, char *buffer, loff_t pos,
530 size_t count)
531{
532 int rc = count;
533 spin_lock(&rbu_data.lock);
534
535 if (strlen(buffer) < MAX_IMAGE_LENGTH)
536 sscanf(buffer, "%s", image_type);
537 else
 538		printk(KERN_WARNING "dell_rbu: image_type is invalid, "
539 "max chars = %d, \n incoming str--%s-- \n",
540 MAX_IMAGE_LENGTH, buffer);
541
542 /* we must free all previous allocations */
543 packet_empty_list();
544 img_update_free();
545
546 spin_unlock(&rbu_data.lock);
547 return rc;
548
549}
550
551static struct bin_attribute rbu_data_attr = {
552 .attr = {.name = "data",.owner = THIS_MODULE,.mode = 0444},
553 .read = read_rbu_data,
554};
555
556static struct bin_attribute rbu_image_type_attr = {
557 .attr = {.name = "image_type",.owner = THIS_MODULE,.mode = 0644},
558 .read = read_rbu_image_type,
559 .write = write_rbu_image_type,
560};
561
562static void callbackfn_rbu(const struct firmware *fw, void *context)
563{
564 int rc = 0;
565
566 if (!fw || !fw->size)
567 return;
568
569 spin_lock(&rbu_data.lock);
570 if (!strcmp(image_type, "mono")) {
571 if (!img_update_realloc(fw->size))
572 memcpy(rbu_data.image_update_buffer,
573 fw->data, fw->size);
574 } else if (!strcmp(image_type, "packet")) {
575 if (!rbu_data.packetsize)
576 rbu_data.packetsize = fw->size;
577 else if (rbu_data.packetsize != fw->size) {
578 packet_empty_list();
579 rbu_data.packetsize = fw->size;
580 }
581 packetize_data(fw->data, fw->size);
582 } else
583 pr_debug("invalid image type specified.\n");
584 spin_unlock(&rbu_data.lock);
585
586 rc = request_firmware_nowait(THIS_MODULE, FW_ACTION_NOHOTPLUG,
587 "dell_rbu", &rbu_device->dev,
588 &context, callbackfn_rbu);
589 if (rc)
590 printk(KERN_ERR
591 "dell_rbu:%s request_firmware_nowait failed"
592 " %d\n", __FUNCTION__, rc);
593}
594
595static int __init dcdrbu_init(void)
596{
597 int rc = 0;
598 spin_lock_init(&rbu_data.lock);
599
600 init_packet_head();
601 rbu_device =
602 platform_device_register_simple("dell_rbu", -1, NULL, 0);
603 if (!rbu_device) {
604 printk(KERN_ERR
605 "dell_rbu:%s:platform_device_register_simple "
606 "failed\n", __FUNCTION__);
607 return -EIO;
608 }
609
610 sysfs_create_bin_file(&rbu_device->dev.kobj, &rbu_data_attr);
611 sysfs_create_bin_file(&rbu_device->dev.kobj, &rbu_image_type_attr);
612
613 rc = request_firmware_nowait(THIS_MODULE, FW_ACTION_NOHOTPLUG,
614 "dell_rbu", &rbu_device->dev,
615 &context, callbackfn_rbu);
616 if (rc)
617 printk(KERN_ERR "dell_rbu:%s:request_firmware_nowait"
618 " failed %d\n", __FUNCTION__, rc);
619
620 return rc;
621
622}
623
624static __exit void dcdrbu_exit(void)
625{
626 spin_lock(&rbu_data.lock);
627 packet_empty_list();
628 img_update_free();
629 spin_unlock(&rbu_data.lock);
630 platform_device_unregister(rbu_device);
631}
632
633module_exit(dcdrbu_exit);
634module_init(dcdrbu_init);
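
For packet mode, callbackfn_rbu treats each firmware-class load as one packet and empties the whole list whenever a load arrives with a different size, so every chunk the user feeds in must have the same length (a real tool would pad the final short chunk). The sketch below shows one way a userspace helper might split an image and drive the standard firmware-class loading/data files; the /sys/class/firmware/dell_rbu path and the 4 KB chunk size are assumptions for illustration, not values fixed by this driver.

/*
 * Hypothetical helper for dell_rbu packet mode: split a BIOS image into
 * fixed-size chunks and push each one through the firmware-class files
 * the driver keeps pending.  Paths and chunk size are placeholders.
 */
#include <stdio.h>
#include <string.h>

#define FW_DIR		"/sys/class/firmware/dell_rbu"	/* assumed */
#define CHUNK_SIZE	4096				/* example packet size */

static int write_file(const char *path, const void *buf, size_t len)
{
	FILE *f = fopen(path, "w");

	if (!f)
		return -1;
	if (len && fwrite(buf, len, 1, f) != 1) {
		fclose(f);
		return -1;
	}
	return fclose(f);
}

int main(int argc, char **argv)
{
	char chunk[CHUNK_SIZE];
	size_t n;
	FILE *img;

	if (argc != 2)
		return 1;
	img = fopen(argv[1], "rb");
	if (!img)
		return 1;

	/*
	 * Note: the driver resets its packet list when a chunk size changes,
	 * so a short final chunk should really be padded to CHUNK_SIZE.
	 */
	while ((n = fread(chunk, 1, sizeof(chunk), img)) > 0) {
		/* one firmware-class transaction per packet */
		if (write_file(FW_DIR "/loading", "1", 1) ||
		    write_file(FW_DIR "/data", chunk, n) ||
		    write_file(FW_DIR "/loading", "0", 1)) {
			fclose(img);
			return 1;
		}
	}
	fclose(img);
	return 0;
}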
diff --git a/drivers/i2c/chips/isp1301_omap.c b/drivers/i2c/chips/isp1301_omap.c
index 354a26295672..8ee56d4b3891 100644
--- a/drivers/i2c/chips/isp1301_omap.c
+++ b/drivers/i2c/chips/isp1301_omap.c
@@ -1489,7 +1489,7 @@ static int isp1301_probe(struct i2c_adapter *bus, int address, int kind)
1489 if (the_transceiver) 1489 if (the_transceiver)
1490 return 0; 1490 return 0;
1491 1491
1492 isp = kcalloc(1, sizeof *isp, GFP_KERNEL); 1492 isp = kzalloc(sizeof *isp, GFP_KERNEL);
1493 if (!isp) 1493 if (!isp)
1494 return 0; 1494 return 0;
1495 1495
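
This and the similar hunks below are mechanical: kcalloc(1, size, flags) and kzalloc(size, flags) both hand back a single zeroed allocation of size bytes, so no behavior changes. In plain userspace C the relationship is roughly the following (illustrative only, not the kernel's implementation):

/* Illustration: two ways to get one zeroed allocation of `size' bytes. */
#include <stdlib.h>
#include <string.h>

static void *zalloc_via_calloc(size_t size)
{
	return calloc(1, size);		/* the kcalloc(1, size, ...) shape */
}

static void *zalloc_via_memset(size_t size)
{
	void *p = malloc(size);		/* the kzalloc(size, ...) shape */

	if (p)
		memset(p, 0, size);
	return p;
}

int main(void)
{
	free(zalloc_via_calloc(64));
	free(zalloc_via_memset(64));
	return 0;
}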
diff --git a/drivers/ieee1394/nodemgr.c b/drivers/ieee1394/nodemgr.c
index bebcc47ab06c..b23322523ef5 100644
--- a/drivers/ieee1394/nodemgr.c
+++ b/drivers/ieee1394/nodemgr.c
@@ -1068,6 +1068,8 @@ static int nodemgr_hotplug(struct class_device *cdev, char **envp, int num_envp,
1068 struct unit_directory *ud; 1068 struct unit_directory *ud;
1069 int i = 0; 1069 int i = 0;
1070 int length = 0; 1070 int length = 0;
1071 /* ieee1394:venNmoNspNverN */
1072 char buf[8 + 1 + 3 + 8 + 2 + 8 + 2 + 8 + 3 + 8 + 1];
1071 1073
1072 if (!cdev) 1074 if (!cdev)
1073 return -ENODEV; 1075 return -ENODEV;
@@ -1094,6 +1096,12 @@ do { \
1094 PUT_ENVP("GUID=%016Lx", (unsigned long long)ud->ne->guid); 1096 PUT_ENVP("GUID=%016Lx", (unsigned long long)ud->ne->guid);
1095 PUT_ENVP("SPECIFIER_ID=%06x", ud->specifier_id); 1097 PUT_ENVP("SPECIFIER_ID=%06x", ud->specifier_id);
1096 PUT_ENVP("VERSION=%06x", ud->version); 1098 PUT_ENVP("VERSION=%06x", ud->version);
1099 snprintf(buf, sizeof(buf), "ieee1394:ven%08Xmo%08Xsp%08Xver%08X",
1100 ud->vendor_id,
1101 ud->model_id,
1102 ud->specifier_id,
1103 ud->version);
1104 PUT_ENVP("MODALIAS=%s", buf);
1097 1105
1098#undef PUT_ENVP 1106#undef PUT_ENVP
1099 1107
diff --git a/drivers/infiniband/core/sysfs.c b/drivers/infiniband/core/sysfs.c
index fae1c2dcee51..211ba3223f65 100644
--- a/drivers/infiniband/core/sysfs.c
+++ b/drivers/infiniband/core/sysfs.c
@@ -463,7 +463,7 @@ alloc_group_attrs(ssize_t (*show)(struct ib_port *,
463 return NULL; 463 return NULL;
464 464
465 for (i = 0; i < len; i++) { 465 for (i = 0; i < len; i++) {
466 element = kcalloc(1, sizeof(struct port_table_attribute), 466 element = kzalloc(sizeof(struct port_table_attribute),
467 GFP_KERNEL); 467 GFP_KERNEL);
468 if (!element) 468 if (!element)
469 goto err; 469 goto err;
diff --git a/drivers/input/evdev.c b/drivers/input/evdev.c
index f8b278d3559b..19c14c4beb44 100644
--- a/drivers/input/evdev.c
+++ b/drivers/input/evdev.c
@@ -393,6 +393,7 @@ static long evdev_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
393 case EV_LED: bits = dev->ledbit; len = LED_MAX; break; 393 case EV_LED: bits = dev->ledbit; len = LED_MAX; break;
394 case EV_SND: bits = dev->sndbit; len = SND_MAX; break; 394 case EV_SND: bits = dev->sndbit; len = SND_MAX; break;
395 case EV_FF: bits = dev->ffbit; len = FF_MAX; break; 395 case EV_FF: bits = dev->ffbit; len = FF_MAX; break;
396 case EV_SW: bits = dev->swbit; len = SW_MAX; break;
396 default: return -EINVAL; 397 default: return -EINVAL;
397 } 398 }
398 len = NBITS(len) * sizeof(long); 399 len = NBITS(len) * sizeof(long);
@@ -421,6 +422,13 @@ static long evdev_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
421 return copy_to_user(p, dev->snd, len) ? -EFAULT : len; 422 return copy_to_user(p, dev->snd, len) ? -EFAULT : len;
422 } 423 }
423 424
425 if (_IOC_NR(cmd) == _IOC_NR(EVIOCGSW(0))) {
426 int len;
427 len = NBITS(SW_MAX) * sizeof(long);
428 if (len > _IOC_SIZE(cmd)) len = _IOC_SIZE(cmd);
429 return copy_to_user(p, dev->sw, len) ? -EFAULT : len;
430 }
431
424 if (_IOC_NR(cmd) == _IOC_NR(EVIOCGNAME(0))) { 432 if (_IOC_NR(cmd) == _IOC_NR(EVIOCGNAME(0))) {
425 int len; 433 int len;
426 if (!dev->name) return -ENOENT; 434 if (!dev->name) return -ENOENT;
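
The new EVIOCGSW case follows the same pattern as the state-dump ioctl visible just above it: userspace passes a buffer and receives the current switch-state bitmap, truncated to the ioctl size. A minimal consumer might look like the sketch below; the event node path and the NBITS/test_bit helpers are local assumptions for illustration, not part of the patch.

/* Hypothetical evdev consumer: read the current switch state via EVIOCGSW. */
#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <sys/ioctl.h>
#include <linux/input.h>

#define BITS_PER_LONG	(sizeof(long) * 8)
#define NBITS(x)	((((x) - 1) / BITS_PER_LONG) + 1)
#define test_bit(bit, array) \
	((array)[(bit) / BITS_PER_LONG] & (1UL << ((bit) % BITS_PER_LONG)))

int main(void)
{
	unsigned long sw[NBITS(SW_MAX + 1)];
	int fd, i;

	fd = open("/dev/input/event0", O_RDONLY);	/* assumed node */
	if (fd < 0)
		return 1;

	memset(sw, 0, sizeof(sw));
	if (ioctl(fd, EVIOCGSW(sizeof(sw)), sw) < 0)
		return 1;

	for (i = 0; i <= SW_MAX; i++)
		if (test_bit(i, sw))
			printf("switch %d is set\n", i);
	return 0;
}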
diff --git a/drivers/input/gameport/emu10k1-gp.c b/drivers/input/gameport/emu10k1-gp.c
index a0118038330a..462f8d300aae 100644
--- a/drivers/input/gameport/emu10k1-gp.c
+++ b/drivers/input/gameport/emu10k1-gp.c
@@ -75,7 +75,7 @@ static int __devinit emu_probe(struct pci_dev *pdev, const struct pci_device_id
75 if (!request_region(ioport, iolen, "emu10k1-gp")) 75 if (!request_region(ioport, iolen, "emu10k1-gp"))
76 return -EBUSY; 76 return -EBUSY;
77 77
78 emu = kcalloc(1, sizeof(struct emu), GFP_KERNEL); 78 emu = kzalloc(sizeof(struct emu), GFP_KERNEL);
79 port = gameport_allocate_port(); 79 port = gameport_allocate_port();
80 if (!emu || !port) { 80 if (!emu || !port) {
81 printk(KERN_ERR "emu10k1-gp: Memory allocation failed\n"); 81 printk(KERN_ERR "emu10k1-gp: Memory allocation failed\n");
diff --git a/drivers/input/gameport/fm801-gp.c b/drivers/input/gameport/fm801-gp.c
index 57615bc63906..47e93daa0fa7 100644
--- a/drivers/input/gameport/fm801-gp.c
+++ b/drivers/input/gameport/fm801-gp.c
@@ -83,7 +83,7 @@ static int __devinit fm801_gp_probe(struct pci_dev *pci, const struct pci_device
83 struct fm801_gp *gp; 83 struct fm801_gp *gp;
84 struct gameport *port; 84 struct gameport *port;
85 85
86 gp = kcalloc(1, sizeof(struct fm801_gp), GFP_KERNEL); 86 gp = kzalloc(sizeof(struct fm801_gp), GFP_KERNEL);
87 port = gameport_allocate_port(); 87 port = gameport_allocate_port();
88 if (!gp || !port) { 88 if (!gp || !port) {
89 printk(KERN_ERR "fm801-gp: Memory allocation failed\n"); 89 printk(KERN_ERR "fm801-gp: Memory allocation failed\n");
diff --git a/drivers/input/gameport/ns558.c b/drivers/input/gameport/ns558.c
index 70f051894a3c..d2e55dc956ba 100644
--- a/drivers/input/gameport/ns558.c
+++ b/drivers/input/gameport/ns558.c
@@ -142,7 +142,7 @@ static int ns558_isa_probe(int io)
142 return -EBUSY; 142 return -EBUSY;
143 } 143 }
144 144
145 ns558 = kcalloc(1, sizeof(struct ns558), GFP_KERNEL); 145 ns558 = kzalloc(sizeof(struct ns558), GFP_KERNEL);
146 port = gameport_allocate_port(); 146 port = gameport_allocate_port();
147 if (!ns558 || !port) { 147 if (!ns558 || !port) {
148 printk(KERN_ERR "ns558: Memory allocation failed.\n"); 148 printk(KERN_ERR "ns558: Memory allocation failed.\n");
@@ -215,7 +215,7 @@ static int ns558_pnp_probe(struct pnp_dev *dev, const struct pnp_device_id *did)
215 if (!request_region(ioport, iolen, "ns558-pnp")) 215 if (!request_region(ioport, iolen, "ns558-pnp"))
216 return -EBUSY; 216 return -EBUSY;
217 217
218 ns558 = kcalloc(1, sizeof(struct ns558), GFP_KERNEL); 218 ns558 = kzalloc(sizeof(struct ns558), GFP_KERNEL);
219 port = gameport_allocate_port(); 219 port = gameport_allocate_port();
220 if (!ns558 || !port) { 220 if (!ns558 || !port) {
221 printk(KERN_ERR "ns558: Memory allocation failed\n"); 221 printk(KERN_ERR "ns558: Memory allocation failed\n");
diff --git a/drivers/input/input.c b/drivers/input/input.c
index a275211c8e1e..88636a204525 100644
--- a/drivers/input/input.c
+++ b/drivers/input/input.c
@@ -89,6 +89,15 @@ void input_event(struct input_dev *dev, unsigned int type, unsigned int code, in
89 89
90 break; 90 break;
91 91
92 case EV_SW:
93
94 if (code > SW_MAX || !test_bit(code, dev->swbit) || !!test_bit(code, dev->sw) == value)
95 return;
96
97 change_bit(code, dev->sw);
98
99 break;
100
92 case EV_ABS: 101 case EV_ABS:
93 102
94 if (code > ABS_MAX || !test_bit(code, dev->absbit)) 103 if (code > ABS_MAX || !test_bit(code, dev->absbit))
@@ -402,6 +411,7 @@ static void input_call_hotplug(char *verb, struct input_dev *dev)
402 SPRINTF_BIT_A2(ledbit, "LED=", LED_MAX, EV_LED); 411 SPRINTF_BIT_A2(ledbit, "LED=", LED_MAX, EV_LED);
403 SPRINTF_BIT_A2(sndbit, "SND=", SND_MAX, EV_SND); 412 SPRINTF_BIT_A2(sndbit, "SND=", SND_MAX, EV_SND);
404 SPRINTF_BIT_A2(ffbit, "FF=", FF_MAX, EV_FF); 413 SPRINTF_BIT_A2(ffbit, "FF=", FF_MAX, EV_FF);
414 SPRINTF_BIT_A2(swbit, "SW=", SW_MAX, EV_SW);
405 415
406 envp[i++] = NULL; 416 envp[i++] = NULL;
407 417
@@ -490,6 +500,7 @@ static int input_devices_read(char *buf, char **start, off_t pos, int count, int
490 SPRINTF_BIT_B2(ledbit, "LED=", LED_MAX, EV_LED); 500 SPRINTF_BIT_B2(ledbit, "LED=", LED_MAX, EV_LED);
491 SPRINTF_BIT_B2(sndbit, "SND=", SND_MAX, EV_SND); 501 SPRINTF_BIT_B2(sndbit, "SND=", SND_MAX, EV_SND);
492 SPRINTF_BIT_B2(ffbit, "FF=", FF_MAX, EV_FF); 502 SPRINTF_BIT_B2(ffbit, "FF=", FF_MAX, EV_FF);
503 SPRINTF_BIT_B2(swbit, "SW=", SW_MAX, EV_SW);
493 504
494 len += sprintf(buf + len, "\n"); 505 len += sprintf(buf + len, "\n");
495 506
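
On the producer side, a driver that owns a switch declares EV_SW capability and reports transitions; the new EV_SW case in input_event() drops any event whose value matches the bit already recorded in dev->sw, so only real state changes propagate. The corgikbd changes later in this patch do exactly that for the hinge switches; in schematic form (a kernel-context fragment, not a standalone program):

/*
 * Sketch of the producer side: declare a switch and report its state.
 * SW_0 here is just an example code; a real driver picks the switch
 * codes that match its hardware.
 */
#include <linux/input.h>

static void example_setup_switch(struct input_dev *dev)
{
	set_bit(EV_SW, dev->evbit);	/* device generates switch events */
	set_bit(SW_0, dev->swbit);	/* it carries one switch, SW_0 */
}

static void example_report_switch(struct input_dev *dev, int closed)
{
	/* input_event() queues nothing unless the state actually changed */
	input_report_switch(dev, SW_0, closed);
	input_sync(dev);
}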
diff --git a/drivers/input/joystick/a3d.c b/drivers/input/joystick/a3d.c
index bf34f75b9467..bf65430181fa 100644
--- a/drivers/input/joystick/a3d.c
+++ b/drivers/input/joystick/a3d.c
@@ -269,7 +269,7 @@ static int a3d_connect(struct gameport *gameport, struct gameport_driver *drv)
269 int i; 269 int i;
270 int err; 270 int err;
271 271
272 if (!(a3d = kcalloc(1, sizeof(struct a3d), GFP_KERNEL))) 272 if (!(a3d = kzalloc(sizeof(struct a3d), GFP_KERNEL)))
273 return -ENOMEM; 273 return -ENOMEM;
274 274
275 a3d->gameport = gameport; 275 a3d->gameport = gameport;
diff --git a/drivers/input/joystick/adi.c b/drivers/input/joystick/adi.c
index 265962956c63..cf35ae638a0d 100644
--- a/drivers/input/joystick/adi.c
+++ b/drivers/input/joystick/adi.c
@@ -469,7 +469,7 @@ static int adi_connect(struct gameport *gameport, struct gameport_driver *drv)
469 int i; 469 int i;
470 int err; 470 int err;
471 471
472 if (!(port = kcalloc(1, sizeof(struct adi_port), GFP_KERNEL))) 472 if (!(port = kzalloc(sizeof(struct adi_port), GFP_KERNEL)))
473 return -ENOMEM; 473 return -ENOMEM;
474 474
475 port->gameport = gameport; 475 port->gameport = gameport;
diff --git a/drivers/input/joystick/analog.c b/drivers/input/joystick/analog.c
index c3a5739030c3..64b1313a3c66 100644
--- a/drivers/input/joystick/analog.c
+++ b/drivers/input/joystick/analog.c
@@ -655,7 +655,7 @@ static int analog_connect(struct gameport *gameport, struct gameport_driver *drv
655 int i; 655 int i;
656 int err; 656 int err;
657 657
658 if (!(port = kcalloc(1, sizeof(struct analog_port), GFP_KERNEL))) 658 if (!(port = kzalloc(sizeof(struct analog_port), GFP_KERNEL)))
659 return - ENOMEM; 659 return - ENOMEM;
660 660
661 err = analog_init_port(gameport, drv, port); 661 err = analog_init_port(gameport, drv, port);
diff --git a/drivers/input/joystick/cobra.c b/drivers/input/joystick/cobra.c
index a6002205328f..0b2e9fa26579 100644
--- a/drivers/input/joystick/cobra.c
+++ b/drivers/input/joystick/cobra.c
@@ -163,7 +163,7 @@ static int cobra_connect(struct gameport *gameport, struct gameport_driver *drv)
163 int i, j; 163 int i, j;
164 int err; 164 int err;
165 165
166 if (!(cobra = kcalloc(1, sizeof(struct cobra), GFP_KERNEL))) 166 if (!(cobra = kzalloc(sizeof(struct cobra), GFP_KERNEL)))
167 return -ENOMEM; 167 return -ENOMEM;
168 168
169 cobra->gameport = gameport; 169 cobra->gameport = gameport;
diff --git a/drivers/input/joystick/db9.c b/drivers/input/joystick/db9.c
index fbd3eed07f90..2a3e4bb2da50 100644
--- a/drivers/input/joystick/db9.c
+++ b/drivers/input/joystick/db9.c
@@ -572,7 +572,7 @@ static struct db9 __init *db9_probe(int *config, int nargs)
572 } 572 }
573 } 573 }
574 574
575 if (!(db9 = kcalloc(1, sizeof(struct db9), GFP_KERNEL))) { 575 if (!(db9 = kzalloc(sizeof(struct db9), GFP_KERNEL))) {
576 parport_put_port(pp); 576 parport_put_port(pp);
577 return NULL; 577 return NULL;
578 } 578 }
diff --git a/drivers/input/joystick/gamecon.c b/drivers/input/joystick/gamecon.c
index 95bbdd302aad..5427bf9fc862 100644
--- a/drivers/input/joystick/gamecon.c
+++ b/drivers/input/joystick/gamecon.c
@@ -554,7 +554,7 @@ static struct gc __init *gc_probe(int *config, int nargs)
554 return NULL; 554 return NULL;
555 } 555 }
556 556
557 if (!(gc = kcalloc(1, sizeof(struct gc), GFP_KERNEL))) { 557 if (!(gc = kzalloc(sizeof(struct gc), GFP_KERNEL))) {
558 parport_put_port(pp); 558 parport_put_port(pp);
559 return NULL; 559 return NULL;
560 } 560 }
diff --git a/drivers/input/joystick/gf2k.c b/drivers/input/joystick/gf2k.c
index 7d969420066c..8e4f92b115e6 100644
--- a/drivers/input/joystick/gf2k.c
+++ b/drivers/input/joystick/gf2k.c
@@ -242,7 +242,7 @@ static int gf2k_connect(struct gameport *gameport, struct gameport_driver *drv)
242 unsigned char data[GF2K_LENGTH]; 242 unsigned char data[GF2K_LENGTH];
243 int i, err; 243 int i, err;
244 244
245 if (!(gf2k = kcalloc(1, sizeof(struct gf2k), GFP_KERNEL))) 245 if (!(gf2k = kzalloc(sizeof(struct gf2k), GFP_KERNEL)))
246 return -ENOMEM; 246 return -ENOMEM;
247 247
248 gf2k->gameport = gameport; 248 gf2k->gameport = gameport;
diff --git a/drivers/input/joystick/grip.c b/drivers/input/joystick/grip.c
index d1500d2562d6..9d3f910dd568 100644
--- a/drivers/input/joystick/grip.c
+++ b/drivers/input/joystick/grip.c
@@ -301,7 +301,7 @@ static int grip_connect(struct gameport *gameport, struct gameport_driver *drv)
301 int i, j, t; 301 int i, j, t;
302 int err; 302 int err;
303 303
304 if (!(grip = kcalloc(1, sizeof(struct grip), GFP_KERNEL))) 304 if (!(grip = kzalloc(sizeof(struct grip), GFP_KERNEL)))
305 return -ENOMEM; 305 return -ENOMEM;
306 306
307 grip->gameport = gameport; 307 grip->gameport = gameport;
diff --git a/drivers/input/joystick/grip_mp.c b/drivers/input/joystick/grip_mp.c
index 0da7bd133ccf..da17eee6f574 100644
--- a/drivers/input/joystick/grip_mp.c
+++ b/drivers/input/joystick/grip_mp.c
@@ -607,7 +607,7 @@ static int grip_connect(struct gameport *gameport, struct gameport_driver *drv)
607 struct grip_mp *grip; 607 struct grip_mp *grip;
608 int err; 608 int err;
609 609
610 if (!(grip = kcalloc(1, sizeof(struct grip_mp), GFP_KERNEL))) 610 if (!(grip = kzalloc(sizeof(struct grip_mp), GFP_KERNEL)))
611 return -ENOMEM; 611 return -ENOMEM;
612 612
613 grip->gameport = gameport; 613 grip->gameport = gameport;
diff --git a/drivers/input/joystick/guillemot.c b/drivers/input/joystick/guillemot.c
index f93da7bc082d..6a70ec429f06 100644
--- a/drivers/input/joystick/guillemot.c
+++ b/drivers/input/joystick/guillemot.c
@@ -183,7 +183,7 @@ static int guillemot_connect(struct gameport *gameport, struct gameport_driver *
183 int i, t; 183 int i, t;
184 int err; 184 int err;
185 185
186 if (!(guillemot = kcalloc(1, sizeof(struct guillemot), GFP_KERNEL))) 186 if (!(guillemot = kzalloc(sizeof(struct guillemot), GFP_KERNEL)))
187 return -ENOMEM; 187 return -ENOMEM;
188 188
189 guillemot->gameport = gameport; 189 guillemot->gameport = gameport;
diff --git a/drivers/input/joystick/interact.c b/drivers/input/joystick/interact.c
index 9d3f8c38cb09..d7b3472bd686 100644
--- a/drivers/input/joystick/interact.c
+++ b/drivers/input/joystick/interact.c
@@ -212,7 +212,7 @@ static int interact_connect(struct gameport *gameport, struct gameport_driver *d
212 int i, t; 212 int i, t;
213 int err; 213 int err;
214 214
215 if (!(interact = kcalloc(1, sizeof(struct interact), GFP_KERNEL))) 215 if (!(interact = kzalloc(sizeof(struct interact), GFP_KERNEL)))
216 return -ENOMEM; 216 return -ENOMEM;
217 217
218 interact->gameport = gameport; 218 interact->gameport = gameport;
diff --git a/drivers/input/joystick/sidewinder.c b/drivers/input/joystick/sidewinder.c
index 47144a7ed9e7..9e0353721a35 100644
--- a/drivers/input/joystick/sidewinder.c
+++ b/drivers/input/joystick/sidewinder.c
@@ -590,7 +590,7 @@ static int sw_connect(struct gameport *gameport, struct gameport_driver *drv)
590 590
591 comment[0] = 0; 591 comment[0] = 0;
592 592
593 sw = kcalloc(1, sizeof(struct sw), GFP_KERNEL); 593 sw = kzalloc(sizeof(struct sw), GFP_KERNEL);
594 buf = kmalloc(SW_LENGTH, GFP_KERNEL); 594 buf = kmalloc(SW_LENGTH, GFP_KERNEL);
595 idbuf = kmalloc(SW_LENGTH, GFP_KERNEL); 595 idbuf = kmalloc(SW_LENGTH, GFP_KERNEL);
596 if (!sw || !buf || !idbuf) { 596 if (!sw || !buf || !idbuf) {
diff --git a/drivers/input/joystick/tmdc.c b/drivers/input/joystick/tmdc.c
index 9eb9954cac6e..7431efc4330e 100644
--- a/drivers/input/joystick/tmdc.c
+++ b/drivers/input/joystick/tmdc.c
@@ -262,7 +262,7 @@ static int tmdc_connect(struct gameport *gameport, struct gameport_driver *drv)
262 int i, j, k, l, m; 262 int i, j, k, l, m;
263 int err; 263 int err;
264 264
265 if (!(tmdc = kcalloc(1, sizeof(struct tmdc), GFP_KERNEL))) 265 if (!(tmdc = kzalloc(sizeof(struct tmdc), GFP_KERNEL)))
266 return -ENOMEM; 266 return -ENOMEM;
267 267
268 tmdc->gameport = gameport; 268 tmdc->gameport = gameport;
diff --git a/drivers/input/joystick/turbografx.c b/drivers/input/joystick/turbografx.c
index 28100d461cb7..0c5b9c8297cd 100644
--- a/drivers/input/joystick/turbografx.c
+++ b/drivers/input/joystick/turbografx.c
@@ -178,7 +178,7 @@ static struct tgfx __init *tgfx_probe(int *config, int nargs)
178 return NULL; 178 return NULL;
179 } 179 }
180 180
181 if (!(tgfx = kcalloc(1, sizeof(struct tgfx), GFP_KERNEL))) { 181 if (!(tgfx = kzalloc(sizeof(struct tgfx), GFP_KERNEL))) {
182 parport_put_port(pp); 182 parport_put_port(pp);
183 return NULL; 183 return NULL;
184 } 184 }
diff --git a/drivers/input/keyboard/corgikbd.c b/drivers/input/keyboard/corgikbd.c
index a8551711e8d6..cd4b6e795013 100644
--- a/drivers/input/keyboard/corgikbd.c
+++ b/drivers/input/keyboard/corgikbd.c
@@ -16,6 +16,7 @@
16#include <linux/init.h> 16#include <linux/init.h>
17#include <linux/input.h> 17#include <linux/input.h>
18#include <linux/interrupt.h> 18#include <linux/interrupt.h>
19#include <linux/jiffies.h>
19#include <linux/module.h> 20#include <linux/module.h>
20#include <linux/slab.h> 21#include <linux/slab.h>
21#include <asm/irq.h> 22#include <asm/irq.h>
@@ -32,7 +33,6 @@
32/* zero code, 124 scancodes + 3 hinge combinations */ 33/* zero code, 124 scancodes + 3 hinge combinations */
33#define NR_SCANCODES ( SCANCODE(KB_ROWS-1,KB_COLS-1) +1 +1 +3 ) 34#define NR_SCANCODES ( SCANCODE(KB_ROWS-1,KB_COLS-1) +1 +1 +3 )
34#define SCAN_INTERVAL (HZ/10) 35#define SCAN_INTERVAL (HZ/10)
35#define CORGIKBD_PRESSED 1
36 36
37#define HINGE_SCAN_INTERVAL (HZ/4) 37#define HINGE_SCAN_INTERVAL (HZ/4)
38 38
@@ -73,25 +73,13 @@ struct corgikbd {
73 struct input_dev input; 73 struct input_dev input;
74 char phys[32]; 74 char phys[32];
75 75
76 unsigned char state[ARRAY_SIZE(corgikbd_keycode)];
77 spinlock_t lock; 76 spinlock_t lock;
78
79 struct timer_list timer; 77 struct timer_list timer;
80 struct timer_list htimer; 78 struct timer_list htimer;
81};
82 79
83static void handle_scancode(unsigned int pressed,unsigned int scancode, struct corgikbd *corgikbd_data) 80 unsigned int suspended;
84{ 81 unsigned long suspend_jiffies;
85 if (pressed && !(corgikbd_data->state[scancode] & CORGIKBD_PRESSED)) { 82};
86 corgikbd_data->state[scancode] |= CORGIKBD_PRESSED;
87 input_report_key(&corgikbd_data->input, corgikbd_data->keycode[scancode], 1);
88 if (corgikbd_data->keycode[scancode] == CORGI_KEY_OFF)
89 input_event(&corgikbd_data->input, EV_PWR, CORGI_KEY_OFF, 1);
90 } else if (!pressed && corgikbd_data->state[scancode] & CORGIKBD_PRESSED) {
91 corgikbd_data->state[scancode] &= ~CORGIKBD_PRESSED;
92 input_report_key(&corgikbd_data->input, corgikbd_data->keycode[scancode], 0);
93 }
94}
95 83
96#define KB_DISCHARGE_DELAY 10 84#define KB_DISCHARGE_DELAY 10
97#define KB_ACTIVATE_DELAY 10 85#define KB_ACTIVATE_DELAY 10
@@ -105,36 +93,36 @@ static void handle_scancode(unsigned int pressed,unsigned int scancode, struct c
105 */ 93 */
106static inline void corgikbd_discharge_all(void) 94static inline void corgikbd_discharge_all(void)
107{ 95{
108 // STROBE All HiZ 96 /* STROBE All HiZ */
109 GPCR2 = CORGI_GPIO_ALL_STROBE_BIT; 97 GPCR2 = CORGI_GPIO_ALL_STROBE_BIT;
110 GPDR2 &= ~CORGI_GPIO_ALL_STROBE_BIT; 98 GPDR2 &= ~CORGI_GPIO_ALL_STROBE_BIT;
111} 99}
112 100
113static inline void corgikbd_activate_all(void) 101static inline void corgikbd_activate_all(void)
114{ 102{
115 // STROBE ALL -> High 103 /* STROBE ALL -> High */
116 GPSR2 = CORGI_GPIO_ALL_STROBE_BIT; 104 GPSR2 = CORGI_GPIO_ALL_STROBE_BIT;
117 GPDR2 |= CORGI_GPIO_ALL_STROBE_BIT; 105 GPDR2 |= CORGI_GPIO_ALL_STROBE_BIT;
118 106
119 udelay(KB_DISCHARGE_DELAY); 107 udelay(KB_DISCHARGE_DELAY);
120 108
121 // Clear any interrupts we may have triggered when altering the GPIO lines 109 /* Clear any interrupts we may have triggered when altering the GPIO lines */
122 GEDR1 = CORGI_GPIO_HIGH_SENSE_BIT; 110 GEDR1 = CORGI_GPIO_HIGH_SENSE_BIT;
123 GEDR2 = CORGI_GPIO_LOW_SENSE_BIT; 111 GEDR2 = CORGI_GPIO_LOW_SENSE_BIT;
124} 112}
125 113
126static inline void corgikbd_activate_col(int col) 114static inline void corgikbd_activate_col(int col)
127{ 115{
128 // STROBE col -> High, not col -> HiZ 116 /* STROBE col -> High, not col -> HiZ */
129 GPSR2 = CORGI_GPIO_STROBE_BIT(col); 117 GPSR2 = CORGI_GPIO_STROBE_BIT(col);
130 GPDR2 = (GPDR2 & ~CORGI_GPIO_ALL_STROBE_BIT) | CORGI_GPIO_STROBE_BIT(col); 118 GPDR2 = (GPDR2 & ~CORGI_GPIO_ALL_STROBE_BIT) | CORGI_GPIO_STROBE_BIT(col);
131} 119}
132 120
133static inline void corgikbd_reset_col(int col) 121static inline void corgikbd_reset_col(int col)
134{ 122{
135 // STROBE col -> Low 123 /* STROBE col -> Low */
136 GPCR2 = CORGI_GPIO_STROBE_BIT(col); 124 GPCR2 = CORGI_GPIO_STROBE_BIT(col);
137 // STROBE col -> out, not col -> HiZ 125 /* STROBE col -> out, not col -> HiZ */
138 GPDR2 = (GPDR2 & ~CORGI_GPIO_ALL_STROBE_BIT) | CORGI_GPIO_STROBE_BIT(col); 126 GPDR2 = (GPDR2 & ~CORGI_GPIO_ALL_STROBE_BIT) | CORGI_GPIO_STROBE_BIT(col);
139} 127}
140 128
@@ -149,10 +137,13 @@ static inline void corgikbd_reset_col(int col)
149/* Scan the hardware keyboard and push any changes up through the input layer */ 137/* Scan the hardware keyboard and push any changes up through the input layer */
150static void corgikbd_scankeyboard(struct corgikbd *corgikbd_data, struct pt_regs *regs) 138static void corgikbd_scankeyboard(struct corgikbd *corgikbd_data, struct pt_regs *regs)
151{ 139{
152 unsigned int row, col, rowd, scancode; 140 unsigned int row, col, rowd;
153 unsigned long flags; 141 unsigned long flags;
154 unsigned int num_pressed; 142 unsigned int num_pressed;
155 143
144 if (corgikbd_data->suspended)
145 return;
146
156 spin_lock_irqsave(&corgikbd_data->lock, flags); 147 spin_lock_irqsave(&corgikbd_data->lock, flags);
157 148
158 if (regs) 149 if (regs)
@@ -173,10 +164,21 @@ static void corgikbd_scankeyboard(struct corgikbd *corgikbd_data, struct pt_regs
173 164
174 rowd = GET_ROWS_STATUS(col); 165 rowd = GET_ROWS_STATUS(col);
175 for (row = 0; row < KB_ROWS; row++) { 166 for (row = 0; row < KB_ROWS; row++) {
167 unsigned int scancode, pressed;
168
176 scancode = SCANCODE(row, col); 169 scancode = SCANCODE(row, col);
177 handle_scancode((rowd & KB_ROWMASK(row)), scancode, corgikbd_data); 170 pressed = rowd & KB_ROWMASK(row);
178 if (rowd & KB_ROWMASK(row)) 171
172 input_report_key(&corgikbd_data->input, corgikbd_data->keycode[scancode], pressed);
173
174 if (pressed)
179 num_pressed++; 175 num_pressed++;
176
177 if (pressed && (corgikbd_data->keycode[scancode] == CORGI_KEY_OFF)
178 && time_after(jiffies, corgikbd_data->suspend_jiffies + HZ)) {
179 input_event(&corgikbd_data->input, EV_PWR, CORGI_KEY_OFF, 1);
180 corgikbd_data->suspend_jiffies=jiffies;
181 }
180 } 182 }
181 corgikbd_reset_col(col); 183 corgikbd_reset_col(col);
182 } 184 }
@@ -221,8 +223,11 @@ static void corgikbd_timer_callback(unsigned long data)
221 * The hinge switches generate no interrupt so they need to be 223 * The hinge switches generate no interrupt so they need to be
222 * monitored by a timer. 224 * monitored by a timer.
223 * 225 *
224 * When we detect changes, we debounce it and then pass the three 226 * We debounce the switches and pass them to the input system.
225 * positions the system can take as keypresses to the input system. 227 *
228 * gprr == 0x00 - Keyboard with Landscape Screen
229 * 0x08 - No Keyboard with Portrait Screen
230 * 0x0c - Keyboard and Screen Closed
226 */ 231 */
227 232
228#define HINGE_STABLE_COUNT 2 233#define HINGE_STABLE_COUNT 2
@@ -235,7 +240,7 @@ static void corgikbd_hinge_timer(unsigned long data)
235 unsigned long gprr; 240 unsigned long gprr;
236 unsigned long flags; 241 unsigned long flags;
237 242
238 gprr = read_scoop_reg(SCOOP_GPRR) & (CORGI_SCP_SWA | CORGI_SCP_SWB); 243 gprr = read_scoop_reg(&corgiscoop_device.dev, SCOOP_GPRR) & (CORGI_SCP_SWA | CORGI_SCP_SWB);
239 if (gprr != sharpsl_hinge_state) { 244 if (gprr != sharpsl_hinge_state) {
240 hinge_count = 0; 245 hinge_count = 0;
241 sharpsl_hinge_state = gprr; 246 sharpsl_hinge_state = gprr;
@@ -244,9 +249,8 @@ static void corgikbd_hinge_timer(unsigned long data)
244 if (hinge_count >= HINGE_STABLE_COUNT) { 249 if (hinge_count >= HINGE_STABLE_COUNT) {
245 spin_lock_irqsave(&corgikbd_data->lock, flags); 250 spin_lock_irqsave(&corgikbd_data->lock, flags);
246 251
247 handle_scancode((sharpsl_hinge_state == 0x00), 125, corgikbd_data); /* Keyboard with Landscape Screen */ 252 input_report_switch(&corgikbd_data->input, SW_0, ((sharpsl_hinge_state & CORGI_SCP_SWA) != 0));
248 handle_scancode((sharpsl_hinge_state == 0x08), 126, corgikbd_data); /* No Keyboard with Portrait Screen */ 253 input_report_switch(&corgikbd_data->input, SW_1, ((sharpsl_hinge_state & CORGI_SCP_SWB) != 0));
249 handle_scancode((sharpsl_hinge_state == 0x0c), 127, corgikbd_data); /* Keyboard and Screen Closed */
250 input_sync(&corgikbd_data->input); 254 input_sync(&corgikbd_data->input);
251 255
252 spin_unlock_irqrestore(&corgikbd_data->lock, flags); 256 spin_unlock_irqrestore(&corgikbd_data->lock, flags);
@@ -255,19 +259,45 @@ static void corgikbd_hinge_timer(unsigned long data)
255 mod_timer(&corgikbd_data->htimer, jiffies + HINGE_SCAN_INTERVAL); 259 mod_timer(&corgikbd_data->htimer, jiffies + HINGE_SCAN_INTERVAL);
256} 260}
257 261
262#ifdef CONFIG_PM
263static int corgikbd_suspend(struct device *dev, pm_message_t state, uint32_t level)
264{
265 if (level == SUSPEND_POWER_DOWN) {
266 struct corgikbd *corgikbd = dev_get_drvdata(dev);
267 corgikbd->suspended = 1;
268 }
269 return 0;
270}
271
272static int corgikbd_resume(struct device *dev, uint32_t level)
273{
274 if (level == RESUME_POWER_ON) {
275 struct corgikbd *corgikbd = dev_get_drvdata(dev);
276
277 /* Upon resume, ignore the suspend key for a short while */
278 corgikbd->suspend_jiffies=jiffies;
279 corgikbd->suspended = 0;
280 }
281 return 0;
282}
283#else
284#define corgikbd_suspend NULL
285#define corgikbd_resume NULL
286#endif
287
258static int __init corgikbd_probe(struct device *dev) 288static int __init corgikbd_probe(struct device *dev)
259{ 289{
260 int i; 290 int i;
261 struct corgikbd *corgikbd; 291 struct corgikbd *corgikbd;
262 292
263 corgikbd = kcalloc(1, sizeof(struct corgikbd), GFP_KERNEL); 293 corgikbd = kzalloc(sizeof(struct corgikbd), GFP_KERNEL);
264 if (!corgikbd) 294 if (!corgikbd)
265 return -ENOMEM; 295 return -ENOMEM;
266 296
267 dev_set_drvdata(dev,corgikbd); 297 dev_set_drvdata(dev,corgikbd);
268 strcpy(corgikbd->phys, "corgikbd/input0"); 298 strcpy(corgikbd->phys, "corgikbd/input0");
269 299
270 spin_lock_init(corgikbd->lock); 300 spin_lock_init(&corgikbd->lock);
271 301
272 /* Init Keyboard rescan timer */ 302 /* Init Keyboard rescan timer */
273 init_timer(&corgikbd->timer); 303 init_timer(&corgikbd->timer);
@@ -279,6 +309,8 @@ static int __init corgikbd_probe(struct device *dev)
279 corgikbd->htimer.function = corgikbd_hinge_timer; 309 corgikbd->htimer.function = corgikbd_hinge_timer;
280 corgikbd->htimer.data = (unsigned long) corgikbd; 310 corgikbd->htimer.data = (unsigned long) corgikbd;
281 311
312 corgikbd->suspend_jiffies=jiffies;
313
282 init_input_dev(&corgikbd->input); 314 init_input_dev(&corgikbd->input);
283 corgikbd->input.private = corgikbd; 315 corgikbd->input.private = corgikbd;
284 corgikbd->input.name = "Corgi Keyboard"; 316 corgikbd->input.name = "Corgi Keyboard";
@@ -288,7 +320,7 @@ static int __init corgikbd_probe(struct device *dev)
288 corgikbd->input.id.vendor = 0x0001; 320 corgikbd->input.id.vendor = 0x0001;
289 corgikbd->input.id.product = 0x0001; 321 corgikbd->input.id.product = 0x0001;
290 corgikbd->input.id.version = 0x0100; 322 corgikbd->input.id.version = 0x0100;
291 corgikbd->input.evbit[0] = BIT(EV_KEY) | BIT(EV_REP) | BIT(EV_PWR); 323 corgikbd->input.evbit[0] = BIT(EV_KEY) | BIT(EV_REP) | BIT(EV_PWR) | BIT(EV_SW);
292 corgikbd->input.keycode = corgikbd->keycode; 324 corgikbd->input.keycode = corgikbd->keycode;
293 corgikbd->input.keycodesize = sizeof(unsigned char); 325 corgikbd->input.keycodesize = sizeof(unsigned char);
294 corgikbd->input.keycodemax = ARRAY_SIZE(corgikbd_keycode); 326 corgikbd->input.keycodemax = ARRAY_SIZE(corgikbd_keycode);
@@ -297,6 +329,8 @@ static int __init corgikbd_probe(struct device *dev)
297 for (i = 0; i < ARRAY_SIZE(corgikbd_keycode); i++) 329 for (i = 0; i < ARRAY_SIZE(corgikbd_keycode); i++)
298 set_bit(corgikbd->keycode[i], corgikbd->input.keybit); 330 set_bit(corgikbd->keycode[i], corgikbd->input.keybit);
299 clear_bit(0, corgikbd->input.keybit); 331 clear_bit(0, corgikbd->input.keybit);
332 set_bit(SW_0, corgikbd->input.swbit);
333 set_bit(SW_1, corgikbd->input.swbit);
300 334
301 input_register_device(&corgikbd->input); 335 input_register_device(&corgikbd->input);
302 mod_timer(&corgikbd->htimer, jiffies + HINGE_SCAN_INTERVAL); 336 mod_timer(&corgikbd->htimer, jiffies + HINGE_SCAN_INTERVAL);
@@ -343,6 +377,8 @@ static struct device_driver corgikbd_driver = {
343 .bus = &platform_bus_type, 377 .bus = &platform_bus_type,
344 .probe = corgikbd_probe, 378 .probe = corgikbd_probe,
345 .remove = corgikbd_remove, 379 .remove = corgikbd_remove,
380 .suspend = corgikbd_suspend,
381 .resume = corgikbd_resume,
346}; 382};
347 383
348static int __devinit corgikbd_init(void) 384static int __devinit corgikbd_init(void)
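[Note, not part of the patch] The corgikbd changes above drop the fake hinge scancodes in favour of EV_SW switch events (SW_0/SW_1) and add a one-second lockout on CORGI_KEY_OFF after resume. The lockout is the usual jiffies/time_after() pattern; the fragment below is an illustrative sketch with hypothetical names, not code from the driver.

	#include <linux/jiffies.h>	/* jiffies, HZ, time_after() */

	static unsigned long suspend_jiffies;	/* set to jiffies on resume */

	static int off_key_allowed(void)
	{
		/* Ignore the OFF key for one second after resume so the
		 * keypress that woke the machine cannot suspend it again. */
		return time_after(jiffies, suspend_jiffies + HZ);
	}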
diff --git a/drivers/input/mouse/psmouse-base.c b/drivers/input/mouse/psmouse-base.c
index 2bb2fe78bdca..12bdd3eff923 100644
--- a/drivers/input/mouse/psmouse-base.c
+++ b/drivers/input/mouse/psmouse-base.c
@@ -883,7 +883,7 @@ static int psmouse_connect(struct serio *serio, struct serio_driver *drv)
883 psmouse_deactivate(parent); 883 psmouse_deactivate(parent);
884 } 884 }
885 885
886 if (!(psmouse = kcalloc(1, sizeof(struct psmouse), GFP_KERNEL))) { 886 if (!(psmouse = kzalloc(sizeof(struct psmouse), GFP_KERNEL))) {
887 retval = -ENOMEM; 887 retval = -ENOMEM;
888 goto out; 888 goto out;
889 } 889 }
diff --git a/drivers/input/serio/serport.c b/drivers/input/serio/serport.c
index 79ca38469159..1bd88fca0542 100644
--- a/drivers/input/serio/serport.c
+++ b/drivers/input/serio/serport.c
@@ -87,7 +87,7 @@ static int serport_ldisc_open(struct tty_struct *tty)
87 if (!capable(CAP_SYS_ADMIN)) 87 if (!capable(CAP_SYS_ADMIN))
88 return -EPERM; 88 return -EPERM;
89 89
90 serport = kcalloc(1, sizeof(struct serport), GFP_KERNEL); 90 serport = kzalloc(sizeof(struct serport), GFP_KERNEL);
91 if (!serport) 91 if (!serport)
92 return -ENOMEM; 92 return -ENOMEM;
93 93
@@ -165,7 +165,7 @@ static ssize_t serport_ldisc_read(struct tty_struct * tty, struct file * file, u
165 if (test_and_set_bit(SERPORT_BUSY, &serport->flags)) 165 if (test_and_set_bit(SERPORT_BUSY, &serport->flags))
166 return -EBUSY; 166 return -EBUSY;
167 167
168 serport->serio = serio = kcalloc(1, sizeof(struct serio), GFP_KERNEL); 168 serport->serio = serio = kzalloc(sizeof(struct serio), GFP_KERNEL);
169 if (!serio) 169 if (!serio)
170 return -ENOMEM; 170 return -ENOMEM;
171 171
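[Note, not part of the patch] The psmouse and serport hunks replace kcalloc(1, size, flags) with kzalloc(size, flags). Both return zero-filled memory; kzalloc simply drops the redundant element count. A minimal stand-alone sketch (struct example is a stand-in, not a kernel type):

	#include <linux/slab.h>

	struct example {			/* stand-in for struct psmouse / struct serport */
		int id;
		char name[16];
	};

	static struct example *example_alloc(void)
	{
		/* equivalent to kcalloc(1, sizeof(struct example), GFP_KERNEL) */
		return kzalloc(sizeof(struct example), GFP_KERNEL);
	}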
diff --git a/drivers/input/touchscreen/corgi_ts.c b/drivers/input/touchscreen/corgi_ts.c
index 3f8b61cfbc37..5d19261b884f 100644
--- a/drivers/input/touchscreen/corgi_ts.c
+++ b/drivers/input/touchscreen/corgi_ts.c
@@ -53,11 +53,8 @@ struct corgi_ts {
53 53
54#define SyncHS() while((STATUS_HSYNC) == 0); while((STATUS_HSYNC) != 0); 54#define SyncHS() while((STATUS_HSYNC) == 0); while((STATUS_HSYNC) != 0);
55#define CCNT(a) asm volatile ("mrc p14, 0, %0, C1, C0, 0" : "=r"(a)) 55#define CCNT(a) asm volatile ("mrc p14, 0, %0, C1, C0, 0" : "=r"(a))
56#define CCNT_ON() {int pmnc = 1; asm volatile ("mcr p14, 0, %0, C0, C0, 0" : : "r"(pmnc));} 56#define PMNC_GET(x) asm volatile ("mrc p14, 0, %0, C0, C0, 0" : "=r"(x))
57#define CCNT_OFF() {int pmnc = 0; asm volatile ("mcr p14, 0, %0, C0, C0, 0" : : "r"(pmnc));} 57#define PMNC_SET(x) asm volatile ("mcr p14, 0, %0, C0, C0, 0" : : "r"(x))
58
59#define WAIT_HS_400_VGA 7013U // 17.615us
60#define WAIT_HS_400_QVGA 16622U // 41.750us
61 58
62 59
63/* ADS7846 Touch Screen Controller bit definitions */ 60/* ADS7846 Touch Screen Controller bit definitions */
@@ -69,41 +66,29 @@ struct corgi_ts {
69#define ADSCTRL_STS (1u << 7) /* Start Bit */ 66#define ADSCTRL_STS (1u << 7) /* Start Bit */
70 67
71/* External Functions */ 68/* External Functions */
72extern int w100fb_get_xres(void); 69extern unsigned long w100fb_get_hsynclen(struct device *dev);
73extern int w100fb_get_blanking(void);
74extern int w100fb_get_fastsysclk(void);
75extern unsigned int get_clk_frequency_khz(int info); 70extern unsigned int get_clk_frequency_khz(int info);
76 71
77static unsigned long calc_waittime(void) 72static unsigned long calc_waittime(void)
78{ 73{
79 int w100fb_xres = w100fb_get_xres(); 74 unsigned long hsync_len = w100fb_get_hsynclen(&corgifb_device.dev);
80 unsigned int waittime = 0;
81
82 if (w100fb_xres == 480 || w100fb_xres == 640) {
83 waittime = WAIT_HS_400_VGA * get_clk_frequency_khz(0) / 398131U;
84
85 if (w100fb_get_fastsysclk() == 100)
86 waittime = waittime * 75 / 100;
87
88 if (w100fb_xres == 640)
89 waittime *= 3;
90 75
91 return waittime; 76 if (hsync_len)
92 } 77 return get_clk_frequency_khz(0)*1000/hsync_len;
93 78 else
94 return WAIT_HS_400_QVGA * get_clk_frequency_khz(0) / 398131U; 79 return 0;
95} 80}
96 81
97static int sync_receive_data_send_cmd(int doRecive, int doSend, unsigned int address, unsigned long wait_time) 82static int sync_receive_data_send_cmd(int doRecive, int doSend, unsigned int address, unsigned long wait_time)
98{ 83{
84 unsigned long timer1 = 0, timer2, pmnc = 0;
99 int pos = 0; 85 int pos = 0;
100 unsigned long timer1 = 0, timer2;
101 int dosleep;
102 86
103 dosleep = !w100fb_get_blanking(); 87 if (wait_time && doSend) {
88 PMNC_GET(pmnc);
89 if (!(pmnc & 0x01))
90 PMNC_SET(0x01);
104 91
105 if (dosleep && doSend) {
106 CCNT_ON();
107 /* polling HSync */ 92 /* polling HSync */
108 SyncHS(); 93 SyncHS();
109 /* get CCNT */ 94 /* get CCNT */
@@ -119,11 +104,11 @@ static int sync_receive_data_send_cmd(int doRecive, int doSend, unsigned int add
119 corgi_ssp_ads7846_put(cmd); 104 corgi_ssp_ads7846_put(cmd);
120 corgi_ssp_ads7846_get(); 105 corgi_ssp_ads7846_get();
121 106
122 if (dosleep) { 107 if (wait_time) {
123 /* Wait after HSync */ 108 /* Wait after HSync */
124 CCNT(timer2); 109 CCNT(timer2);
125 if (timer2-timer1 > wait_time) { 110 if (timer2-timer1 > wait_time) {
126 /* timeout */ 111 /* too slow - timeout, try again */
127 SyncHS(); 112 SyncHS();
128 /* get OSCR */ 113 /* get OSCR */
129 CCNT(timer1); 114 CCNT(timer1);
@@ -134,8 +119,8 @@ static int sync_receive_data_send_cmd(int doRecive, int doSend, unsigned int add
134 CCNT(timer2); 119 CCNT(timer2);
135 } 120 }
136 corgi_ssp_ads7846_put(cmd); 121 corgi_ssp_ads7846_put(cmd);
137 if (dosleep) 122 if (wait_time && !(pmnc & 0x01))
138 CCNT_OFF(); 123 PMNC_SET(pmnc);
139 } 124 }
140 return pos; 125 return pos;
141} 126}
@@ -244,7 +229,7 @@ static irqreturn_t ts_interrupt(int irq, void *dev_id, struct pt_regs *regs)
244} 229}
245 230
246#ifdef CONFIG_PM 231#ifdef CONFIG_PM
247static int corgits_suspend(struct device *dev, uint32_t state, uint32_t level) 232static int corgits_suspend(struct device *dev, pm_message_t state, uint32_t level)
248{ 233{
249 if (level == SUSPEND_POWER_DOWN) { 234 if (level == SUSPEND_POWER_DOWN) {
250 struct corgi_ts *corgi_ts = dev_get_drvdata(dev); 235 struct corgi_ts *corgi_ts = dev_get_drvdata(dev);
diff --git a/drivers/isdn/hisax/hisax.h b/drivers/isdn/hisax/hisax.h
index 17cf7663c582..26c545fa223b 100644
--- a/drivers/isdn/hisax/hisax.h
+++ b/drivers/isdn/hisax/hisax.h
@@ -10,7 +10,6 @@
10#include <linux/errno.h> 10#include <linux/errno.h>
11#include <linux/fs.h> 11#include <linux/fs.h>
12#include <linux/major.h> 12#include <linux/major.h>
13#include <asm/segment.h>
14#include <asm/io.h> 13#include <asm/io.h>
15#include <linux/delay.h> 14#include <linux/delay.h>
16#include <linux/kernel.h> 15#include <linux/kernel.h>
@@ -1242,6 +1241,8 @@ struct IsdnCardState {
1242 1241
1243#ifdef CONFIG_HISAX_ENTERNOW_PCI 1242#ifdef CONFIG_HISAX_ENTERNOW_PCI
1244#define CARD_FN_ENTERNOW_PCI 1 1243#define CARD_FN_ENTERNOW_PCI 1
1244#else
1245#define CARD_FN_ENTERNOW_PCI 0
1245#endif 1246#endif
1246 1247
1247#define TEI_PER_CARD 1 1248#define TEI_PER_CARD 1
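[Note, not part of the patch] The hisax.h change defines CARD_FN_ENTERNOW_PCI to 0 when the option is disabled, so the constant can be used in plain C conditionals instead of #ifdef blocks. Illustrative sketch (the macro names are real, the function is hypothetical):

	#ifdef CONFIG_HISAX_ENTERNOW_PCI
	#define CARD_FN_ENTERNOW_PCI 1
	#else
	#define CARD_FN_ENTERNOW_PCI 0
	#endif

	static int enternow_supported(void)
	{
		return CARD_FN_ENTERNOW_PCI;	/* compiles either way; dead code is optimised out */
	}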
diff --git a/drivers/isdn/i4l/isdn_v110.c b/drivers/isdn/i4l/isdn_v110.c
index f47f2b9846d8..38619e8cd823 100644
--- a/drivers/isdn/i4l/isdn_v110.c
+++ b/drivers/isdn/i4l/isdn_v110.c
@@ -516,11 +516,11 @@ buffer_full:
516} 516}
517 517
518int 518int
519isdn_v110_stat_callback(int idx, isdn_ctrl * c) 519isdn_v110_stat_callback(int idx, isdn_ctrl *c)
520{ 520{
521 isdn_v110_stream *v = NULL; 521 isdn_v110_stream *v = NULL;
522 int i; 522 int i;
523 int ret; 523 int ret = 0;
524 524
525 if (idx < 0) 525 if (idx < 0)
526 return 0; 526 return 0;
diff --git a/drivers/md/dm-io.c b/drivers/md/dm-io.c
index 45754bb6a799..9de000131a8a 100644
--- a/drivers/md/dm-io.c
+++ b/drivers/md/dm-io.c
@@ -239,6 +239,11 @@ static void vm_dp_init(struct dpages *dp, void *data)
239 dp->context_ptr = data; 239 dp->context_ptr = data;
240} 240}
241 241
242static void dm_bio_destructor(struct bio *bio)
243{
244 bio_free(bio, _bios);
245}
246
242/*----------------------------------------------------------------- 247/*-----------------------------------------------------------------
243 * IO routines that accept a list of pages. 248 * IO routines that accept a list of pages.
244 *---------------------------------------------------------------*/ 249 *---------------------------------------------------------------*/
@@ -263,6 +268,7 @@ static void do_region(int rw, unsigned int region, struct io_region *where,
263 bio->bi_bdev = where->bdev; 268 bio->bi_bdev = where->bdev;
264 bio->bi_end_io = endio; 269 bio->bi_end_io = endio;
265 bio->bi_private = io; 270 bio->bi_private = io;
271 bio->bi_destructor = dm_bio_destructor;
266 bio_set_region(bio, region); 272 bio_set_region(bio, region);
267 273
268 /* 274 /*
diff --git a/drivers/md/dm.c b/drivers/md/dm.c
index d487d9deb98e..930b9fc27953 100644
--- a/drivers/md/dm.c
+++ b/drivers/md/dm.c
@@ -399,6 +399,11 @@ struct clone_info {
399 unsigned short idx; 399 unsigned short idx;
400}; 400};
401 401
402static void dm_bio_destructor(struct bio *bio)
403{
404 bio_free(bio, dm_set);
405}
406
402/* 407/*
403 * Creates a little bio that is just does part of a bvec. 408 * Creates a little bio that is just does part of a bvec.
404 */ 409 */
@@ -410,6 +415,7 @@ static struct bio *split_bvec(struct bio *bio, sector_t sector,
410 struct bio_vec *bv = bio->bi_io_vec + idx; 415 struct bio_vec *bv = bio->bi_io_vec + idx;
411 416
412 clone = bio_alloc_bioset(GFP_NOIO, 1, dm_set); 417 clone = bio_alloc_bioset(GFP_NOIO, 1, dm_set);
418 clone->bi_destructor = dm_bio_destructor;
413 *clone->bi_io_vec = *bv; 419 *clone->bi_io_vec = *bv;
414 420
415 clone->bi_sector = sector; 421 clone->bi_sector = sector;
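[Note, not part of the patch] Both dm-io.c and dm.c above allocate bios from a private bio_set and now install a bi_destructor so each bio is returned to the set it came from via bio_free(). A minimal sketch of the same pattern, assuming a bio_set created elsewhere with bioset_create(); the names are illustrative.

	#include <linux/bio.h>

	static struct bio_set *example_bs;	/* assumed: created with bioset_create() */

	static void example_bio_destructor(struct bio *bio)
	{
		bio_free(bio, example_bs);	/* hand the bio back to its own set */
	}

	static struct bio *example_clone_alloc(void)
	{
		struct bio *clone = bio_alloc_bioset(GFP_NOIO, 1, example_bs);

		if (clone)
			clone->bi_destructor = example_bio_destructor;
		return clone;
	}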
diff --git a/drivers/media/dvb/bt8xx/Kconfig b/drivers/media/dvb/bt8xx/Kconfig
index b12545f093f8..1e85d16491b0 100644
--- a/drivers/media/dvb/bt8xx/Kconfig
+++ b/drivers/media/dvb/bt8xx/Kconfig
@@ -1,5 +1,5 @@
1config DVB_BT8XX 1config DVB_BT8XX
2 tristate "Nebula/Pinnacle PCTV/Twinhan PCI cards" 2 tristate "BT8xx based PCI cards"
3 depends on DVB_CORE && PCI && VIDEO_BT848 3 depends on DVB_CORE && PCI && VIDEO_BT848
4 select DVB_MT352 4 select DVB_MT352
5 select DVB_SP887X 5 select DVB_SP887X
@@ -8,8 +8,8 @@ config DVB_BT8XX
8 select DVB_OR51211 8 select DVB_OR51211
9 help 9 help
10 Support for PCI cards based on the Bt8xx PCI bridge. Examples are 10 Support for PCI cards based on the Bt8xx PCI bridge. Examples are
11 the Nebula cards, the Pinnacle PCTV cards, the Twinhan DST cards and 11 the Nebula cards, the Pinnacle PCTV cards, the Twinhan DST cards,
12 pcHDTV HD2000 cards. 12 the pcHDTV HD2000 cards, and certain AVerMedia cards.
13 13
14 Since these cards have no MPEG decoder onboard, they transmit 14 Since these cards have no MPEG decoder onboard, they transmit
15 only compressed MPEG data over the PCI bus, so you need 15 only compressed MPEG data over the PCI bus, so you need
diff --git a/drivers/media/dvb/frontends/lgdt330x.c b/drivers/media/dvb/frontends/lgdt330x.c
index 1f1cd7a8d500..7142b9c51dd2 100644
--- a/drivers/media/dvb/frontends/lgdt330x.c
+++ b/drivers/media/dvb/frontends/lgdt330x.c
@@ -69,8 +69,8 @@ struct lgdt330x_state
69}; 69};
70 70
71static int i2c_write_demod_bytes (struct lgdt330x_state* state, 71static int i2c_write_demod_bytes (struct lgdt330x_state* state,
72 u8 *buf, /* data bytes to send */ 72 u8 *buf, /* data bytes to send */
73 int len /* number of bytes to send */ ) 73 int len /* number of bytes to send */ )
74{ 74{
75 struct i2c_msg msg = 75 struct i2c_msg msg =
76 { .addr = state->config->demod_address, 76 { .addr = state->config->demod_address,
@@ -129,13 +129,13 @@ static int lgdt3302_SwReset(struct lgdt330x_state* state)
129 }; 129 };
130 130
131 ret = i2c_write_demod_bytes(state, 131 ret = i2c_write_demod_bytes(state,
132 reset, sizeof(reset)); 132 reset, sizeof(reset));
133 if (ret == 0) { 133 if (ret == 0) {
134 134
135 /* force reset high (inactive) and unmask interrupts */ 135 /* force reset high (inactive) and unmask interrupts */
136 reset[1] = 0x7f; 136 reset[1] = 0x7f;
137 ret = i2c_write_demod_bytes(state, 137 ret = i2c_write_demod_bytes(state,
138 reset, sizeof(reset)); 138 reset, sizeof(reset));
139 } 139 }
140 return ret; 140 return ret;
141} 141}
@@ -149,13 +149,13 @@ static int lgdt3303_SwReset(struct lgdt330x_state* state)
149 }; 149 };
150 150
151 ret = i2c_write_demod_bytes(state, 151 ret = i2c_write_demod_bytes(state,
152 reset, sizeof(reset)); 152 reset, sizeof(reset));
153 if (ret == 0) { 153 if (ret == 0) {
154 154
155 /* force reset high (inactive) */ 155 /* force reset high (inactive) */
156 reset[1] = 0x01; 156 reset[1] = 0x01;
157 ret = i2c_write_demod_bytes(state, 157 ret = i2c_write_demod_bytes(state,
158 reset, sizeof(reset)); 158 reset, sizeof(reset));
159 } 159 }
160 return ret; 160 return ret;
161} 161}
@@ -172,7 +172,6 @@ static int lgdt330x_SwReset(struct lgdt330x_state* state)
172 } 172 }
173} 173}
174 174
175
176static int lgdt330x_init(struct dvb_frontend* fe) 175static int lgdt330x_init(struct dvb_frontend* fe)
177{ 176{
178 /* Hardware reset is done using gpio[0] of cx23880x chip. 177 /* Hardware reset is done using gpio[0] of cx23880x chip.
@@ -229,13 +228,13 @@ static int lgdt330x_init(struct dvb_frontend* fe)
229 case LGDT3302: 228 case LGDT3302:
230 chip_name = "LGDT3302"; 229 chip_name = "LGDT3302";
231 err = i2c_write_demod_bytes(state, lgdt3302_init_data, 230 err = i2c_write_demod_bytes(state, lgdt3302_init_data,
232 sizeof(lgdt3302_init_data)); 231 sizeof(lgdt3302_init_data));
233 break; 232 break;
234 case LGDT3303: 233 case LGDT3303:
235 chip_name = "LGDT3303"; 234 chip_name = "LGDT3303";
236 err = i2c_write_demod_bytes(state, lgdt3303_init_data, 235 err = i2c_write_demod_bytes(state, lgdt3303_init_data,
237 sizeof(lgdt3303_init_data)); 236 sizeof(lgdt3303_init_data));
238 break; 237 break;
239 default: 238 default:
240 chip_name = "undefined"; 239 chip_name = "undefined";
241 printk (KERN_WARNING "Only LGDT3302 and LGDT3303 are supported chips.\n"); 240 printk (KERN_WARNING "Only LGDT3302 and LGDT3303 are supported chips.\n");
@@ -262,15 +261,15 @@ static int lgdt330x_read_ucblocks(struct dvb_frontend* fe, u32* ucblocks)
262 switch (state->config->demod_chip) { 261 switch (state->config->demod_chip) {
263 case LGDT3302: 262 case LGDT3302:
264 err = i2c_read_demod_bytes(state, LGDT3302_PACKET_ERR_COUNTER1, 263 err = i2c_read_demod_bytes(state, LGDT3302_PACKET_ERR_COUNTER1,
265 buf, sizeof(buf)); 264 buf, sizeof(buf));
266 break; 265 break;
267 case LGDT3303: 266 case LGDT3303:
268 err = i2c_read_demod_bytes(state, LGDT3303_PACKET_ERR_COUNTER1, 267 err = i2c_read_demod_bytes(state, LGDT3303_PACKET_ERR_COUNTER1,
269 buf, sizeof(buf)); 268 buf, sizeof(buf));
270 break; 269 break;
271 default: 270 default:
272 printk(KERN_WARNING 271 printk(KERN_WARNING
273 "Only LGDT3302 and LGDT3303 are supported chips.\n"); 272 "Only LGDT3302 and LGDT3303 are supported chips.\n");
274 err = -ENODEV; 273 err = -ENODEV;
275 } 274 }
276 275
@@ -330,7 +329,7 @@ static int lgdt330x_set_parameters(struct dvb_frontend* fe,
330 329
331 if (state->config->demod_chip == LGDT3303) { 330 if (state->config->demod_chip == LGDT3303) {
332 err = i2c_write_demod_bytes(state, lgdt3303_8vsb_44_data, 331 err = i2c_write_demod_bytes(state, lgdt3303_8vsb_44_data,
333 sizeof(lgdt3303_8vsb_44_data)); 332 sizeof(lgdt3303_8vsb_44_data));
334 } 333 }
335 break; 334 break;
336 335
@@ -378,18 +377,19 @@ static int lgdt330x_set_parameters(struct dvb_frontend* fe,
378 377
379 /* Select the requested mode */ 378 /* Select the requested mode */
380 i2c_write_demod_bytes(state, top_ctrl_cfg, 379 i2c_write_demod_bytes(state, top_ctrl_cfg,
381 sizeof(top_ctrl_cfg)); 380 sizeof(top_ctrl_cfg));
382 state->config->set_ts_params(fe, 0); 381 if (state->config->set_ts_params)
382 state->config->set_ts_params(fe, 0);
383 state->current_modulation = param->u.vsb.modulation; 383 state->current_modulation = param->u.vsb.modulation;
384 } 384 }
385 385
386 /* Change only if we are actually changing the channel */ 386 /* Tune to the specified frequency */
387 if (state->current_frequency != param->frequency) { 387 if (state->config->pll_set)
388 /* Tune to the new frequency */
389 state->config->pll_set(fe, param); 388 state->config->pll_set(fe, param);
390 /* Keep track of the new frequency */ 389
391 state->current_frequency = param->frequency; 390 /* Keep track of the new frequency */
392 } 391 state->current_frequency = param->frequency;
392
393 lgdt330x_SwReset(state); 393 lgdt330x_SwReset(state);
394 return 0; 394 return 0;
395} 395}
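[Note, not part of the patch] The lgdt330x.c change calls the board-supplied set_ts_params and pll_set hooks only when the config actually provides them, and always reprograms the PLL rather than skipping "same frequency" retunes. A sketch of the optional-callback guard, with a stand-in config structure:

	struct example_config {
		int (*pll_set)(void *fe, void *params);	/* optional; may be NULL */
	};

	static void example_tune(struct example_config *cfg, void *fe, void *params)
	{
		if (cfg->pll_set)			/* guard the optional hook */
			cfg->pll_set(fe, params);
	}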
diff --git a/drivers/media/video/Makefile b/drivers/media/video/Makefile
index 810e7aac0a53..3e6f5347da21 100644
--- a/drivers/media/video/Makefile
+++ b/drivers/media/video/Makefile
@@ -29,7 +29,7 @@ obj-$(CONFIG_VIDEO_ZORAN_LML33R10) += saa7114.o adv7170.o zr36060.o
29obj-$(CONFIG_VIDEO_ZORAN) += zr36067.o videocodec.o 29obj-$(CONFIG_VIDEO_ZORAN) += zr36067.o videocodec.o
30obj-$(CONFIG_VIDEO_PMS) += pms.o 30obj-$(CONFIG_VIDEO_PMS) += pms.o
31obj-$(CONFIG_VIDEO_PLANB) += planb.o 31obj-$(CONFIG_VIDEO_PLANB) += planb.o
32obj-$(CONFIG_VIDEO_VINO) += vino.o 32obj-$(CONFIG_VIDEO_VINO) += vino.o saa7191.o indycam.o
33obj-$(CONFIG_VIDEO_STRADIS) += stradis.o 33obj-$(CONFIG_VIDEO_STRADIS) += stradis.o
34obj-$(CONFIG_VIDEO_CPIA) += cpia.o 34obj-$(CONFIG_VIDEO_CPIA) += cpia.o
35obj-$(CONFIG_VIDEO_CPIA_PP) += cpia_pp.o 35obj-$(CONFIG_VIDEO_CPIA_PP) += cpia_pp.o
diff --git a/drivers/media/video/adv7170.c b/drivers/media/video/adv7170.c
index 52e32f05d625..1ca2b67aedfb 100644
--- a/drivers/media/video/adv7170.c
+++ b/drivers/media/video/adv7170.c
@@ -43,7 +43,6 @@
43#include <asm/pgtable.h> 43#include <asm/pgtable.h>
44#include <asm/page.h> 44#include <asm/page.h>
45#include <linux/sched.h> 45#include <linux/sched.h>
46#include <asm/segment.h>
47#include <linux/types.h> 46#include <linux/types.h>
48 47
49#include <linux/videodev.h> 48#include <linux/videodev.h>
diff --git a/drivers/media/video/adv7175.c b/drivers/media/video/adv7175.c
index b5ed9544bdea..173bca1e0295 100644
--- a/drivers/media/video/adv7175.c
+++ b/drivers/media/video/adv7175.c
@@ -39,7 +39,6 @@
39#include <asm/pgtable.h> 39#include <asm/pgtable.h>
40#include <asm/page.h> 40#include <asm/page.h>
41#include <linux/sched.h> 41#include <linux/sched.h>
42#include <asm/segment.h>
43#include <linux/types.h> 42#include <linux/types.h>
44 43
45#include <linux/videodev.h> 44#include <linux/videodev.h>
diff --git a/drivers/media/video/bt819.c b/drivers/media/video/bt819.c
index c6cfa7c48b04..3ee0afca76a7 100644
--- a/drivers/media/video/bt819.c
+++ b/drivers/media/video/bt819.c
@@ -43,7 +43,6 @@
43#include <asm/pgtable.h> 43#include <asm/pgtable.h>
44#include <asm/page.h> 44#include <asm/page.h>
45#include <linux/sched.h> 45#include <linux/sched.h>
46#include <asm/segment.h>
47#include <linux/types.h> 46#include <linux/types.h>
48 47
49#include <linux/videodev.h> 48#include <linux/videodev.h>
diff --git a/drivers/media/video/bt856.c b/drivers/media/video/bt856.c
index c13d28658868..8eb871d0e85b 100644
--- a/drivers/media/video/bt856.c
+++ b/drivers/media/video/bt856.c
@@ -43,7 +43,6 @@
43#include <asm/pgtable.h> 43#include <asm/pgtable.h>
44#include <asm/page.h> 44#include <asm/page.h>
45#include <linux/sched.h> 45#include <linux/sched.h>
46#include <asm/segment.h>
47#include <linux/types.h> 46#include <linux/types.h>
48 47
49#include <linux/videodev.h> 48#include <linux/videodev.h>
diff --git a/drivers/media/video/indycam.c b/drivers/media/video/indycam.c
new file mode 100644
index 000000000000..b2b0384cd4b9
--- /dev/null
+++ b/drivers/media/video/indycam.c
@@ -0,0 +1,412 @@
1/*
2 * indycam.c - Silicon Graphics IndyCam digital camera driver
3 *
4 * Copyright (C) 2003 Ladislav Michl <ladis@linux-mips.org>
5 * Copyright (C) 2004,2005 Mikael Nousiainen <tmnousia@cc.hut.fi>
6 *
7 * This program is free software; you can redistribute it and/or modify
8 * it under the terms of the GNU General Public License version 2 as
9 * published by the Free Software Foundation.
10 */
11
12#include <linux/module.h>
13#include <linux/init.h>
14#include <linux/delay.h>
15#include <linux/errno.h>
16#include <linux/fs.h>
17#include <linux/kernel.h>
18#include <linux/major.h>
19#include <linux/slab.h>
20#include <linux/mm.h>
21#include <linux/sched.h>
22
23#include <linux/videodev.h>
24/* IndyCam decodes stream of photons into digital image representation ;-) */
25#include <linux/video_decoder.h>
26#include <linux/i2c.h>
27
28#include "indycam.h"
29
30//#define INDYCAM_DEBUG
31
32#define INDYCAM_MODULE_VERSION "0.0.3"
33
34MODULE_DESCRIPTION("SGI IndyCam driver");
35MODULE_VERSION(INDYCAM_MODULE_VERSION);
36MODULE_AUTHOR("Mikael Nousiainen <tmnousia@cc.hut.fi>");
37MODULE_LICENSE("GPL");
38
39#ifdef INDYCAM_DEBUG
40#define dprintk(x...) printk("IndyCam: " x);
41#define indycam_regdump(client) indycam_regdump_debug(client)
42#else
43#define dprintk(x...)
44#define indycam_regdump(client)
45#endif
46
47#define VINO_ADAPTER (I2C_ALGO_SGI | I2C_HW_SGI_VINO)
48
49struct indycam {
50 struct i2c_client *client;
51 int version;
52};
53
54static struct i2c_driver i2c_driver_indycam;
55
56static const unsigned char initseq[] = {
57 INDYCAM_CONTROL_AGCENA, /* INDYCAM_CONTROL */
58 INDYCAM_SHUTTER_DEFAULT, /* INDYCAM_SHUTTER */
59 INDYCAM_GAIN_DEFAULT, /* INDYCAM_GAIN */
60 0x00, /* INDYCAM_BRIGHTNESS (read-only) */
61 INDYCAM_RED_BALANCE_DEFAULT, /* INDYCAM_RED_BALANCE */
62 INDYCAM_BLUE_BALANCE_DEFAULT, /* INDYCAM_BLUE_BALANCE */
63 INDYCAM_RED_SATURATION_DEFAULT, /* INDYCAM_RED_SATURATION */
64 INDYCAM_BLUE_SATURATION_DEFAULT,/* INDYCAM_BLUE_SATURATION */
65};
66
67/* IndyCam register handling */
68
69static int indycam_read_reg(struct i2c_client *client, unsigned char reg,
70 unsigned char *value)
71{
72 int ret;
73
74 if (reg == INDYCAM_RESET) {
75 dprintk("indycam_read_reg(): "
76 "skipping write-only register %d\n", reg);
77 *value = 0;
78 return 0;
79 }
80
81 ret = i2c_smbus_read_byte_data(client, reg);
82 if (ret < 0) {
83 printk(KERN_ERR "IndyCam: indycam_read_reg(): read failed, "
84 "register = 0x%02x\n", reg);
85 return ret;
86 }
87
88 *value = (unsigned char)ret;
89
90 return 0;
91}
92
93static int indycam_write_reg(struct i2c_client *client, unsigned char reg,
94 unsigned char value)
95{
96 int err;
97
98 if ((reg == INDYCAM_BRIGHTNESS)
99 || (reg == INDYCAM_VERSION)) {
100 dprintk("indycam_write_reg(): "
101 "skipping read-only register %d\n", reg);
102 return 0;
103 }
104
105 dprintk("Writing Reg %d = 0x%02x\n", reg, value);
106 err = i2c_smbus_write_byte_data(client, reg, value);
107 if (err) {
108 printk(KERN_ERR "IndyCam: indycam_write_reg(): write failed, "
109 "register = 0x%02x, value = 0x%02x\n", reg, value);
110 }
111 return err;
112}
113
114static int indycam_write_block(struct i2c_client *client, unsigned char reg,
115 unsigned char length, unsigned char *data)
116{
117 unsigned char i;
118 int err;
119
120 for (i = reg; i < length; i++) {
121 err = indycam_write_reg(client, reg + i, data[i]);
122 if (err)
123 return err;
124 }
125
126 return 0;
127}
128
129/* Helper functions */
130
131#ifdef INDYCAM_DEBUG
132static void indycam_regdump_debug(struct i2c_client *client)
133{
134 int i;
135 unsigned char val;
136
137 for (i = 0; i < 9; i++) {
138 indycam_read_reg(client, i, &val);
139 dprintk("Reg %d = 0x%02x\n", i, val);
140 }
141}
142#endif
143
144static int indycam_get_controls(struct i2c_client *client,
145 struct indycam_control *ctrl)
146{
147 unsigned char ctrl_reg;
148
149 indycam_read_reg(client, INDYCAM_CONTROL, &ctrl_reg);
150 ctrl->agc = (ctrl_reg & INDYCAM_CONTROL_AGCENA)
151 ? INDYCAM_VALUE_ENABLED
152 : INDYCAM_VALUE_DISABLED;
153 ctrl->awb = (ctrl_reg & INDYCAM_CONTROL_AWBCTL)
154 ? INDYCAM_VALUE_ENABLED
155 : INDYCAM_VALUE_DISABLED;
156 indycam_read_reg(client, INDYCAM_SHUTTER,
157 (unsigned char *)&ctrl->shutter);
158 indycam_read_reg(client, INDYCAM_GAIN,
159 (unsigned char *)&ctrl->gain);
160 indycam_read_reg(client, INDYCAM_RED_BALANCE,
161 (unsigned char *)&ctrl->red_balance);
162 indycam_read_reg(client, INDYCAM_BLUE_BALANCE,
163 (unsigned char *)&ctrl->blue_balance);
164 indycam_read_reg(client, INDYCAM_RED_SATURATION,
165 (unsigned char *)&ctrl->red_saturation);
166 indycam_read_reg(client, INDYCAM_BLUE_SATURATION,
167 (unsigned char *)&ctrl->blue_saturation);
168 indycam_read_reg(client, INDYCAM_GAMMA,
169 (unsigned char *)&ctrl->gamma);
170
171 return 0;
172}
173
174static int indycam_set_controls(struct i2c_client *client,
175 struct indycam_control *ctrl)
176{
177 unsigned char ctrl_reg;
178
179 indycam_read_reg(client, INDYCAM_CONTROL, &ctrl_reg);
180 if (ctrl->agc != INDYCAM_VALUE_UNCHANGED) {
181 if (ctrl->agc)
182 ctrl_reg |= INDYCAM_CONTROL_AGCENA;
183 else
184 ctrl_reg &= ~INDYCAM_CONTROL_AGCENA;
185 }
186 if (ctrl->awb != INDYCAM_VALUE_UNCHANGED) {
187 if (ctrl->awb)
188 ctrl_reg |= INDYCAM_CONTROL_AWBCTL;
189 else
190 ctrl_reg &= ~INDYCAM_CONTROL_AWBCTL;
191 }
192 indycam_write_reg(client, INDYCAM_CONTROL, ctrl_reg);
193
194 if (ctrl->shutter >= 0)
195 indycam_write_reg(client, INDYCAM_SHUTTER, ctrl->shutter);
196 if (ctrl->gain >= 0)
197 indycam_write_reg(client, INDYCAM_GAIN, ctrl->gain);
198 if (ctrl->red_balance >= 0)
199 indycam_write_reg(client, INDYCAM_RED_BALANCE,
200 ctrl->red_balance);
201 if (ctrl->blue_balance >= 0)
202 indycam_write_reg(client, INDYCAM_BLUE_BALANCE,
203 ctrl->blue_balance);
204 if (ctrl->red_saturation >= 0)
205 indycam_write_reg(client, INDYCAM_RED_SATURATION,
206 ctrl->red_saturation);
207 if (ctrl->blue_saturation >= 0)
208 indycam_write_reg(client, INDYCAM_BLUE_SATURATION,
209 ctrl->blue_saturation);
210 if (ctrl->gamma >= 0)
211 indycam_write_reg(client, INDYCAM_GAMMA, ctrl->gamma);
212
213 return 0;
214}
215
216/* I2C-interface */
217
218static int indycam_attach(struct i2c_adapter *adap, int addr, int kind)
219{
220 int err = 0;
221 struct indycam *camera;
222 struct i2c_client *client;
223
224 printk(KERN_INFO "SGI IndyCam driver version %s\n",
225 INDYCAM_MODULE_VERSION);
226
227 client = kmalloc(sizeof(struct i2c_client), GFP_KERNEL);
228 if (!client)
229 return -ENOMEM;
230 camera = kmalloc(sizeof(struct indycam), GFP_KERNEL);
231 if (!camera) {
232 err = -ENOMEM;
233 goto out_free_client;
234 }
235
236 memset(client, 0, sizeof(struct i2c_client));
237 memset(camera, 0, sizeof(struct indycam));
238
239 client->addr = addr;
240 client->adapter = adap;
241 client->driver = &i2c_driver_indycam;
242 client->flags = 0;
243 strcpy(client->name, "IndyCam client");
244 i2c_set_clientdata(client, camera);
245
246 camera->client = client;
247
248 err = i2c_attach_client(client);
249 if (err)
250 goto out_free_camera;
251
252 camera->version = i2c_smbus_read_byte_data(client, INDYCAM_VERSION);
253 if (camera->version != CAMERA_VERSION_INDY &&
254 camera->version != CAMERA_VERSION_MOOSE) {
255 err = -ENODEV;
256 goto out_detach_client;
257 }
258 printk(KERN_INFO "IndyCam v%d.%d detected\n",
259 INDYCAM_VERSION_MAJOR(camera->version),
260 INDYCAM_VERSION_MINOR(camera->version));
261
262 indycam_regdump(client);
263
264 // initialize
265 err = indycam_write_block(client, 0, sizeof(initseq),
266 (unsigned char *)&initseq);
267 if (err) {
268 printk(KERN_ERR "IndyCam initalization failed\n");
269 err = -EIO;
270 goto out_detach_client;
271 }
272
273 indycam_regdump(client);
274
275 // white balance
276 err = indycam_write_reg(client, INDYCAM_CONTROL,
277 INDYCAM_CONTROL_AGCENA | INDYCAM_CONTROL_AWBCTL);
278 if (err) {
279 printk(KERN_ERR "IndyCam white balance "
280 "initialization failed\n");
281 err = -EIO;
282 goto out_detach_client;
283 }
284
285 indycam_regdump(client);
286
287 printk(KERN_INFO "IndyCam initialized\n");
288
289 return 0;
290
291out_detach_client:
292 i2c_detach_client(client);
293out_free_camera:
294 kfree(camera);
295out_free_client:
296 kfree(client);
297 return err;
298}
299
300static int indycam_probe(struct i2c_adapter *adap)
301{
302 /* Indy specific crap */
303 if (adap->id == VINO_ADAPTER)
304 return indycam_attach(adap, INDYCAM_ADDR, 0);
305 /* Feel free to add probe here :-) */
306 return -ENODEV;
307}
308
309static int indycam_detach(struct i2c_client *client)
310{
311 struct indycam *camera = i2c_get_clientdata(client);
312
313 i2c_detach_client(client);
314 kfree(camera);
315 kfree(client);
316 return 0;
317}
318
319static int indycam_command(struct i2c_client *client, unsigned int cmd,
320 void *arg)
321{
322 // struct indycam *camera = i2c_get_clientdata(client);
323
324 /* The old video_decoder interface just isn't enough,
325 * so we'll use some custom commands. */
326 switch (cmd) {
327 case DECODER_GET_CAPABILITIES: {
328 struct video_decoder_capability *cap = arg;
329
330 cap->flags = VIDEO_DECODER_NTSC;
331 cap->inputs = 1;
332 cap->outputs = 1;
333 break;
334 }
335 case DECODER_GET_STATUS: {
336 int *iarg = arg;
337
338 *iarg = DECODER_STATUS_GOOD | DECODER_STATUS_NTSC |
339 DECODER_STATUS_COLOR;
340 break;
341 }
342 case DECODER_SET_NORM: {
343 int *iarg = arg;
344
345 switch (*iarg) {
346 case VIDEO_MODE_NTSC:
347 break;
348 default:
349 return -EINVAL;
350 }
351 break;
352 }
353 case DECODER_SET_INPUT: {
354 int *iarg = arg;
355
356 if (*iarg != 0)
357 return -EINVAL;
358 break;
359 }
360 case DECODER_SET_OUTPUT: {
361 int *iarg = arg;
362
363 if (*iarg != 0)
364 return -EINVAL;
365 break;
366 }
367 case DECODER_ENABLE_OUTPUT: {
368 /* Always enabled */
369 break;
370 }
371 case DECODER_SET_PICTURE: {
372 // struct video_picture *pic = arg;
373 /* TODO: convert values for indycam_set_controls() */
374 break;
375 }
376 case DECODER_INDYCAM_GET_CONTROLS: {
377 struct indycam_control *ctrl = arg;
378 indycam_get_controls(client, ctrl);
379 }
380 case DECODER_INDYCAM_SET_CONTROLS: {
381 struct indycam_control *ctrl = arg;
382 indycam_set_controls(client, ctrl);
383 }
384 default:
385 return -EINVAL;
386 }
387
388 return 0;
389}
390
391static struct i2c_driver i2c_driver_indycam = {
392 .owner = THIS_MODULE,
393 .name = "indycam",
394 .id = I2C_DRIVERID_INDYCAM,
395 .flags = I2C_DF_NOTIFY,
396 .attach_adapter = indycam_probe,
397 .detach_client = indycam_detach,
398 .command = indycam_command,
399};
400
401static int __init indycam_init(void)
402{
403 return i2c_add_driver(&i2c_driver_indycam);
404}
405
406static void __exit indycam_exit(void)
407{
408 i2c_del_driver(&i2c_driver_indycam);
409}
410
411module_init(indycam_init);
412module_exit(indycam_exit);
diff --git a/drivers/media/video/indycam.h b/drivers/media/video/indycam.h
new file mode 100644
index 000000000000..d9ddb6b79a03
--- /dev/null
+++ b/drivers/media/video/indycam.h
@@ -0,0 +1,112 @@
1/*
2 * indycam.h - Silicon Graphics IndyCam digital camera driver
3 *
4 * Copyright (C) 2003 Ladislav Michl <ladis@linux-mips.org>
5 * Copyright (C) 2004,2005 Mikael Nousiainen <tmnousia@cc.hut.fi>
6 *
7 * This program is free software; you can redistribute it and/or modify
8 * it under the terms of the GNU General Public License version 2 as
9 * published by the Free Software Foundation.
10 */
11
12#ifndef _INDYCAM_H_
13#define _INDYCAM_H_
14
15/* I2C address for the Guinness Camera */
16#define INDYCAM_ADDR 0x56
17
18/* Camera version */
19#define CAMERA_VERSION_INDY 0x10 /* v1.0 */
20#define CAMERA_VERSION_MOOSE 0x12 /* v1.2 */
21#define INDYCAM_VERSION_MAJOR(x) (((x) & 0xf0) >> 4)
22#define INDYCAM_VERSION_MINOR(x) ((x) & 0x0f)
23
24/* Register bus addresses */
25#define INDYCAM_CONTROL 0x00
26#define INDYCAM_SHUTTER 0x01
27#define INDYCAM_GAIN 0x02
28#define INDYCAM_BRIGHTNESS 0x03 /* read-only */
29#define INDYCAM_RED_BALANCE 0x04
30#define INDYCAM_BLUE_BALANCE 0x05
31#define INDYCAM_RED_SATURATION 0x06
32#define INDYCAM_BLUE_SATURATION 0x07
33#define INDYCAM_GAMMA 0x08
34#define INDYCAM_VERSION 0x0e /* read-only */
35#define INDYCAM_RESET 0x0f /* write-only */
36
37#define INDYCAM_LED 0x46
38#define INDYCAM_ORIENTATION 0x47
39#define INDYCAM_BUTTON 0x48
40
41/* Field definitions of registers */
42#define INDYCAM_CONTROL_AGCENA (1<<0) /* automatic gain control */
43#define INDYCAM_CONTROL_AWBCTL (1<<1) /* automatic white balance */
44 /* 2-3 are reserved */
45#define INDYCAM_CONTROL_EVNFLD (1<<4) /* read-only */
46
47#define INDYCAM_SHUTTER_10000 0x02 /* 1/10000 second */
48#define INDYCAM_SHUTTER_4000 0x04 /* 1/4000 second */
49#define INDYCAM_SHUTTER_2000 0x08 /* 1/2000 second */
50#define INDYCAM_SHUTTER_1000 0x10 /* 1/1000 second */
51#define INDYCAM_SHUTTER_500 0x20 /* 1/500 second */
52#define INDYCAM_SHUTTER_250 0x3f /* 1/250 second */
53#define INDYCAM_SHUTTER_125 0x7e /* 1/125 second */
54#define INDYCAM_SHUTTER_100 0x9e /* 1/100 second */
55#define INDYCAM_SHUTTER_60 0x00 /* 1/60 second */
56
57#define INDYCAM_LED_ACTIVE 0x10
58#define INDYCAM_LED_INACTIVE 0x30
59#define INDYCAM_ORIENTATION_BOTTOM_TO_TOP 0x40
60#define INDYCAM_BUTTON_RELEASED 0x10
61
62#define INDYCAM_SHUTTER_MIN 0x00
63#define INDYCAM_SHUTTER_MAX 0xff
64#define INDYCAM_GAIN_MIN 0x00
65#define INDYCAM_GAIN_MAX 0xff
66#define INDYCAM_RED_BALANCE_MIN 0x00 /* the effect is the opposite? */
67#define INDYCAM_RED_BALANCE_MAX 0xff
68#define INDYCAM_BLUE_BALANCE_MIN 0x00 /* the effect is the opposite? */
69#define INDYCAM_BLUE_BALANCE_MAX 0xff
70#define INDYCAM_RED_SATURATION_MIN 0x00
71#define INDYCAM_RED_SATURATION_MAX 0xff
72#define INDYCAM_BLUE_SATURATION_MIN 0x00
73#define INDYCAM_BLUE_SATURATION_MAX 0xff
74#define INDYCAM_GAMMA_MIN 0x00
75#define INDYCAM_GAMMA_MAX 0xff
76
77/* Driver interface definitions */
78
79#define INDYCAM_VALUE_ENABLED 1
80#define INDYCAM_VALUE_DISABLED 0
81#define INDYCAM_VALUE_UNCHANGED -1
82
83/* When setting controls, a value of -1 leaves the control unchanged. */
84struct indycam_control {
85 int agc; /* boolean */
86 int awb; /* boolean */
87 int shutter;
88 int gain;
89 int red_balance;
90 int blue_balance;
91 int red_saturation;
92 int blue_saturation;
93 int gamma;
94};
95
96#define DECODER_INDYCAM_GET_CONTROLS _IOR('d', 193, struct indycam_control)
97#define DECODER_INDYCAM_SET_CONTROLS _IOW('d', 194, struct indycam_control)
98
99/* Default values for controls */
100
101#define INDYCAM_AGC_DEFAULT INDYCAM_VALUE_ENABLED
102#define INDYCAM_AWB_DEFAULT INDYCAM_VALUE_ENABLED
103
104#define INDYCAM_SHUTTER_DEFAULT INDYCAM_SHUTTER_60
105#define INDYCAM_GAIN_DEFAULT 0x80
106#define INDYCAM_RED_BALANCE_DEFAULT 0x18
107#define INDYCAM_BLUE_BALANCE_DEFAULT 0xa4
108#define INDYCAM_RED_SATURATION_DEFAULT 0x80
109#define INDYCAM_BLUE_SATURATION_DEFAULT 0xc0
110#define INDYCAM_GAMMA_DEFAULT 0x80
111
112#endif
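[Note, not part of the patch] indycam.h above defines INDYCAM_VALUE_UNCHANGED (-1) so a caller can adjust a single control and leave the rest alone. A usage sketch, illustrative only (indycam_set_controls() is static to indycam.c, so this would not link as-is):

	#include <linux/i2c.h>
	#include "indycam.h"

	static void example_set_shutter(struct i2c_client *client)
	{
		struct indycam_control ctrl = {
			.agc		 = INDYCAM_VALUE_UNCHANGED,
			.awb		 = INDYCAM_VALUE_UNCHANGED,
			.shutter	 = INDYCAM_SHUTTER_250,	/* 1/250 s */
			.gain		 = INDYCAM_VALUE_UNCHANGED,
			.red_balance	 = INDYCAM_VALUE_UNCHANGED,
			.blue_balance	 = INDYCAM_VALUE_UNCHANGED,
			.red_saturation	 = INDYCAM_VALUE_UNCHANGED,
			.blue_saturation = INDYCAM_VALUE_UNCHANGED,
			.gamma		 = INDYCAM_VALUE_UNCHANGED,
		};

		indycam_set_controls(client, &ctrl);	/* only the shutter register is written */
	}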
diff --git a/drivers/media/video/meye.c b/drivers/media/video/meye.c
index fe194012bccf..3f2a882bc20a 100644
--- a/drivers/media/video/meye.c
+++ b/drivers/media/video/meye.c
@@ -37,6 +37,7 @@
37#include <linux/delay.h> 37#include <linux/delay.h>
38#include <linux/interrupt.h> 38#include <linux/interrupt.h>
39#include <linux/vmalloc.h> 39#include <linux/vmalloc.h>
40#include <linux/dma-mapping.h>
40 41
41#include "meye.h" 42#include "meye.h"
42#include <linux/meye.h> 43#include <linux/meye.h>
@@ -121,7 +122,7 @@ static int ptable_alloc(void)
121 memset(meye.mchip_ptable, 0, sizeof(meye.mchip_ptable)); 122 memset(meye.mchip_ptable, 0, sizeof(meye.mchip_ptable));
122 123
123 /* give only 32 bit DMA addresses */ 124 /* give only 32 bit DMA addresses */
124 if (dma_set_mask(&meye.mchip_dev->dev, 0xffffffff)) 125 if (dma_set_mask(&meye.mchip_dev->dev, DMA_32BIT_MASK))
125 return -1; 126 return -1;
126 127
127 meye.mchip_ptable_toc = dma_alloc_coherent(&meye.mchip_dev->dev, 128 meye.mchip_ptable_toc = dma_alloc_coherent(&meye.mchip_dev->dev,
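[Note, not part of the patch] meye.c above swaps the open-coded 0xffffffff for DMA_32BIT_MASK, which the newly added <linux/dma-mapping.h> include provides. Sketch of the call, assuming a generic struct device pointer:

	#include <linux/device.h>
	#include <linux/dma-mapping.h>	/* DMA_32BIT_MASK */

	static int example_limit_dma(struct device *dev)
	{
		/* restrict the device to 32-bit DMA addresses */
		return dma_set_mask(dev, DMA_32BIT_MASK);
	}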
diff --git a/drivers/media/video/saa7111.c b/drivers/media/video/saa7111.c
index f18df53d98ff..fe8a5e453969 100644
--- a/drivers/media/video/saa7111.c
+++ b/drivers/media/video/saa7111.c
@@ -42,7 +42,6 @@
42#include <asm/pgtable.h> 42#include <asm/pgtable.h>
43#include <asm/page.h> 43#include <asm/page.h>
44#include <linux/sched.h> 44#include <linux/sched.h>
45#include <asm/segment.h>
46#include <linux/types.h> 45#include <linux/types.h>
47 46
48#include <linux/videodev.h> 47#include <linux/videodev.h>
diff --git a/drivers/media/video/saa7114.c b/drivers/media/video/saa7114.c
index e0c70f54f073..d9f50e2f7b92 100644
--- a/drivers/media/video/saa7114.c
+++ b/drivers/media/video/saa7114.c
@@ -45,7 +45,6 @@
45#include <asm/pgtable.h> 45#include <asm/pgtable.h>
46#include <asm/page.h> 46#include <asm/page.h>
47#include <linux/sched.h> 47#include <linux/sched.h>
48#include <asm/segment.h>
49#include <linux/types.h> 48#include <linux/types.h>
50 49
51#include <linux/videodev.h> 50#include <linux/videodev.h>
diff --git a/drivers/media/video/saa7185.c b/drivers/media/video/saa7185.c
index e93412f4407c..132aa7943c16 100644
--- a/drivers/media/video/saa7185.c
+++ b/drivers/media/video/saa7185.c
@@ -39,7 +39,6 @@
39#include <asm/pgtable.h> 39#include <asm/pgtable.h>
40#include <asm/page.h> 40#include <asm/page.h>
41#include <linux/sched.h> 41#include <linux/sched.h>
42#include <asm/segment.h>
43#include <linux/types.h> 42#include <linux/types.h>
44 43
45#include <linux/videodev.h> 44#include <linux/videodev.h>
diff --git a/drivers/media/video/saa7191.c b/drivers/media/video/saa7191.c
new file mode 100644
index 000000000000..454f5c1199b4
--- /dev/null
+++ b/drivers/media/video/saa7191.c
@@ -0,0 +1,512 @@
1/*
2 * saa7191.c - Philips SAA7191 video decoder driver
3 *
4 * Copyright (C) 2003 Ladislav Michl <ladis@linux-mips.org>
5 * Copyright (C) 2004,2005 Mikael Nousiainen <tmnousia@cc.hut.fi>
6 *
7 * This program is free software; you can redistribute it and/or modify
8 * it under the terms of the GNU General Public License version 2 as
9 * published by the Free Software Foundation.
10 */
11
12#include <linux/module.h>
13#include <linux/init.h>
14#include <linux/delay.h>
15#include <linux/errno.h>
16#include <linux/fs.h>
17#include <linux/kernel.h>
18#include <linux/major.h>
19#include <linux/slab.h>
20#include <linux/mm.h>
21#include <linux/sched.h>
22
23#include <linux/videodev.h>
24#include <linux/video_decoder.h>
25#include <linux/i2c.h>
26
27#include "saa7191.h"
28
29#define SAA7191_MODULE_VERSION "0.0.3"
30
31MODULE_DESCRIPTION("Philips SAA7191 video decoder driver");
32MODULE_VERSION(SAA7191_MODULE_VERSION);
33MODULE_AUTHOR("Mikael Nousiainen <tmnousia@cc.hut.fi>");
34MODULE_LICENSE("GPL");
35
36#define VINO_ADAPTER (I2C_ALGO_SGI | I2C_HW_SGI_VINO)
37
38struct saa7191 {
39 struct i2c_client *client;
40
41 /* the register values are stored here as the actual
42 * I2C-registers are write-only */
43 unsigned char reg[25];
44
45 unsigned char norm;
46 unsigned char input;
47};
48
49static struct i2c_driver i2c_driver_saa7191;
50
51static const unsigned char initseq[] = {
52 0, /* Subaddress */
53 0x50, /* SAA7191_REG_IDEL */
54 0x30, /* SAA7191_REG_HSYB */
55 0x00, /* SAA7191_REG_HSYS */
56 0xe8, /* SAA7191_REG_HCLB */
57 0xb6, /* SAA7191_REG_HCLS */
58 0xf4, /* SAA7191_REG_HPHI */
59 0x01, /* SAA7191_REG_LUMA - chrominance trap active (CVBS) */
60 0x00, /* SAA7191_REG_HUEC */
61 0xf8, /* SAA7191_REG_CKTQ */
62 0xf8, /* SAA7191_REG_CKTS */
63 0x90, /* SAA7191_REG_PLSE */
64 0x90, /* SAA7191_REG_SESE */
65 0x00, /* SAA7191_REG_GAIN */
66 0x0c, /* SAA7191_REG_STDC - not SECAM, slow time constant */
67 0x78, /* SAA7191_REG_IOCK - chrominance from CVBS, GPSW1 & 2 off */
68 0x99, /* SAA7191_REG_CTL3 - automatic field detection */
69 0x00, /* SAA7191_REG_CTL4 */
70 0x2c, /* SAA7191_REG_CHCV */
71 0x00, /* unused */
72 0x00, /* unused */
73 0x34, /* SAA7191_REG_HS6B */
74 0x0a, /* SAA7191_REG_HS6S */
75 0xf4, /* SAA7191_REG_HC6B */
76 0xce, /* SAA7191_REG_HC6S */
77 0xf4, /* SAA7191_REG_HP6I */
78};
79
80/* SAA7191 register handling */
81
82static unsigned char saa7191_read_reg(struct i2c_client *client,
83 unsigned char reg)
84{
85 return ((struct saa7191 *)i2c_get_clientdata(client))->reg[reg];
86}
87
88static int saa7191_read_status(struct i2c_client *client,
89 unsigned char *value)
90{
91 int ret;
92
93 ret = i2c_master_recv(client, value, 1);
94 if (ret < 0) {
95 printk(KERN_ERR "SAA7191: saa7191_read_status(): read failed");
96 return ret;
97 }
98
99 return 0;
100}
101
102
103static int saa7191_write_reg(struct i2c_client *client, unsigned char reg,
104 unsigned char value)
105{
106
107 ((struct saa7191 *)i2c_get_clientdata(client))->reg[reg] = value;
108 return i2c_smbus_write_byte_data(client, reg, value);
109}
110
111/* the first byte of data must be the first subaddress number (register) */
112static int saa7191_write_block(struct i2c_client *client,
113 unsigned char length, unsigned char *data)
114{
115 int i;
116 int ret;
117
118 struct saa7191 *decoder = (struct saa7191 *)i2c_get_clientdata(client);
119 for (i = 0; i < (length - 1); i++) {
120 decoder->reg[data[0] + i] = data[i + 1];
121 }
122
123 ret = i2c_master_send(client, data, length);
124 if (ret < 0) {
125 printk(KERN_ERR "SAA7191: saa7191_write_block(): "
126 "write failed");
127 return ret;
128 }
129
130 return 0;
131}
132
133/* Helper functions */
134
135static int saa7191_set_input(struct i2c_client *client, int input)
136{
137 unsigned char luma = saa7191_read_reg(client, SAA7191_REG_LUMA);
138 unsigned char iock = saa7191_read_reg(client, SAA7191_REG_IOCK);
139 int err;
140
141 switch (input) {
142 case SAA7191_INPUT_COMPOSITE: /* Set Composite input */
143 iock &= ~(SAA7191_IOCK_CHRS | SAA7191_IOCK_GPSW1
144 | SAA7191_IOCK_GPSW2);
145 /* Chrominance trap active */
146 luma &= ~SAA7191_LUMA_BYPS;
147 break;
148 case SAA7191_INPUT_SVIDEO: /* Set S-Video input */
149 iock |= SAA7191_IOCK_CHRS | SAA7191_IOCK_GPSW2;
150 /* Chrominance trap bypassed */
151 luma |= SAA7191_LUMA_BYPS;
152 break;
153 default:
154 return -EINVAL;
155 }
156
157 err = saa7191_write_reg(client, SAA7191_REG_LUMA, luma);
158 if (err)
159 return -EIO;
160 err = saa7191_write_reg(client, SAA7191_REG_IOCK, iock);
161 if (err)
162 return -EIO;
163
164 return 0;
165}
166
167static int saa7191_set_norm(struct i2c_client *client, int norm)
168{
169 struct saa7191 *decoder = i2c_get_clientdata(client);
170 unsigned char stdc = saa7191_read_reg(client, SAA7191_REG_STDC);
171 unsigned char ctl3 = saa7191_read_reg(client, SAA7191_REG_CTL3);
172 unsigned char chcv = saa7191_read_reg(client, SAA7191_REG_CHCV);
173 int err;
174
175 switch(norm) {
176 case SAA7191_NORM_AUTO: {
177 unsigned char status;
178
179 // does status depend on current norm ?
180 if (saa7191_read_status(client, &status))
181 return -EIO;
182
183 stdc &= ~SAA7191_STDC_SECS;
184 ctl3 &= ~SAA7191_CTL3_FSEL;
185 ctl3 |= SAA7191_CTL3_AUFD;
186 chcv = (status & SAA7191_STATUS_FIDT)
187 ? SAA7191_CHCV_NTSC : SAA7191_CHCV_PAL;
188 break;
189 }
190 case SAA7191_NORM_PAL:
191 stdc &= ~SAA7191_STDC_SECS;
192 ctl3 &= ~(SAA7191_CTL3_AUFD | SAA7191_CTL3_FSEL);
193 chcv = SAA7191_CHCV_PAL;
194 break;
195 case SAA7191_NORM_NTSC:
196 stdc &= ~SAA7191_STDC_SECS;
197 ctl3 &= ~SAA7191_CTL3_AUFD;
198 ctl3 |= SAA7191_CTL3_FSEL;
199 chcv = SAA7191_CHCV_NTSC;
200 break;
201 case SAA7191_NORM_SECAM:
202 stdc |= SAA7191_STDC_SECS;
203 ctl3 &= ~(SAA7191_CTL3_AUFD | SAA7191_CTL3_FSEL);
204 chcv = SAA7191_CHCV_PAL;
205 break;
206 default:
207 return -EINVAL;
208 }
209
210 err = saa7191_write_reg(client, SAA7191_REG_CTL3, ctl3);
211 if (err)
212 return -EIO;
213 err = saa7191_write_reg(client, SAA7191_REG_STDC, stdc);
214 if (err)
215 return -EIO;
216 err = saa7191_write_reg(client, SAA7191_REG_CHCV, chcv);
217 if (err)
218 return -EIO;
219
220 decoder->norm = norm;
221
222 return 0;
223}
224
225static int saa7191_get_controls(struct i2c_client *client,
226 struct saa7191_control *ctrl)
227{
228 unsigned char hue = saa7191_read_reg(client, SAA7191_REG_HUEC);
229 unsigned char stdc = saa7191_read_reg(client, SAA7191_REG_STDC);
230
231 if (hue < 0x80) {
232 hue += 0x80;
233 } else {
234 hue -= 0x80;
235 }
236 ctrl->hue = hue;
237
238 ctrl->vtrc = (stdc & SAA7191_STDC_VTRC)
239 ? SAA7191_VALUE_ENABLED : SAA7191_VALUE_DISABLED;
240
241 return 0;
242}
243
244static int saa7191_set_controls(struct i2c_client *client,
245 struct saa7191_control *ctrl)
246{
247 int err;
248
249 if (ctrl->hue >= 0) {
250 unsigned char hue = ctrl->hue & 0xff;
251 if (hue < 0x80) {
252 hue += 0x80;
253 } else {
254 hue -= 0x80;
255 }
256 err = saa7191_write_reg(client, SAA7191_REG_HUEC, hue);
257 if (err)
258 return -EIO;
259 }
260 if (ctrl->vtrc >= 0) {
261 unsigned char stdc =
262 saa7191_read_reg(client, SAA7191_REG_STDC);
263
264 if (ctrl->vtrc) {
265 stdc |= SAA7191_STDC_VTRC;
266 } else {
267 stdc &= ~SAA7191_STDC_VTRC;
268 }
269
270 err = saa7191_write_reg(client, SAA7191_REG_STDC, stdc);
271 if (err)
272 return -EIO;
273 }
274
275 return 0;
276}
277
278/* I2C-interface */
279
280static int saa7191_attach(struct i2c_adapter *adap, int addr, int kind)
281{
282 int err = 0;
283 struct saa7191 *decoder;
284 struct i2c_client *client;
285
286 printk(KERN_INFO "Philips SAA7191 driver version %s\n",
287 SAA7191_MODULE_VERSION);
288
289 client = kmalloc(sizeof(*client), GFP_KERNEL);
290 if (!client)
291 return -ENOMEM;
292 decoder = kmalloc(sizeof(*decoder), GFP_KERNEL);
293 if (!decoder) {
294 err = -ENOMEM;
295 goto out_free_client;
296 }
297
298 memset(client, 0, sizeof(struct i2c_client));
299 memset(decoder, 0, sizeof(struct saa7191));
300
301 client->addr = addr;
302 client->adapter = adap;
303 client->driver = &i2c_driver_saa7191;
304 client->flags = 0;
305 strcpy(client->name, "saa7191 client");
306 i2c_set_clientdata(client, decoder);
307
308 decoder->client = client;
309
310 err = i2c_attach_client(client);
311 if (err)
312 goto out_free_decoder;
313
314 decoder->input = SAA7191_INPUT_COMPOSITE;
315 decoder->norm = SAA7191_NORM_AUTO;
316
317 err = saa7191_write_block(client, sizeof(initseq),
318 (unsigned char *)initseq);
319 if (err) {
320 printk(KERN_ERR "SAA7191 initialization failed\n");
321 goto out_detach_client;
322 }
323
324 printk(KERN_INFO "SAA7191 initialized\n");
325
326 return 0;
327
328out_detach_client:
329 i2c_detach_client(client);
330out_free_decoder:
331 kfree(decoder);
332out_free_client:
333 kfree(client);
334 return err;
335}
336
337static int saa7191_probe(struct i2c_adapter *adap)
338{
339 /* Always connected to VINO */
340 if (adap->id == VINO_ADAPTER)
341 return saa7191_attach(adap, SAA7191_ADDR, 0);
342 /* Feel free to add probe here :-) */
343 return -ENODEV;
344}
345
346static int saa7191_detach(struct i2c_client *client)
347{
348 struct saa7191 *decoder = i2c_get_clientdata(client);
349
350 i2c_detach_client(client);
351 kfree(decoder);
352 kfree(client);
353 return 0;
354}
355
356static int saa7191_command(struct i2c_client *client, unsigned int cmd,
357 void *arg)
358{
359 struct saa7191 *decoder = i2c_get_clientdata(client);
360
361 switch (cmd) {
362 case DECODER_GET_CAPABILITIES: {
363 struct video_decoder_capability *cap = arg;
364
365 cap->flags = VIDEO_DECODER_PAL | VIDEO_DECODER_NTSC |
366 VIDEO_DECODER_SECAM | VIDEO_DECODER_AUTO;
367 cap->inputs = (client->adapter->id == VINO_ADAPTER) ? 2 : 1;
368 cap->outputs = 1;
369 break;
370 }
371 case DECODER_GET_STATUS: {
372 int *iarg = arg;
373 unsigned char status;
374 int res = 0;
375
376 if (saa7191_read_status(client, &status)) {
377 return -EIO;
378 }
379 if ((status & SAA7191_STATUS_HLCK) == 0)
380 res |= DECODER_STATUS_GOOD;
381 if (status & SAA7191_STATUS_CODE)
382 res |= DECODER_STATUS_COLOR;
383 switch (decoder->norm) {
384 case SAA7191_NORM_NTSC:
385 res |= DECODER_STATUS_NTSC;
386 break;
387 case SAA7191_NORM_PAL:
388 res |= DECODER_STATUS_PAL;
389 break;
390 case SAA7191_NORM_SECAM:
391 res |= DECODER_STATUS_SECAM;
392 break;
393 case SAA7191_NORM_AUTO:
394 default:
395 if (status & SAA7191_STATUS_FIDT)
396 res |= DECODER_STATUS_NTSC;
397 else
398 res |= DECODER_STATUS_PAL;
399 break;
400 }
401 *iarg = res;
402 break;
403 }
404 case DECODER_SET_NORM: {
405 int *iarg = arg;
406
407 switch (*iarg) {
408 case VIDEO_MODE_AUTO:
409 return saa7191_set_norm(client, SAA7191_NORM_AUTO);
410 case VIDEO_MODE_PAL:
411 return saa7191_set_norm(client, SAA7191_NORM_PAL);
412 case VIDEO_MODE_NTSC:
413 return saa7191_set_norm(client, SAA7191_NORM_NTSC);
414 case VIDEO_MODE_SECAM:
415 return saa7191_set_norm(client, SAA7191_NORM_SECAM);
416 default:
417 return -EINVAL;
418 }
419 break;
420 }
421 case DECODER_SET_INPUT: {
422 int *iarg = arg;
423
424 switch (client->adapter->id) {
425 case VINO_ADAPTER:
426 return saa7191_set_input(client, *iarg);
427 default:
428 if (*iarg != 0)
429 return -EINVAL;
430 }
431 break;
432 }
433 case DECODER_SET_OUTPUT: {
434 int *iarg = arg;
435
436 /* not much choice of outputs */
437 if (*iarg != 0)
438 return -EINVAL;
439 break;
440 }
441 case DECODER_ENABLE_OUTPUT: {
442 /* Always enabled */
443 break;
444 }
445 case DECODER_SET_PICTURE: {
446 struct video_picture *pic = arg;
447 unsigned val;
448 int err;
449
450 val = (pic->hue >> 8) - 0x80;
451 err = saa7191_write_reg(client, SAA7191_REG_HUEC, val);
452 if (err)
453 return -EIO;
454 break;
455 }
456 case DECODER_SAA7191_GET_STATUS: {
457 struct saa7191_status *status = arg;
458 unsigned char status_reg;
459
460 if (saa7191_read_status(client, &status_reg))
461 return -EIO;
462 status->signal = ((status_reg & SAA7191_STATUS_HLCK) == 0)
463 ? SAA7191_VALUE_ENABLED : SAA7191_VALUE_DISABLED;
464 status->ntsc = (status_reg & SAA7191_STATUS_FIDT)
465 ? SAA7191_VALUE_ENABLED : SAA7191_VALUE_DISABLED;
466 status->color = (status_reg & SAA7191_STATUS_CODE)
467 ? SAA7191_VALUE_ENABLED : SAA7191_VALUE_DISABLED;
468
469 status->input = decoder->input;
470 status->norm = decoder->norm;
471 }
472 case DECODER_SAA7191_SET_NORM: {
473 int *norm = arg;
474 return saa7191_set_norm(client, *norm);
475 }
476 case DECODER_SAA7191_GET_CONTROLS: {
477 struct saa7191_control *ctrl = arg;
478 return saa7191_get_controls(client, ctrl);
479 }
480 case DECODER_SAA7191_SET_CONTROLS: {
481 struct saa7191_control *ctrl = arg;
482 return saa7191_set_controls(client, ctrl);
483 }
484 default:
485 return -EINVAL;
486 }
487
488 return 0;
489}
490
491static struct i2c_driver i2c_driver_saa7191 = {
492 .owner = THIS_MODULE,
493 .name = "saa7191",
494 .id = I2C_DRIVERID_SAA7191,
495 .flags = I2C_DF_NOTIFY,
496 .attach_adapter = saa7191_probe,
497 .detach_client = saa7191_detach,
498 .command = saa7191_command
499};
500
501static int saa7191_init(void)
502{
503 return i2c_add_driver(&i2c_driver_saa7191);
504}
505
506static void saa7191_exit(void)
507{
508 i2c_del_driver(&i2c_driver_saa7191);
509}
510
511module_init(saa7191_init);
512module_exit(saa7191_exit);
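The decoder is not driven through file operations of its own; a bridge driver that holds the i2c_client registered by saa7191_attach() issues the DECODER_* commands above through the client's driver->command hook. A minimal sketch of that calling pattern (the example function and its error handling are assumptions for illustration, not part of this patch):

	/* Sketch only: 'client' is assumed to be the i2c_client that
	 * saa7191_attach() registered; obtaining it is the bridge's job. */
	static int example_saa7191_set_pal(struct i2c_client *client)
	{
		int norm = VIDEO_MODE_PAL;

		return client->driver->command(client, DECODER_SET_NORM, &norm);
	}

The vino.c changes later in this patch wrap exactly this pattern in their i2c_decoder_command() helper.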
diff --git a/drivers/media/video/saa7191.h b/drivers/media/video/saa7191.h
new file mode 100644
index 000000000000..272045031435
--- /dev/null
+++ b/drivers/media/video/saa7191.h
@@ -0,0 +1,139 @@
1/*
2 * saa7191.h - Philips SAA7191 video decoder driver
3 *
4 * Copyright (C) 2003 Ladislav Michl <ladis@linux-mips.org>
5 * Copyright (C) 2004,2005 Mikael Nousiainen <tmnousia@cc.hut.fi>
6 *
7 * This program is free software; you can redistribute it and/or modify
8 * it under the terms of the GNU General Public License version 2 as
9 * published by the Free Software Foundation.
10 */
11
12#ifndef _SAA7191_H_
13#define _SAA7191_H_
14
15/* Philips SAA7191 DMSD I2C bus address */
16#define SAA7191_ADDR 0x8a
17
18/* Register subaddresses. */
19#define SAA7191_REG_IDEL 0x00
20#define SAA7191_REG_HSYB 0x01
21#define SAA7191_REG_HSYS 0x02
22#define SAA7191_REG_HCLB 0x03
23#define SAA7191_REG_HCLS 0x04
24#define SAA7191_REG_HPHI 0x05
25#define SAA7191_REG_LUMA 0x06
26#define SAA7191_REG_HUEC 0x07
27#define SAA7191_REG_CKTQ 0x08
28#define SAA7191_REG_CKTS 0x09
29#define SAA7191_REG_PLSE 0x0a
30#define SAA7191_REG_SESE 0x0b
31#define SAA7191_REG_GAIN 0x0c
32#define SAA7191_REG_STDC 0x0d
33#define SAA7191_REG_IOCK 0x0e
34#define SAA7191_REG_CTL3 0x0f
35#define SAA7191_REG_CTL4 0x10
36#define SAA7191_REG_CHCV 0x11
37#define SAA7191_REG_HS6B 0x14
38#define SAA7191_REG_HS6S 0x15
39#define SAA7191_REG_HC6B 0x16
40#define SAA7191_REG_HC6S 0x17
41#define SAA7191_REG_HP6I 0x18
42#define SAA7191_REG_STATUS 0xff /* not really a subaddress */
43
44/* Status Register definitions */
45#define SAA7191_STATUS_CODE 0x01 /* color detected flag */
46#define SAA7191_STATUS_FIDT 0x20 /* format type NTSC/PAL */
47#define SAA7191_STATUS_HLCK 0x40 /* PLL unlocked/locked */
48#define SAA7191_STATUS_STTC 0x80 /* tv/vtr time constant */
49
50/* Luminance Control Register definitions */
51#define SAA7191_LUMA_BYPS 0x80
52
53/* Chroma Gain Control Settings Register definitions */
54/* 0=automatic colour-killer enabled, 1=forced colour on */
55#define SAA7191_GAIN_COLO 0x80
56
57/* Standard/Mode Control Register definitions */
58/* tv/vtr mode bit: 0=TV mode (slow time constant),
59 * 1=VTR mode (fast time constant) */
60#define SAA7191_STDC_VTRC 0x80
61/* SECAM mode bit: 0=other standards, 1=SECAM */
62#define SAA7191_STDC_SECS 0x01
63/* the bit fields above must be or'd with this value */
64#define SAA7191_STDC_VALUE 0x0c
65
66/* I/O and Clock Control Register definitions */
67/* horizontal clock PLL: 0=PLL closed,
68 * 1=PLL circuit open and horizontal freq fixed */
69#define SAA7191_IOCK_HPLL 0x80
70/* S-VHS bit (chrominance from CVBS or from chrominance input):
71 * 0=controlled by BYPS-bit, 1=from chrominance input */
72#define SAA7191_IOCK_CHRS 0x04
73/* general purpose switch 2
74 * VINO-specific: 0=used with CVBS, 1=used with S-Video */
75#define SAA7191_IOCK_GPSW2 0x02
76/* general purpose switch 1 */
77/* VINO-specific: 0=always, 1=not used!*/
78#define SAA7191_IOCK_GPSW1 0x01
79
80/* Miscellaneous Control #1 Register definitions */
81/* automatic field detection (50/60Hz standard) */
82#define SAA7191_CTL3_AUFD 0x80
83/* field select: (if AUFD=0)
84 * 0=50Hz (625 lines), 1=60Hz (525 lines) */
85#define SAA7191_CTL3_FSEL 0x40
86/* the bit fields above must be or'd with this value */
87#define SAA7191_CTL3_VALUE 0x19
88
89/* Chrominance Gain Control Register definitions
90 * (nominal value for UV CCIR level) */
91#define SAA7191_CHCV_NTSC 0x2c
92#define SAA7191_CHCV_PAL 0x59
93
94/* Driver interface definitions */
95#define SAA7191_INPUT_COMPOSITE 0
96#define SAA7191_INPUT_SVIDEO 1
97
98#define SAA7191_NORM_AUTO 0
99#define SAA7191_NORM_PAL 1
100#define SAA7191_NORM_NTSC 2
101#define SAA7191_NORM_SECAM 3
102
103#define SAA7191_VALUE_ENABLED 1
104#define SAA7191_VALUE_DISABLED 0
105#define SAA7191_VALUE_UNCHANGED -1
106
107struct saa7191_status {
108 /* 0=no signal, 1=signal active*/
109 int signal;
110 /* 0=50hz (pal) signal, 1=60hz (ntsc) signal */
111 int ntsc;
112 /* 0=no color detected, 1=color detected */
113 int color;
114
115 /* current SAA7191_INPUT_ */
116 int input;
117 /* current SAA7191_NORM_ */
118 int norm;
119};
120
121#define SAA7191_HUE_MIN 0x00
122#define SAA7191_HUE_MAX 0xff
123#define SAA7191_HUE_DEFAULT 0x80
124
125#define SAA7191_VTRC_MIN 0x00
126#define SAA7191_VTRC_MAX 0x01
127#define SAA7191_VTRC_DEFAULT 0x00
128
129struct saa7191_control {
130 int hue;
131 int vtrc;
132};
133
134#define DECODER_SAA7191_GET_STATUS _IOR('d', 195, struct saa7191_status)
135#define DECODER_SAA7191_SET_NORM _IOW('d', 196, int)
136#define DECODER_SAA7191_GET_CONTROLS _IOR('d', 197, struct saa7191_control)
137#define DECODER_SAA7191_SET_CONTROLS _IOW('d', 198, struct saa7191_control)
138
139#endif
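The STDC and CTL3 comments above note that their bit fields "must be or'd with this value". A hedged sketch of that composition, using the driver's internal saa7191_write_reg() helper, with the SECAM/AUFD choices picked purely for illustration:

	/* Illustration only; the register choices are arbitrary examples. */
	static void example_compose_saa7191_registers(struct i2c_client *client)
	{
		unsigned char stdc = SAA7191_STDC_VALUE;	/* mandatory base bits */
		unsigned char ctl3 = SAA7191_CTL3_VALUE;	/* mandatory base bits */

		stdc |= SAA7191_STDC_SECS;	/* select SECAM */
		ctl3 |= SAA7191_CTL3_AUFD;	/* automatic 50/60 Hz detection */

		saa7191_write_reg(client, SAA7191_REG_STDC, stdc);
		saa7191_write_reg(client, SAA7191_REG_CTL3, ctl3);
	}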
diff --git a/drivers/media/video/vino.c b/drivers/media/video/vino.c
index 76e8681d65c6..d8a0f763ca10 100644
--- a/drivers/media/video/vino.c
+++ b/drivers/media/video/vino.c
@@ -1,80 +1,606 @@
1/* 1/*
2 * (incomplete) Driver for the VINO (Video In No Out) system found in SGI Indys. 2 * Driver for the VINO (Video In No Out) system found in SGI Indys.
3 * 3 *
4 * This file is subject to the terms and conditions of the GNU General Public 4 * This file is subject to the terms and conditions of the GNU General Public
5 * License version 2 as published by the Free Software Foundation. 5 * License version 2 as published by the Free Software Foundation.
6 * 6 *
7 * Copyright (C) 2004,2005 Mikael Nousiainen <tmnousia@cc.hut.fi>
8 *
9 * Based on the previous version of the driver for 2.4 kernels by:
7 * Copyright (C) 2003 Ladislav Michl <ladis@linux-mips.org> 10 * Copyright (C) 2003 Ladislav Michl <ladis@linux-mips.org>
8 */ 11 */
9 12
10#include <linux/module.h> 13/*
14 * TODO:
15 * - remove "hacks" from memory allocation code and implement nopage()
16 * - check decimation, calculating and reporting image size when
17 * using decimation
18 * - check vino_acquire_input(), vino_set_input() and channel
19 * ownership handling
20 * - report VINO error-interrupts via ioctls ?
21 * - implement picture controls (all implemented?)
22 * - use macros for boolean values (?)
23 * - implement user mode buffers and overlay (?)
24 */
25
11#include <linux/init.h> 26#include <linux/init.h>
12#include <linux/types.h> 27#include <linux/module.h>
13#include <linux/mm.h>
14#include <linux/slab.h>
15#include <linux/wrapper.h>
16#include <linux/errno.h>
17#include <linux/irq.h>
18#include <linux/delay.h> 28#include <linux/delay.h>
19#include <linux/videodev.h> 29#include <linux/errno.h>
30#include <linux/fs.h>
31#include <linux/kernel.h>
32#include <linux/mm.h>
33#include <linux/interrupt.h>
34#include <linux/dma-mapping.h>
35#include <linux/time.h>
36#include <linux/moduleparam.h>
37
38#ifdef CONFIG_KMOD
39#include <linux/kmod.h>
40#endif
41
20#include <linux/i2c.h> 42#include <linux/i2c.h>
21#include <linux/i2c-algo-sgi.h> 43#include <linux/i2c-algo-sgi.h>
22 44
23#include <asm/addrspace.h> 45#include <linux/videodev.h>
24#include <asm/system.h> 46#include <linux/videodev2.h>
25#include <asm/bootinfo.h> 47#include <linux/video_decoder.h>
26#include <asm/pgtable.h> 48
27#include <asm/paccess.h> 49#include <asm/paccess.h>
28#include <asm/io.h> 50#include <asm/io.h>
29#include <asm/sgi/ip22.h> 51#include <asm/sgi/ip22.h>
30#include <asm/sgi/hpc3.h>
31#include <asm/sgi/mc.h> 52#include <asm/sgi/mc.h>
32 53
33#include "vino.h" 54#include "vino.h"
55#include "saa7191.h"
56#include "indycam.h"
57
58/* Uncomment the following line to get lots and lots of (mostly useless)
59 * debug info.
60 * Note that the debug output also slows down the driver significantly */
61// #define VINO_DEBUG
62
63#define VINO_MODULE_VERSION "0.0.3"
64#define VINO_VERSION_CODE KERNEL_VERSION(0, 0, 3)
65
66MODULE_DESCRIPTION("SGI VINO Video4Linux2 driver");
67MODULE_VERSION(VINO_MODULE_VERSION);
68MODULE_AUTHOR("Mikael Nousiainen <tmnousia@cc.hut.fi>");
69MODULE_LICENSE("GPL");
34 70
35/* debugging? */ 71#define mem_map_reserve(p) set_bit(PG_reserved, &((p)->flags))
36#if 1 72#define mem_map_unreserve(p) clear_bit(PG_reserved, &((p)->flags))
37#define DEBUG(x...) printk(x); 73
74#ifdef VINO_DEBUG
75#define dprintk(x...) printk("VINO: " x);
38#else 76#else
39#define DEBUG(x...) 77#define dprintk(x...)
40#endif 78#endif
41 79
80#define VINO_NO_CHANNEL 0
81#define VINO_CHANNEL_A 1
82#define VINO_CHANNEL_B 2
83
84#define VINO_PAL_WIDTH 768
85#define VINO_PAL_HEIGHT 576
86#define VINO_NTSC_WIDTH 640
87#define VINO_NTSC_HEIGHT 480
88
89#define VINO_MIN_WIDTH 32
90#define VINO_MIN_HEIGHT 32
91
92#define VINO_CLIPPING_START_ODD_D1 1
93#define VINO_CLIPPING_START_ODD_PAL 1
94#define VINO_CLIPPING_START_ODD_NTSC 1
95
96#define VINO_CLIPPING_START_EVEN_D1 2
97#define VINO_CLIPPING_START_EVEN_PAL 2
98#define VINO_CLIPPING_START_EVEN_NTSC 2
99
100#define VINO_INPUT_CHANNEL_COUNT 3
101
102#define VINO_INPUT_NONE -1
103#define VINO_INPUT_COMPOSITE 0
104#define VINO_INPUT_SVIDEO 1
105#define VINO_INPUT_D1 2
106
107#define VINO_PAGE_RATIO (PAGE_SIZE / VINO_PAGE_SIZE)
108
109#define VINO_FIFO_THRESHOLD_DEFAULT 512
110
111/*#define VINO_FRAMEBUFFER_SIZE (VINO_PAL_WIDTH * VINO_PAL_HEIGHT * 4 \
112 + 2 * PAGE_SIZE)*/
113#define VINO_FRAMEBUFFER_SIZE ((VINO_PAL_WIDTH \
114 * VINO_PAL_HEIGHT * 4 \
115 + 3 * PAGE_SIZE) & ~(PAGE_SIZE - 1))
116
117#define VINO_FRAMEBUFFER_MAX_COUNT 8
118
119#define VINO_FRAMEBUFFER_UNUSED 0
120#define VINO_FRAMEBUFFER_IN_USE 1
121#define VINO_FRAMEBUFFER_READY 2
122
123#define VINO_QUEUE_ERROR -1
124#define VINO_QUEUE_MAGIC 0x20050125
125
126#define VINO_MEMORY_NONE 0
127#define VINO_MEMORY_MMAP 1
128#define VINO_MEMORY_USERPTR 2
129
130#define VINO_DUMMY_DESC_COUNT 4
131#define VINO_DESC_FETCH_DELAY 5 /* microseconds */
132
133/* the number is the index for vino_data_formats */
134#define VINO_DATA_FMT_NONE -1
135#define VINO_DATA_FMT_GREY 0
136#define VINO_DATA_FMT_RGB332 1
137#define VINO_DATA_FMT_RGB32 2
138#define VINO_DATA_FMT_YUV 3
139//#define VINO_DATA_FMT_RGB24 4
140
141#define VINO_DATA_FMT_COUNT 4
142
143#define VINO_DATA_NORM_NONE -1
144#define VINO_DATA_NORM_NTSC 0
145#define VINO_DATA_NORM_PAL 1
146#define VINO_DATA_NORM_SECAM 2
147#define VINO_DATA_NORM_D1 3
148/* The following is a special entry that can be used to
149 * autodetect the norm. */
150#define VINO_DATA_NORM_AUTO 0xff
151
152#define VINO_DATA_NORM_COUNT 4
42 153
43/* VINO ASIC registers */ 154/* Internal data structure definitions */
44struct sgi_vino *vino;
45 155
46static const char *vinostr = "VINO IndyCam/TV"; 156struct vino_input {
47static int threshold_a = 512; 157 char *name;
48static int threshold_b = 512; 158 v4l2_std_id std;
159};
160
161struct vino_clipping {
162 unsigned int left, right, top, bottom;
163};
164
165struct vino_data_format {
166 /* the description */
167 char *description;
168 /* bytes per pixel */
169 unsigned int bpp;
170 /* V4L2 fourcc code */
171 __u32 pixelformat;
172 /* V4L2 colorspace (duh!) */
173 enum v4l2_colorspace colorspace;
174};
175
176struct vino_data_norm {
177 char *description;
178 unsigned int width, height;
179 struct vino_clipping odd;
180 struct vino_clipping even;
181
182 v4l2_std_id std;
183 unsigned int fps_min, fps_max;
184 __u32 framelines;
185};
186
187struct vino_descriptor_table {
188 /* the number of PAGE_SIZE sized pages in the buffer */
189 unsigned int page_count;
190 /* virtual (kmalloc'd) pointers to the actual data
191 * (in PAGE_SIZE chunks, used with mmap streaming) */
192 unsigned long *virtual;
193
194 /* cpu address for the VINO descriptor table
195 * (contains DMA addresses, VINO_PAGE_SIZE chunks) */
196 unsigned long *dma_cpu;
197 /* dma address for the VINO descriptor table
198 * (contains DMA addresses, VINO_PAGE_SIZE chunks) */
199 dma_addr_t dma;
200};
201
202struct vino_framebuffer {
203 /* identifier number */
204 unsigned int id;
205 /* the length of the whole buffer */
206 unsigned int size;
207 /* the length of actual data in buffer */
208 unsigned int data_size;
209 /* the data format */
210 unsigned int data_format;
211 /* the state of buffer data */
212 unsigned int state;
213 /* is the buffer mapped in user space? */
214 unsigned int map_count;
215 /* memory offset for mmap() */
216 unsigned int offset;
217 /* frame counter */
218 unsigned int frame_counter;
219 /* timestamp (written when image capture finishes) */
220 struct timeval timestamp;
221
222 struct vino_descriptor_table desc_table;
223
224 spinlock_t state_lock;
225};
49 226
50struct vino_device { 227struct vino_framebuffer_fifo {
51 struct video_device vdev; 228 unsigned int length;
52#define VINO_CHAN_A 1 229
53#define VINO_CHAN_B 2 230 unsigned int used;
54 int chan; 231 unsigned int head;
232 unsigned int tail;
233
234 unsigned int data[VINO_FRAMEBUFFER_MAX_COUNT];
235};
236
237struct vino_framebuffer_queue {
238 unsigned int magic;
239
240 /* VINO_MEMORY_NONE, VINO_MEMORY_MMAP or VINO_MEMORY_USERPTR */
241 unsigned int type;
242 unsigned int length;
243
244 /* data field of in and out contain index numbers for buffer */
245 struct vino_framebuffer_fifo in;
246 struct vino_framebuffer_fifo out;
247
248 struct vino_framebuffer *buffer[VINO_FRAMEBUFFER_MAX_COUNT];
249
250 spinlock_t queue_lock;
251 struct semaphore queue_sem;
252 wait_queue_head_t frame_wait_queue;
253};
254
255struct vino_channel_settings {
256 unsigned int channel;
257
258 int input;
259 unsigned int data_format;
260 unsigned int data_norm;
261 struct vino_clipping clipping;
262 unsigned int decimation;
263 unsigned int line_size;
264 unsigned int alpha;
265 unsigned int fps;
266 unsigned int framert_reg;
267
268 unsigned int fifo_threshold;
269
270 struct vino_framebuffer_queue fb_queue;
271
272 /* number of the current field */
273 unsigned int field;
274
275 /* read in progress */
276 int reading;
277 /* streaming is active */
278 int streaming;
279 /* the driver is currently processing the queue */
280 int capturing;
281
282 struct semaphore sem;
283 spinlock_t capture_lock;
284
285 unsigned int users;
286
287 /* V4L support */
288 struct video_device *v4l_device;
55}; 289};
56 290
57struct vino_client { 291struct vino_client {
292 /* the channel which owns this client:
293 * VINO_NO_CHANNEL, VINO_CHANNEL_A or VINO_CHANNEL_B */
294 unsigned int owner;
58 struct i2c_client *driver; 295 struct i2c_client *driver;
59 int owner;
60}; 296};
61 297
62struct vino_video { 298struct vino_settings {
63 struct vino_device chA; 299 struct vino_channel_settings a;
64 struct vino_device chB; 300 struct vino_channel_settings b;
65 301
66 struct vino_client decoder; 302 struct vino_client decoder;
67 struct vino_client camera; 303 struct vino_client camera;
68 304
69 struct semaphore input_lock; 305 /* a lock for vino register access */
306 spinlock_t vino_lock;
307 /* a lock for channel input changes */
308 spinlock_t input_lock;
70 309
71 /* Loaded into VINO descriptors to clear End Of Descriptors table
72 * interupt condition */
73 unsigned long dummy_page; 310 unsigned long dummy_page;
74 unsigned int dummy_buf[4] __attribute__((aligned(8))); 311 struct vino_descriptor_table dummy_desc_table;
75}; 312};
76 313
77static struct vino_video *Vino; 314/* Module parameters */
315
316/*
317 * Using vino_pixel_conversion the ARGB32-format pixels supplied
318 * by the VINO chip can be converted to more common formats
319 * like RGBA32 (or probably RGB24 in the future). This way we
320 * can give out data that can be specified correctly with
321 * the V4L2-definitions.
322 *
323 * The pixel format is specified as RGBA32 when no conversion
324 * is used.
325 *
326 * Note that this only affects the 32-bit bit depth.
327 *
328 * Use non-zero value to enable conversion.
329 */
330static int vino_pixel_conversion = 0;
331module_param_named(pixelconv, vino_pixel_conversion, int, 0);
332MODULE_PARM_DESC(pixelconv,
333 "enable pixel conversion (non-zero value enables)");
334
335/* Internal data structures */
336
337static struct sgi_vino *vino;
338
339static struct vino_settings *vino_drvdata;
340
341static const char *vino_driver_name = "vino";
342static const char *vino_driver_description = "SGI VINO";
343static const char *vino_bus_name = "GIO64 bus";
344static const char *vino_v4l_device_name_a = "SGI VINO Channel A";
345static const char *vino_v4l_device_name_b = "SGI VINO Channel B";
346
347static const struct vino_input vino_inputs[] = {
348 {
349 .name = "Composite",
350 .std = V4L2_STD_NTSC | V4L2_STD_PAL | V4L2_STD_SECAM,
351 },{
352 .name = "S-Video",
353 .std = V4L2_STD_NTSC | V4L2_STD_PAL | V4L2_STD_SECAM,
354 },{
355 .name = "D1 (IndyCam)",
356 .std = V4L2_STD_NTSC,
357 }
358};
359
360static const struct vino_data_format vino_data_formats[] = {
361 {
362 .description = "8-bit greyscale",
363 .bpp = 1,
364 .pixelformat = V4L2_PIX_FMT_GREY,
365 .colorspace = V4L2_COLORSPACE_SMPTE170M,
366 },{
367 .description = "8-bit dithered RGB 3-3-2",
368 .bpp = 1,
369 .pixelformat = V4L2_PIX_FMT_RGB332,
370 .colorspace = V4L2_COLORSPACE_SRGB,
371 },{
372 .description = "32-bit RGB",
373 .bpp = 4,
374 .pixelformat = V4L2_PIX_FMT_RGB32,
375 .colorspace = V4L2_COLORSPACE_SRGB,
376 },{
377 .description = "YUV 4:2:2",
378 .bpp = 4,
379 .pixelformat = V4L2_PIX_FMT_YUYV, // XXX: swapped?
380 .colorspace = V4L2_COLORSPACE_SMPTE170M,
381 }/*,{
382 .description = "24-bit RGB",
383 .bpp = 3,
384 .pixelformat = V4L2_PIX_FMT_RGB24,
385 .colorspace = V4L2_COLORSPACE_SRGB,
386 }*/
387};
388
389static const struct vino_data_norm vino_data_norms[] = {
390 {
391 .description = "NTSC",
392 .std = V4L2_STD_NTSC,
393 .fps_min = 6,
394 .fps_max = 30,
395 .framelines = 525,
396 .width = VINO_NTSC_WIDTH,
397 .height = VINO_NTSC_HEIGHT,
398 .odd = {
399 .top = VINO_CLIPPING_START_ODD_NTSC,
400 .left = 0,
401 .bottom = VINO_CLIPPING_START_ODD_NTSC
402 + VINO_NTSC_HEIGHT / 2 - 1,
403 .right = VINO_NTSC_WIDTH,
404 },
405 .even = {
406 .top = VINO_CLIPPING_START_EVEN_NTSC,
407 .left = 0,
408 .bottom = VINO_CLIPPING_START_EVEN_NTSC
409 + VINO_NTSC_HEIGHT / 2 - 1,
410 .right = VINO_NTSC_WIDTH,
411 },
412 },{
413 .description = "PAL",
414 .std = V4L2_STD_PAL,
415 .fps_min = 5,
416 .fps_max = 25,
417 .framelines = 625,
418 .width = VINO_PAL_WIDTH,
419 .height = VINO_PAL_HEIGHT,
420 .odd = {
421 .top = VINO_CLIPPING_START_ODD_PAL,
422 .left = 0,
423 .bottom = VINO_CLIPPING_START_ODD_PAL
424 + VINO_PAL_HEIGHT / 2 - 1,
425 .right = VINO_PAL_WIDTH,
426 },
427 .even = {
428 .top = VINO_CLIPPING_START_EVEN_PAL,
429 .left = 0,
430 .bottom = VINO_CLIPPING_START_EVEN_PAL
431 + VINO_PAL_HEIGHT / 2 - 1,
432 .right = VINO_PAL_WIDTH,
433 },
434 },{
435 .description = "SECAM",
436 .std = V4L2_STD_SECAM,
437 .fps_min = 5,
438 .fps_max = 25,
439 .framelines = 625,
440 .width = VINO_PAL_WIDTH,
441 .height = VINO_PAL_HEIGHT,
442 .odd = {
443 .top = VINO_CLIPPING_START_ODD_PAL,
444 .left = 0,
445 .bottom = VINO_CLIPPING_START_ODD_PAL
446 + VINO_PAL_HEIGHT / 2 - 1,
447 .right = VINO_PAL_WIDTH,
448 },
449 .even = {
450 .top = VINO_CLIPPING_START_EVEN_PAL,
451 .left = 0,
452 .bottom = VINO_CLIPPING_START_EVEN_PAL
453 + VINO_PAL_HEIGHT / 2 - 1,
454 .right = VINO_PAL_WIDTH,
455 },
456 },{
457 .description = "NTSC (D1 input)",
458 .std = V4L2_STD_NTSC,
459 .fps_min = 6,
460 .fps_max = 30,
461 .framelines = 525,
462 .width = VINO_NTSC_WIDTH,
463 .height = VINO_NTSC_HEIGHT,
464 .odd = {
465 .top = VINO_CLIPPING_START_ODD_D1,
466 .left = 0,
467 .bottom = VINO_CLIPPING_START_ODD_D1
468 + VINO_NTSC_HEIGHT / 2 - 1,
469 .right = VINO_NTSC_WIDTH,
470 },
471 .even = {
472 .top = VINO_CLIPPING_START_EVEN_D1,
473 .left = 0,
474 .bottom = VINO_CLIPPING_START_EVEN_D1
475 + VINO_NTSC_HEIGHT / 2 - 1,
476 .right = VINO_NTSC_WIDTH,
477 },
478 }
479};
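Each norm entry above gives per-field clipping windows: since an interlaced field carries only half of the frame lines, the field bottoms are computed as start line + height/2 - 1. Using the values from the table, the NTSC odd field spans lines 1 through 1 + 480/2 - 1 = 240, and the PAL (and SECAM) odd field spans lines 1 through 1 + 576/2 - 1 = 288.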
480
481#define VINO_INDYCAM_V4L2_CONTROL_COUNT 9
482
483struct v4l2_queryctrl vino_indycam_v4l2_controls[] = {
484 {
485 .id = V4L2_CID_AUTOGAIN,
486 .type = V4L2_CTRL_TYPE_BOOLEAN,
487 .name = "Automatic Gain Control",
488 .minimum = 0,
489 .maximum = 1,
490 .step = 1,
491 .default_value = INDYCAM_AGC_DEFAULT,
492 .flags = 0,
493 .reserved = { 0, 0 },
494 },{
495 .id = V4L2_CID_AUTO_WHITE_BALANCE,
496 .type = V4L2_CTRL_TYPE_BOOLEAN,
497 .name = "Automatic White Balance",
498 .minimum = 0,
499 .maximum = 1,
500 .step = 1,
501 .default_value = INDYCAM_AWB_DEFAULT,
502 .flags = 0,
503 .reserved = { 0, 0 },
504 },{
505 .id = V4L2_CID_GAIN,
506 .type = V4L2_CTRL_TYPE_INTEGER,
507 .name = "Gain",
508 .minimum = INDYCAM_GAIN_MIN,
509 .maximum = INDYCAM_GAIN_MAX,
510 .step = 1,
511 .default_value = INDYCAM_GAIN_DEFAULT,
512 .flags = 0,
513 .reserved = { 0, 0 },
514 },{
515 .id = V4L2_CID_PRIVATE_BASE,
516 .type = V4L2_CTRL_TYPE_INTEGER,
517 .name = "Red Saturation",
518 .minimum = INDYCAM_RED_SATURATION_MIN,
519 .maximum = INDYCAM_RED_SATURATION_MAX,
520 .step = 1,
521 .default_value = INDYCAM_RED_SATURATION_DEFAULT,
522 .flags = 0,
523 .reserved = { 0, 0 },
524 },{
525 .id = V4L2_CID_PRIVATE_BASE + 1,
526 .type = V4L2_CTRL_TYPE_INTEGER,
527 .name = "Blue Saturation",
528 .minimum = INDYCAM_BLUE_SATURATION_MIN,
529 .maximum = INDYCAM_BLUE_SATURATION_MAX,
530 .step = 1,
531 .default_value = INDYCAM_BLUE_SATURATION_DEFAULT,
532 .flags = 0,
533 .reserved = { 0, 0 },
534 },{
535 .id = V4L2_CID_RED_BALANCE,
536 .type = V4L2_CTRL_TYPE_INTEGER,
537 .name = "Red Balance",
538 .minimum = INDYCAM_RED_BALANCE_MIN,
539 .maximum = INDYCAM_RED_BALANCE_MAX,
540 .step = 1,
541 .default_value = INDYCAM_RED_BALANCE_DEFAULT,
542 .flags = 0,
543 .reserved = { 0, 0 },
544 },{
545 .id = V4L2_CID_BLUE_BALANCE,
546 .type = V4L2_CTRL_TYPE_INTEGER,
547 .name = "Blue Balance",
548 .minimum = INDYCAM_BLUE_BALANCE_MIN,
549 .maximum = INDYCAM_BLUE_BALANCE_MAX,
550 .step = 1,
551 .default_value = INDYCAM_BLUE_BALANCE_DEFAULT,
552 .flags = 0,
553 .reserved = { 0, 0 },
554 },{
555 .id = V4L2_CID_EXPOSURE,
556 .type = V4L2_CTRL_TYPE_INTEGER,
557 .name = "Shutter Control",
558 .minimum = INDYCAM_SHUTTER_MIN,
559 .maximum = INDYCAM_SHUTTER_MAX,
560 .step = 1,
561 .default_value = INDYCAM_SHUTTER_DEFAULT,
562 .flags = 0,
563 .reserved = { 0, 0 },
564 },{
565 .id = V4L2_CID_GAMMA,
566 .type = V4L2_CTRL_TYPE_INTEGER,
567 .name = "Gamma",
568 .minimum = INDYCAM_GAMMA_MIN,
569 .maximum = INDYCAM_GAMMA_MAX,
570 .step = 1,
571 .default_value = INDYCAM_GAMMA_DEFAULT,
572 .flags = 0,
573 .reserved = { 0, 0 },
574 }
575};
576
577#define VINO_SAA7191_V4L2_CONTROL_COUNT 2
578
579struct v4l2_queryctrl vino_saa7191_v4l2_controls[] = {
580 {
581 .id = V4L2_CID_HUE,
582 .type = V4L2_CTRL_TYPE_INTEGER,
583 .name = "Hue",
584 .minimum = SAA7191_HUE_MIN,
585 .maximum = SAA7191_HUE_MAX,
586 .step = 1,
587 .default_value = SAA7191_HUE_DEFAULT,
588 .flags = 0,
589 .reserved = { 0, 0 },
590 },{
591 .id = V4L2_CID_PRIVATE_BASE,
592 .type = V4L2_CTRL_TYPE_BOOLEAN,
593 .name = "VTR Time Constant",
594 .minimum = SAA7191_VTRC_MIN,
595 .maximum = SAA7191_VTRC_MAX,
596 .step = 1,
597 .default_value = SAA7191_VTRC_DEFAULT,
598 .flags = 0,
599 .reserved = { 0, 0 },
600 }
601};
602
603/* VINO I2C bus functions */
78 604
79unsigned i2c_vino_getctrl(void *data) 605unsigned i2c_vino_getctrl(void *data)
80{ 606{
@@ -112,49 +638,49 @@ static struct i2c_algo_sgi_data i2c_sgi_vino_data =
112 */ 638 */
113static int i2c_vino_client_reg(struct i2c_client *client) 639static int i2c_vino_client_reg(struct i2c_client *client)
114{ 640{
115 int res = 0; 641 int ret = 0;
116 642
117 down(&Vino->input_lock); 643 spin_lock(&vino_drvdata->input_lock);
118 switch (client->driver->id) { 644 switch (client->driver->id) {
119 case I2C_DRIVERID_SAA7191: 645 case I2C_DRIVERID_SAA7191:
120 if (Vino->decoder.driver) 646 if (vino_drvdata->decoder.driver)
121 res = -EBUSY; 647 ret = -EBUSY;
122 else 648 else
123 Vino->decoder.driver = client; 649 vino_drvdata->decoder.driver = client;
124 break; 650 break;
125 case I2C_DRIVERID_INDYCAM: 651 case I2C_DRIVERID_INDYCAM:
126 if (Vino->camera.driver) 652 if (vino_drvdata->camera.driver)
127 res = -EBUSY; 653 ret = -EBUSY;
128 else 654 else
129 Vino->camera.driver = client; 655 vino_drvdata->camera.driver = client;
130 break; 656 break;
131 default: 657 default:
132 res = -ENODEV; 658 ret = -ENODEV;
133 } 659 }
134 up(&Vino->input_lock); 660 spin_unlock(&vino_drvdata->input_lock);
135 661
136 return res; 662 return ret;
137} 663}
138 664
139static int i2c_vino_client_unreg(struct i2c_client *client) 665static int i2c_vino_client_unreg(struct i2c_client *client)
140{ 666{
141 int res = 0; 667 int ret = 0;
142 668
143 down(&Vino->input_lock); 669 spin_lock(&vino_drvdata->input_lock);
144 if (client == Vino->decoder.driver) { 670 if (client == vino_drvdata->decoder.driver) {
145 if (Vino->decoder.owner) 671 if (vino_drvdata->decoder.owner != VINO_NO_CHANNEL)
146 res = -EBUSY; 672 ret = -EBUSY;
147 else 673 else
148 Vino->decoder.driver = NULL; 674 vino_drvdata->decoder.driver = NULL;
149 } else if (client == Vino->camera.driver) { 675 } else if (client == vino_drvdata->camera.driver) {
150 if (Vino->camera.owner) 676 if (vino_drvdata->camera.owner != VINO_NO_CHANNEL)
151 res = -EBUSY; 677 ret = -EBUSY;
152 else 678 else
153 Vino->camera.driver = NULL; 679 vino_drvdata->camera.driver = NULL;
154 } 680 }
155 up(&Vino->input_lock); 681 spin_unlock(&vino_drvdata->input_lock);
156 682
157 return res; 683 return ret;
158} 684}
159 685
160static struct i2c_adapter vino_i2c_adapter = 686static struct i2c_adapter vino_i2c_adapter =
@@ -176,172 +702,3591 @@ static int vino_i2c_del_bus(void)
176 return i2c_sgi_del_bus(&vino_i2c_adapter); 702 return i2c_sgi_del_bus(&vino_i2c_adapter);
177} 703}
178 704
705static int i2c_camera_command(unsigned int cmd, void *arg)
706{
707 return vino_drvdata->camera.driver->
708 driver->command(vino_drvdata->camera.driver,
709 cmd, arg);
710}
711
712static int i2c_decoder_command(unsigned int cmd, void *arg)
713{
714 return vino_drvdata->decoder.driver->
715 driver->command(vino_drvdata->decoder.driver,
716 cmd, arg);
717}
718
719/* VINO framebuffer/DMA descriptor management */
720
721static void vino_free_buffer_with_count(struct vino_framebuffer *fb,
722 unsigned int count)
723{
724 unsigned int i;
725
726 dprintk("vino_free_buffer_with_count(): count = %d\n", count);
727
728 for (i = 0; i < count; i++) {
729 mem_map_unreserve(virt_to_page(fb->desc_table.virtual[i]));
730 dma_unmap_single(NULL,
731 fb->desc_table.dma_cpu[VINO_PAGE_RATIO * i],
732 PAGE_SIZE, DMA_FROM_DEVICE);
733 free_page(fb->desc_table.virtual[i]);
734 }
735
736 dma_free_coherent(NULL,
737 VINO_PAGE_RATIO * (fb->desc_table.page_count + 4) *
738 sizeof(dma_addr_t), (void *)fb->desc_table.dma_cpu,
739 fb->desc_table.dma);
740 kfree(fb->desc_table.virtual);
741
742 memset(fb, 0, sizeof(struct vino_framebuffer));
743}
744
745static void vino_free_buffer(struct vino_framebuffer *fb)
746{
747 vino_free_buffer_with_count(fb, fb->desc_table.page_count);
748}
749
750static int vino_allocate_buffer(struct vino_framebuffer *fb,
751 unsigned int size)
752{
753 unsigned int count, i, j;
754 int ret = 0;
755
756 dprintk("vino_allocate_buffer():\n");
757
758 if (size < 1)
759 return -EINVAL;
760
761 memset(fb, 0, sizeof(struct vino_framebuffer));
762
763 count = ((size / PAGE_SIZE) + 4) & ~3;
764
765 dprintk("vino_allocate_buffer(): size = %d, count = %d\n",
766 size, count);
767
768 /* allocate memory for table with virtual (page) addresses */
769 fb->desc_table.virtual = (unsigned long *)
770 kmalloc(count * sizeof(unsigned long), GFP_KERNEL);
771 if (!fb->desc_table.virtual)
772 return -ENOMEM;
773
774 /* allocate memory for table with dma addresses
775 * (has space for four extra descriptors) */
776 fb->desc_table.dma_cpu =
777 dma_alloc_coherent(NULL, VINO_PAGE_RATIO * (count + 4) *
778 sizeof(dma_addr_t), &fb->desc_table.dma,
779 GFP_KERNEL | GFP_DMA);
780 if (!fb->desc_table.dma_cpu) {
781 ret = -ENOMEM;
782 goto out_free_virtual;
783 }
784
785 /* allocate pages for the buffer and acquire the corresponding
786 * dma addresses */
787 for (i = 0; i < count; i++) {
788 dma_addr_t dma_data_addr;
789
790 fb->desc_table.virtual[i] =
791 get_zeroed_page(GFP_KERNEL | GFP_DMA);
792 if (!fb->desc_table.virtual[i]) {
793 ret = -ENOBUFS;
794 break;
795 }
796
797 dma_data_addr =
798 dma_map_single(NULL,
799 (void *)fb->desc_table.virtual[i],
800 PAGE_SIZE, DMA_FROM_DEVICE);
801
802 for (j = 0; j < VINO_PAGE_RATIO; j++) {
803 fb->desc_table.dma_cpu[VINO_PAGE_RATIO * i + j] =
804 dma_data_addr + VINO_PAGE_SIZE * j;
805 }
806
807 mem_map_reserve(virt_to_page(fb->desc_table.virtual[i]));
808 }
809
810 /* page_count needs to be set anyway, because the descriptor table has
811 * been allocated according to this number */
812 fb->desc_table.page_count = count;
813
814 if (ret) {
815 /* the descriptor with index i doesn't contain
816 * a valid address yet */
817 vino_free_buffer_with_count(fb, i);
818 return ret;
819 }
820
821 //fb->size = size;
822 fb->size = count * PAGE_SIZE;
823 fb->data_format = VINO_DATA_FMT_NONE;
824
825 /* set the dma stop-bit for the last (count+1)th descriptor */
826 fb->desc_table.dma_cpu[VINO_PAGE_RATIO * count] = VINO_DESC_STOP;
827 return 0;
828
829 out_free_virtual:
830 kfree(fb->desc_table.virtual);
831 return ret;
832}
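A quick worked example of the sizing above: for a request of 5 * PAGE_SIZE bytes, count = ((5 * PAGE_SIZE) / PAGE_SIZE + 4) & ~3 = 9 & ~3 = 8, so eight data pages are allocated, the coherent descriptor table gets room for VINO_PAGE_RATIO * (8 + 4) entries, fb->size is reported as 8 * PAGE_SIZE (slightly more than requested), and the entry at index VINO_PAGE_RATIO * 8 is set to VINO_DESC_STOP so the hardware stops after the real data.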
833
834#if 0
835/* user buffers not fully implemented yet */
836static int vino_prepare_user_buffer(struct vino_framebuffer *fb,
837 void *user,
838 unsigned int size)
839{
840 unsigned int count, i, j;
841 int ret = 0;
842
843 dprintk("vino_prepare_user_buffer():\n");
844
845 if (size < 1)
846 return -EINVAL;
847
848 memset(fb, 0, sizeof(struct vino_framebuffer));
849
850 count = ((size / PAGE_SIZE)) & ~3;
851
852 dprintk("vino_prepare_user_buffer(): size = %d, count = %d\n",
853 size, count);
854
855 /* allocate memory for table with virtual (page) addresses */
856 fb->desc_table.virtual = (unsigned long *)
857 kmalloc(count * sizeof(unsigned long), GFP_KERNEL);
858 if (!fb->desc_table.virtual)
859 return -ENOMEM;
860
861 /* allocate memory for table with dma addresses
862 * (has space for four extra descriptors) */
863 fb->desc_table.dma_cpu =
864 dma_alloc_coherent(NULL, VINO_PAGE_RATIO * (count + 4) *
865 sizeof(dma_addr_t), &fb->desc_table.dma,
866 GFP_KERNEL | GFP_DMA);
867 if (!fb->desc_table.dma_cpu) {
868 ret = -ENOMEM;
869 goto out_free_virtual;
870 }
871
872 /* allocate pages for the buffer and acquire the corresponding
873 * dma addresses */
874 for (i = 0; i < count; i++) {
875 dma_addr_t dma_data_addr;
876
877 fb->desc_table.virtual[i] =
878 get_zeroed_page(GFP_KERNEL | GFP_DMA);
879 if (!fb->desc_table.virtual[i]) {
880 ret = -ENOBUFS;
881 break;
882 }
883
884 dma_data_addr =
885 dma_map_single(NULL,
886 (void *)fb->desc_table.virtual[i],
887 PAGE_SIZE, DMA_FROM_DEVICE);
888
889 for (j = 0; j < VINO_PAGE_RATIO; j++) {
890 fb->desc_table.dma_cpu[VINO_PAGE_RATIO * i + j] =
891 dma_data_addr + VINO_PAGE_SIZE * j;
892 }
893
894 mem_map_reserve(virt_to_page(fb->desc_table.virtual[i]));
895 }
896
897 /* page_count needs to be set anyway, because the descriptor table has
898 * been allocated according to this number */
899 fb->desc_table.page_count = count;
900
901 if (ret) {
902 /* the descriptor with index i doesn't contain
903 * a valid address yet */
904 vino_free_buffer_with_count(fb, i);
905 return ret;
906 }
907
908 //fb->size = size;
909 fb->size = count * PAGE_SIZE;
910
911 /* set the dma stop-bit for the last (count+1)th descriptor */
912 fb->desc_table.dma_cpu[VINO_PAGE_RATIO * count] = VINO_DESC_STOP;
913 return 0;
914
915 out_free_virtual:
916 kfree(fb->desc_table.virtual);
917 return ret;
918}
919#endif
920
921static void vino_sync_buffer(struct vino_framebuffer *fb)
922{
923 int i;
924
925 dprintk("vino_sync_buffer():\n");
926
927 for (i = 0; i < fb->desc_table.page_count; i++)
928 dma_sync_single(NULL,
929 fb->desc_table.dma_cpu[VINO_PAGE_RATIO * i],
930 PAGE_SIZE, DMA_FROM_DEVICE);
931}
932
933/* Framebuffer fifo functions (need to be locked externally) */
934
935static void vino_fifo_init(struct vino_framebuffer_fifo *f,
936 unsigned int length)
937{
938 f->length = 0;
939 f->used = 0;
940 f->head = 0;
941 f->tail = 0;
942
943 if (length > VINO_FRAMEBUFFER_MAX_COUNT)
944 length = VINO_FRAMEBUFFER_MAX_COUNT;
945
946 f->length = length;
947}
948
949/* returns true/false */
950static int vino_fifo_has_id(struct vino_framebuffer_fifo *f, unsigned int id)
951{
952 unsigned int i;
953 for (i = f->head; i == (f->tail - 1); i = (i + 1) % f->length) {
954 if (f->data[i] == id)
955 return 1;
956 }
957
958 return 0;
959}
960
961/* returns true/false */
962static int vino_fifo_full(struct vino_framebuffer_fifo *f)
963{
964 return (f->used == f->length);
965}
966
967static unsigned int vino_fifo_get_used(struct vino_framebuffer_fifo *f)
968{
969 return f->used;
970}
179 971
180static void vino_interrupt(int irq, void *dev_id, struct pt_regs *regs) 972static int vino_fifo_enqueue(struct vino_framebuffer_fifo *f, unsigned int id)
181{ 973{
974 if (id >= f->length) {
975 return VINO_QUEUE_ERROR;
976 }
977
978 if (vino_fifo_has_id(f, id)) {
979 return VINO_QUEUE_ERROR;
980 }
981
982 if (f->used < f->length) {
983 f->data[f->tail] = id;
984 f->tail = (f->tail + 1) % f->length;
985 f->used++;
986 } else {
987 return VINO_QUEUE_ERROR;
988 }
989
990 return 0;
182} 991}
183 992
184static int vino_open(struct video_device *dev, int flags) 993static int vino_fifo_peek(struct vino_framebuffer_fifo *f, unsigned int *id)
185{ 994{
186 struct vino_device *videv = (struct vino_device *)dev; 995 if (f->used > 0) {
996 *id = f->data[f->head];
997 } else {
998 return VINO_QUEUE_ERROR;
999 }
187 1000
188 return 0; 1001 return 0;
189} 1002}
190 1003
191static void vino_close(struct video_device *dev) 1004static int vino_fifo_dequeue(struct vino_framebuffer_fifo *f, unsigned int *id)
192{ 1005{
193 struct vino_device *videv = (struct vino_device *)dev; 1006 if (f->used > 0) {
1007 *id = f->data[f->head];
1008 f->head = (f->head + 1) % f->length;
1009 f->used--;
1010 } else {
1011 return VINO_QUEUE_ERROR;
1012 }
1013
1014 return 0;
194} 1015}
195 1016
196static int vino_mmap(struct video_device *dev, const char *adr, 1017/* Framebuffer queue functions */
197 unsigned long size) 1018
1019/* execute with queue_lock locked */
1020static void vino_queue_free_with_count(struct vino_framebuffer_queue *q,
1021 unsigned int length)
198{ 1022{
199 struct vino_device *videv = (struct vino_device *)dev; 1023 unsigned int i;
200 1024
201 return -EINVAL; 1025 q->length = 0;
1026 memset(&q->in, 0, sizeof(struct vino_framebuffer_fifo));
1027 memset(&q->out, 0, sizeof(struct vino_framebuffer_fifo));
1028 for (i = 0; i < length; i++) {
1029 dprintk("vino_queue_free_with_count(): freeing buffer %d\n",
1030 i);
1031 vino_free_buffer(q->buffer[i]);
1032 kfree(q->buffer[i]);
1033 }
1034
1035 q->type = VINO_MEMORY_NONE;
1036 q->magic = 0;
202} 1037}
203 1038
204static int vino_ioctl(struct video_device *dev, unsigned int cmd, void *arg) 1039static void vino_queue_free(struct vino_framebuffer_queue *q)
205{ 1040{
206 struct vino_device *videv = (struct vino_device *)dev; 1041 dprintk("vino_queue_free():\n");
1042
1043 if (q->magic != VINO_QUEUE_MAGIC)
1044 return;
1045 if (q->type != VINO_MEMORY_MMAP)
1046 return;
1047
1048 down(&q->queue_sem);
1049
1050 vino_queue_free_with_count(q, q->length);
1051
1052 up(&q->queue_sem);
1053}
1054
1055static int vino_queue_init(struct vino_framebuffer_queue *q,
1056 unsigned int *length)
1057{
1058 unsigned int i;
1059 int ret = 0;
1060
1061 dprintk("vino_queue_init(): length = %d\n", *length);
1062
1063 if (q->magic == VINO_QUEUE_MAGIC) {
1064 dprintk("vino_queue_init(): queue already initialized!\n");
1065 return -EINVAL;
1066 }
1067
1068 if (q->type != VINO_MEMORY_NONE) {
1069 dprintk("vino_queue_init(): queue already initialized!\n");
1070 return -EINVAL;
1071 }
1072
1073 if (*length < 1)
1074 return -EINVAL;
1075
1076 down(&q->queue_sem);
1077
1078 if (*length > VINO_FRAMEBUFFER_MAX_COUNT)
1079 *length = VINO_FRAMEBUFFER_MAX_COUNT;
1080
1081 q->length = 0;
1082
1083 for (i = 0; i < *length; i++) {
1084 dprintk("vino_queue_init(): allocating buffer %d\n", i);
1085 q->buffer[i] = kmalloc(sizeof(struct vino_framebuffer),
1086 GFP_KERNEL);
1087 if (!q->buffer[i]) {
1088 dprintk("vino_queue_init(): kmalloc() failed\n");
1089 ret = -ENOMEM;
1090 break;
1091 }
1092
1093 ret = vino_allocate_buffer(q->buffer[i],
1094 VINO_FRAMEBUFFER_SIZE);
1095 if (ret) {
1096 kfree(q->buffer[i]);
1097 dprintk("vino_queue_init(): "
1098 "vino_allocate_buffer() failed\n");
1099 break;
1100 }
1101
1102 q->buffer[i]->id = i;
1103 if (i > 0) {
1104 q->buffer[i]->offset = q->buffer[i - 1]->offset +
1105 q->buffer[i - 1]->size;
1106 } else {
1107 q->buffer[i]->offset = 0;
1108 }
1109
1110 spin_lock_init(&q->buffer[i]->state_lock);
1111
1112 dprintk("vino_queue_init(): buffer = %d, offset = %d, "
1113 "size = %d\n", i, q->buffer[i]->offset,
1114 q->buffer[i]->size);
1115 }
1116
1117 if (ret) {
1118 vino_queue_free_with_count(q, i);
1119 *length = 0;
1120 } else {
1121 q->length = *length;
1122 vino_fifo_init(&q->in, q->length);
1123 vino_fifo_init(&q->out, q->length);
1124 q->type = VINO_MEMORY_MMAP;
1125 q->magic = VINO_QUEUE_MAGIC;
1126 }
1127
1128 up(&q->queue_sem);
1129
1130 return ret;
1131}
1132
1133static struct vino_framebuffer *vino_queue_add(struct
1134 vino_framebuffer_queue *q,
1135 unsigned int id)
1136{
1137 struct vino_framebuffer *ret = NULL;
1138 unsigned int total;
1139 unsigned long flags;
1140
1141 dprintk("vino_queue_add(): id = %d\n", id);
1142
1143 if (q->magic != VINO_QUEUE_MAGIC) {
1144 return ret;
1145 }
1146
1147 spin_lock_irqsave(&q->queue_lock, flags);
1148
1149 if (q->length == 0)
1150 goto out;
1151
1152 if (id >= q->length)
1153 goto out;
1154
1155 /* not needed?: if (vino_fifo_full(&q->out)) {
1156 goto out;
1157 }*/
1158 /* check that outgoing queue isn't already full
1159 * (or that it won't become full) */
1160 total = vino_fifo_get_used(&q->in) +
1161 vino_fifo_get_used(&q->out);
1162 if (total >= q->length)
1163 goto out;
1164
1165 if (vino_fifo_enqueue(&q->in, id))
1166 goto out;
1167
1168 ret = q->buffer[id];
1169
1170out:
1171 spin_unlock_irqrestore(&q->queue_lock, flags);
1172
1173 return ret;
1174}
1175
1176static struct vino_framebuffer *vino_queue_transfer(struct
1177 vino_framebuffer_queue *q)
1178{
1179 struct vino_framebuffer *ret = NULL;
1180 struct vino_framebuffer *fb;
1181 int id;
1182 unsigned long flags;
1183
1184 dprintk("vino_queue_transfer():\n");
1185
1186 if (q->magic != VINO_QUEUE_MAGIC) {
1187 return ret;
1188 }
1189
1190 spin_lock_irqsave(&q->queue_lock, flags);
1191
1192 if (q->length == 0)
1193 goto out;
1194
1195 // now this actually removes an entry from the incoming queue
1196 if (vino_fifo_dequeue(&q->in, &id)) {
1197 goto out;
1198 }
1199
1200 dprintk("vino_queue_transfer(): id = %d\n", id);
1201 fb = q->buffer[id];
1202
1203 // we have already checked that the outgoing queue is not full, but...
1204 if (vino_fifo_enqueue(&q->out, id)) {
1205 printk(KERN_ERR "vino_queue_transfer(): "
1206 "outgoing queue is full, this shouldn't happen!\n");
1207 goto out;
1208 }
1209
1210 ret = fb;
1211out:
1212 spin_unlock_irqrestore(&q->queue_lock, flags);
1213
1214 return ret;
1215}
1216
1217/* returns true/false */
1218static int vino_queue_incoming_contains(struct vino_framebuffer_queue *q,
1219 unsigned int id)
1220{
1221 int ret = 0;
1222 unsigned long flags;
1223
1224 if (q->magic != VINO_QUEUE_MAGIC) {
1225 return ret;
1226 }
1227
1228 spin_lock_irqsave(&q->queue_lock, flags);
1229
1230 if (q->length == 0)
1231 goto out;
1232
1233 ret = vino_fifo_has_id(&q->in, id);
1234
1235out:
1236 spin_unlock_irqrestore(&q->queue_lock, flags);
1237
1238 return ret;
1239}
1240
1241/* returns true/false */
1242static int vino_queue_outgoing_contains(struct vino_framebuffer_queue *q,
1243 unsigned int id)
1244{
1245 int ret = 0;
1246 unsigned long flags;
1247
1248 if (q->magic != VINO_QUEUE_MAGIC) {
1249 return ret;
1250 }
1251
1252 spin_lock_irqsave(&q->queue_lock, flags);
1253
1254 if (q->length == 0)
1255 goto out;
1256
1257 ret = vino_fifo_has_id(&q->out, id);
1258
1259out:
1260 spin_unlock_irqrestore(&q->queue_lock, flags);
1261
1262 return ret;
1263}
1264
1265static int vino_queue_get_incoming(struct vino_framebuffer_queue *q,
1266 unsigned int *used)
1267{
1268 int ret = 0;
1269 unsigned long flags;
1270
1271 if (q->magic != VINO_QUEUE_MAGIC) {
1272 return VINO_QUEUE_ERROR;
1273 }
1274
1275 spin_lock_irqsave(&q->queue_lock, flags);
1276
1277 if (q->length == 0) {
1278 ret = VINO_QUEUE_ERROR;
1279 goto out;
1280 }
1281
1282 *used = vino_fifo_get_used(&q->in);
1283
1284out:
1285 spin_unlock_irqrestore(&q->queue_lock, flags);
1286
1287 return ret;
1288}
1289
1290static int vino_queue_get_outgoing(struct vino_framebuffer_queue *q,
1291 unsigned int *used)
1292{
1293 int ret = 0;
1294 unsigned long flags;
1295
1296 if (q->magic != VINO_QUEUE_MAGIC) {
1297 return VINO_QUEUE_ERROR;
1298 }
1299
1300 spin_lock_irqsave(&q->queue_lock, flags);
1301
1302 if (q->length == 0) {
1303 ret = VINO_QUEUE_ERROR;
1304 goto out;
1305 }
1306
1307 *used = vino_fifo_get_used(&q->out);
1308
1309out:
1310 spin_unlock_irqrestore(&q->queue_lock, flags);
1311
1312 return ret;
1313}
1314
1315static int vino_queue_get_total(struct vino_framebuffer_queue *q,
1316 unsigned int *total)
1317{
1318 int ret = 0;
1319 unsigned long flags;
1320
1321 if (q->magic != VINO_QUEUE_MAGIC) {
1322 return VINO_QUEUE_ERROR;
1323 }
1324
1325 spin_lock_irqsave(&q->queue_lock, flags);
1326
1327 if (q->length == 0) {
1328 ret = VINO_QUEUE_ERROR;
1329 goto out;
1330 }
1331
1332 *total = vino_fifo_get_used(&q->in) +
1333 vino_fifo_get_used(&q->out);
1334
1335out:
1336 spin_unlock_irqrestore(&q->queue_lock, flags);
1337
1338 return ret;
1339}
1340
1341static struct vino_framebuffer *vino_queue_peek(struct
1342 vino_framebuffer_queue *q,
1343 unsigned int *id)
1344{
1345 struct vino_framebuffer *ret = NULL;
1346 unsigned long flags;
1347
1348 if (q->magic != VINO_QUEUE_MAGIC) {
1349 return ret;
1350 }
1351
1352 spin_lock_irqsave(&q->queue_lock, flags);
1353
1354 if (q->length == 0)
1355 goto out;
1356
1357 if (vino_fifo_peek(&q->in, id)) {
1358 goto out;
1359 }
1360
1361 ret = q->buffer[*id];
1362out:
1363 spin_unlock_irqrestore(&q->queue_lock, flags);
1364
1365 return ret;
1366}
1367
1368static struct vino_framebuffer *vino_queue_remove(struct
1369 vino_framebuffer_queue *q,
1370 unsigned int *id)
1371{
1372 struct vino_framebuffer *ret = NULL;
1373 unsigned long flags;
1374 dprintk("vino_queue_remove():\n");
1375
1376 if (q->magic != VINO_QUEUE_MAGIC) {
1377 return ret;
1378 }
1379
1380 spin_lock_irqsave(&q->queue_lock, flags);
1381
1382 if (q->length == 0)
1383 goto out;
1384
1385 if (vino_fifo_dequeue(&q->out, id)) {
1386 goto out;
1387 }
1388
1389 dprintk("vino_queue_remove(): id = %d\n", *id);
1390 ret = q->buffer[*id];
1391out:
1392 spin_unlock_irqrestore(&q->queue_lock, flags);
1393
1394 return ret;
1395}
1396
1397static struct
1398vino_framebuffer *vino_queue_get_buffer(struct vino_framebuffer_queue *q,
1399 unsigned int id)
1400{
1401 struct vino_framebuffer *ret = NULL;
1402 unsigned long flags;
1403
1404 if (q->magic != VINO_QUEUE_MAGIC) {
1405 return ret;
1406 }
1407
1408 spin_lock_irqsave(&q->queue_lock, flags);
1409
1410 if (q->length == 0)
1411 goto out;
1412
1413 if (id >= q->length)
1414 goto out;
1415
1416 ret = q->buffer[id];
1417 out:
1418 spin_unlock_irqrestore(&q->queue_lock, flags);
1419
1420 return ret;
1421}
1422
1423static unsigned int vino_queue_get_length(struct vino_framebuffer_queue *q)
1424{
1425 unsigned int length = 0;
1426 unsigned long flags;
1427
1428 if (q->magic != VINO_QUEUE_MAGIC) {
1429 return length;
1430 }
1431
1432 spin_lock_irqsave(&q->queue_lock, flags);
1433 length = q->length;
1434 spin_unlock_irqrestore(&q->queue_lock, flags);
1435
1436 return length;
1437}
1438
1439static int vino_queue_has_mapped_buffers(struct vino_framebuffer_queue *q)
1440{
1441 unsigned int i;
1442 int ret = 0;
1443 unsigned long flags;
1444
1445 if (q->magic != VINO_QUEUE_MAGIC) {
1446 return ret;
1447 }
1448
1449 spin_lock_irqsave(&q->queue_lock, flags);
1450 for (i = 0; i < q->length; i++) {
1451 if (q->buffer[i]->map_count > 0) {
1452 ret = 1;
1453 break;
1454 }
1455 }
1456 spin_unlock_irqrestore(&q->queue_lock, flags);
1457
1458 return ret;
1459}
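Taken together, the helpers above implement a two-stage queue: buffers queued by the user sit in the "in" fifo, move to the "out" fifo when capture starts on them, and are handed back from "out" when finished. A hedged sketch of the intended calling order (locking, error handling and the capture interrupt are omitted; the function below is a hypothetical illustration):

	/* Sketch only: assumes the declarations above are in scope. */
	static void example_queue_lifecycle(struct vino_channel_settings *vcs)
	{
		struct vino_framebuffer_queue *q = &vcs->fb_queue;
		struct vino_framebuffer *fb;
		unsigned int length = 4, id;

		vino_queue_init(q, &length);	/* allocate up to 4 mmap buffers */
		fb = vino_queue_add(q, 0);	/* queue buffer 0 into the 'in' fifo */
		fb = vino_queue_transfer(q);	/* move it to 'out' as capture starts */
		/* ... DMA fills the buffer; the capture code is then expected
		 *     to mark it VINO_FRAMEBUFFER_READY ... */
		fb = vino_queue_remove(q, &id);	/* hand the finished buffer back */
		vino_queue_free(q);		/* release everything */
	}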
1460
1461/* VINO functions */
1462
1463/* execute with input_lock locked */
1464static void vino_update_line_size(struct vino_channel_settings *vcs)
1465{
1466 unsigned int w = vcs->clipping.right - vcs->clipping.left;
1467 unsigned int d = vcs->decimation;
1468 unsigned int bpp = vino_data_formats[vcs->data_format].bpp;
1469 unsigned int lsize;
1470
1471 dprintk("update_line_size(): before: w = %d, d = %d, "
1472 "line_size = %d\n", w, d, vcs->line_size);
1473 /* line size must be multiple of 8 bytes */
1474 lsize = (bpp * (w / d)) & ~7;
1475 w = (lsize / bpp) * d;
1476
1477 vcs->clipping.right = vcs->clipping.left + w;
1478 vcs->line_size = lsize;
1479 dprintk("update_line_size(): after: w = %d, d = %d, "
1480 "line_size = %d\n", w, d, vcs->line_size);
1481}
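A worked example of the rounding above, with numbers chosen only for illustration: for the 8-bit greyscale format (bpp = 1), a 100-pixel-wide clip and decimation d = 3 give lsize = (1 * (100 / 3)) & ~7 = 33 & ~7 = 32, and the clip width is pulled back to (32 / 1) * 3 = 96 pixels, so every output line is a multiple of 8 bytes as the hardware requires.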
1482
1483/* execute with input_lock locked */
1484static void vino_set_clipping(struct vino_channel_settings *vcs,
1485 unsigned int x, unsigned int y,
1486 unsigned int w, unsigned int h)
1487{
1488 unsigned int maxwidth, maxheight;
1489 unsigned int d;
1490
1491 maxwidth = vino_data_norms[vcs->data_norm].width;
1492 maxheight = vino_data_norms[vcs->data_norm].height;
1493 d = vcs->decimation;
1494
1495 y &= ~1; /* odd/even fields */
1496
1497 if (x > maxwidth) {
1498 x = 0;
1499 }
1500 if (y > maxheight) {
1501 y = 0;
1502 }
1503
1504 if (((w / d) < VINO_MIN_WIDTH)
1505 || ((h / d) < VINO_MIN_HEIGHT)) {
1506 w = VINO_MIN_WIDTH * d;
1507 h = VINO_MIN_HEIGHT * d;
1508 }
1509
1510 if ((x + w) > maxwidth) {
1511 w = maxwidth - x;
1512 if ((w / d) < VINO_MIN_WIDTH)
1513 x = maxwidth - VINO_MIN_WIDTH * d;
1514 }
1515 if ((y + h) > maxheight) {
1516 h = maxheight - y;
1517 if ((h / d) < VINO_MIN_HEIGHT)
1518 y = maxheight - VINO_MIN_HEIGHT * d;
1519 }
1520
1521 vcs->clipping.left = x;
1522 vcs->clipping.top = y;
1523 vcs->clipping.right = x + w;
1524 vcs->clipping.bottom = y + h;
1525
1526 vino_update_line_size(vcs);
1527
1528 dprintk("clipping %d, %d, %d, %d / %d - %d\n",
1529 vcs->clipping.left, vcs->clipping.top, vcs->clipping.right,
1530 vcs->clipping.bottom, vcs->decimation, vcs->line_size);
1531}
1532
1533/* execute with input_lock locked */
1534static void vino_set_default_clipping(struct vino_channel_settings *vcs)
1535{
1536 vino_set_clipping(vcs, 0, 0, vino_data_norms[vcs->data_norm].width,
1537 vino_data_norms[vcs->data_norm].height);
1538}
1539
1540/* execute with input_lock locked */
1541static void vino_set_scaling(struct vino_channel_settings *vcs,
1542 unsigned int w, unsigned int h)
1543{
1544 unsigned int x, y, curw, curh, d;
1545
1546 x = vcs->clipping.left;
1547 y = vcs->clipping.top;
1548 curw = vcs->clipping.right - vcs->clipping.left;
1549 curh = vcs->clipping.bottom - vcs->clipping.top;
1550
1551 d = max(curw / w, curh / h);
1552
1553 dprintk("scaling w: %d, h: %d, curw: %d, curh: %d, d: %d\n",
1554 w, h, curw, curh, d);
1555
1556 if (d < 1) {
1557 d = 1;
1558 }
1559 if (d > 8) {
1560 d = 8;
1561 }
1562
1563 vcs->decimation = d;
1564 vino_set_clipping(vcs, x, y, w * d, h * d);
1565
1566 dprintk("scaling %d, %d, %d, %d / %d - %d\n", vcs->clipping.left,
1567 vcs->clipping.top, vcs->clipping.right, vcs->clipping.bottom,
1568 vcs->decimation, vcs->line_size);
1569}
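For example, with a current 640x480 clipping window and a requested 320x240 output, d = max(640 / 320, 480 / 240) = 2, which already lies in the allowed 1..8 range; the decimation becomes 2 and vino_set_clipping() is re-run with w = 320 * 2 and h = 240 * 2, so the same source window is kept while the output is halved in each dimension.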
1570
1571/* execute with input_lock locked */
1572static void vino_reset_scaling(struct vino_channel_settings *vcs)
1573{
1574 vino_set_scaling(vcs, vcs->clipping.right - vcs->clipping.left,
1575 vcs->clipping.bottom - vcs->clipping.top);
1576}
1577
1578/* execute with input_lock locked */
1579static void vino_set_framerate(struct vino_channel_settings *vcs,
1580 unsigned int fps)
1581{
1582 unsigned int mask;
1583
1584 switch (vcs->data_norm) {
1585 case VINO_DATA_NORM_NTSC:
1586 case VINO_DATA_NORM_D1:
1587 fps = (unsigned int)(fps / 6) * 6; // FIXME: round!
1588
1589 if (fps < vino_data_norms[vcs->data_norm].fps_min)
1590 fps = vino_data_norms[vcs->data_norm].fps_min;
1591 if (fps > vino_data_norms[vcs->data_norm].fps_max)
1592 fps = vino_data_norms[vcs->data_norm].fps_max;
1593
1594 switch (fps) {
1595 case 6:
1596 mask = 0x003;
1597 break;
1598 case 12:
1599 mask = 0x0c3;
1600 break;
1601 case 18:
1602 mask = 0x333;
1603 break;
1604 case 24:
1605 mask = 0x3ff;
1606 break;
1607 case 30:
1608 mask = 0xfff;
1609 break;
1610 default:
1611 mask = VINO_FRAMERT_FULL;
1612 }
1613 vcs->framert_reg = VINO_FRAMERT_RT(mask);
1614 break;
1615 case VINO_DATA_NORM_PAL:
1616 case VINO_DATA_NORM_SECAM:
1617 fps = (unsigned int)(fps / 5) * 5; // FIXME: round!
1618
1619 if (fps < vino_data_norms[vcs->data_norm].fps_min)
1620 fps = vino_data_norms[vcs->data_norm].fps_min;
1621 if (fps > vino_data_norms[vcs->data_norm].fps_max)
1622 fps = vino_data_norms[vcs->data_norm].fps_max;
1623
1624 switch (fps) {
1625 case 5:
1626 mask = 0x003;
1627 break;
1628 case 10:
1629 mask = 0x0c3;
1630 break;
1631 case 15:
1632 mask = 0x333;
1633 break;
1634 case 20:
1635 mask = 0x0ff;
1636 break;
1637 case 25:
1638 mask = 0x3ff;
1639 break;
1640 default:
1641 mask = VINO_FRAMERT_FULL;
1642 }
1643 vcs->framert_reg = VINO_FRAMERT_RT(mask) | VINO_FRAMERT_PAL;
1644 break;
1645 }
1646
1647 vcs->fps = fps;
1648}
1649
1650/* execute with input_lock locked */
1651static void vino_set_default_framerate(struct vino_channel_settings *vcs)
1652{
1653 vino_set_framerate(vcs, vino_data_norms[vcs->data_norm].fps_max);
1654}
1655
1656/*
1657 * Prepare VINO for DMA transfer...
1658 * (execute only with vino_lock and input_lock locked)
1659 */
1660static int vino_dma_setup(struct vino_channel_settings *vcs,
1661 struct vino_framebuffer *fb)
1662{
1663 u32 ctrl, intr;
1664 struct sgi_vino_channel *ch;
1665 const struct vino_data_norm *norm;
1666
1667 dprintk("vino_dma_setup():\n");
1668
1669 vcs->field = 0;
1670 fb->frame_counter = 0;
1671
1672 ch = (vcs->channel == VINO_CHANNEL_A) ? &vino->a : &vino->b;
1673 norm = &vino_data_norms[vcs->data_norm];
1674
1675 ch->page_index = 0;
1676 ch->line_count = 0;
1677
1678 /* VINO line size register is set 8 bytes less than actual */
1679 ch->line_size = vcs->line_size - 8;
1680
1681 /* let VINO know where to transfer data */
1682 ch->start_desc_tbl = fb->desc_table.dma;
1683 ch->next_4_desc = fb->desc_table.dma;
1684
1685 /* give vino time to fetch the first four descriptors, 5 usec
1686 * should be more than enough time */
1687 udelay(VINO_DESC_FETCH_DELAY);
1688
1689 /* set the alpha register */
1690 ch->alpha = vcs->alpha;
1691
1692 /* set clipping registers */
1693 ch->clip_start = VINO_CLIP_ODD(norm->odd.top + vcs->clipping.top / 2) |
1694 VINO_CLIP_EVEN(norm->even.top +
1695 vcs->clipping.top / 2) |
1696 VINO_CLIP_X(vcs->clipping.left);
1697 ch->clip_end = VINO_CLIP_ODD(norm->odd.top +
1698 vcs->clipping.bottom / 2 - 1) |
1699 VINO_CLIP_EVEN(norm->even.top +
1700 vcs->clipping.bottom / 2 - 1) |
1701 VINO_CLIP_X(vcs->clipping.right);
1702 /* FIXME: end-of-field bug workaround
1703 VINO_CLIP_X(VINO_PAL_WIDTH);
1704 */
1705
1706 /* set the size of actual content in the buffer (DECIMATION !) */
1707 fb->data_size = ((vcs->clipping.right - vcs->clipping.left) /
1708 vcs->decimation) *
1709 ((vcs->clipping.bottom - vcs->clipping.top) /
1710 vcs->decimation) *
1711 vino_data_formats[vcs->data_format].bpp;
1712
1713 ch->frame_rate = vcs->framert_reg;
1714
1715 ctrl = vino->control;
1716 intr = vino->intr_status;
1717
1718 if (vcs->channel == VINO_CHANNEL_A) {
1719 /* All interrupt conditions for this channel were cleared
1720 * so clear the interrupt status register and enable
1721 * interrupts */
1722 intr &= ~VINO_INTSTAT_A;
1723 ctrl |= VINO_CTRL_A_INT;
1724
1725 /* enable synchronization */
1726 ctrl |= VINO_CTRL_A_SYNC_ENBL;
1727
1728 /* enable frame assembly */
1729 ctrl |= VINO_CTRL_A_INTERLEAVE_ENBL;
1730
1731 /* set decimation used */
1732 if (vcs->decimation < 2)
1733 ctrl &= ~VINO_CTRL_A_DEC_ENBL;
1734 else {
1735 ctrl |= VINO_CTRL_A_DEC_ENBL;
1736 ctrl &= ~VINO_CTRL_A_DEC_SCALE_MASK;
1737 ctrl |= (vcs->decimation - 1) <<
1738 VINO_CTRL_A_DEC_SCALE_SHIFT;
1739 }
1740
1741 /* select input interface */
1742 if (vcs->input == VINO_INPUT_D1)
1743 ctrl |= VINO_CTRL_A_SELECT;
1744 else
1745 ctrl &= ~VINO_CTRL_A_SELECT;
1746
1747 /* palette */
1748 ctrl &= ~(VINO_CTRL_A_LUMA_ONLY | VINO_CTRL_A_RGB |
1749 VINO_CTRL_A_DITHER);
1750 } else {
1751 intr &= ~VINO_INTSTAT_B;
1752 ctrl |= VINO_CTRL_B_INT;
1753
1754 ctrl |= VINO_CTRL_B_SYNC_ENBL;
1755 ctrl |= VINO_CTRL_B_INTERLEAVE_ENBL;
1756
1757 if (vcs->decimation < 2)
1758 ctrl &= ~VINO_CTRL_B_DEC_ENBL;
1759 else {
1760 ctrl |= VINO_CTRL_B_DEC_ENBL;
1761 ctrl &= ~VINO_CTRL_B_DEC_SCALE_MASK;
1762 ctrl |= (vcs->decimation - 1) <<
1763 VINO_CTRL_B_DEC_SCALE_SHIFT;
1764
1765 }
1766 if (vcs->input == VINO_INPUT_D1)
1767 ctrl |= VINO_CTRL_B_SELECT;
1768 else
1769 ctrl &= ~VINO_CTRL_B_SELECT;
1770
1771 ctrl &= ~(VINO_CTRL_B_LUMA_ONLY | VINO_CTRL_B_RGB |
1772 VINO_CTRL_B_DITHER);
1773 }
1774
1775 /* set palette */
1776 fb->data_format = vcs->data_format;
1777
1778 switch (vcs->data_format) {
1779 case VINO_DATA_FMT_GREY:
1780 ctrl |= (vcs->channel == VINO_CHANNEL_A) ?
1781 VINO_CTRL_A_LUMA_ONLY : VINO_CTRL_B_LUMA_ONLY;
1782 break;
1783 case VINO_DATA_FMT_RGB32:
1784 ctrl |= (vcs->channel == VINO_CHANNEL_A) ?
1785 VINO_CTRL_A_RGB : VINO_CTRL_B_RGB;
1786 break;
1787 case VINO_DATA_FMT_YUV:
1788 /* nothing needs to be done */
1789 break;
1790 case VINO_DATA_FMT_RGB332:
1791 ctrl |= (vcs->channel == VINO_CHANNEL_A) ?
1792 VINO_CTRL_A_RGB | VINO_CTRL_A_DITHER :
1793 VINO_CTRL_B_RGB | VINO_CTRL_B_DITHER;
1794 break;
1795 }
1796
1797 vino->intr_status = intr;
1798 vino->control = ctrl;
1799
1800 return 0;
1801}
1802
1803/* (execute only with vino_lock locked) */
1804static void vino_dma_start(struct vino_channel_settings *vcs)
1805{
1806 u32 ctrl = vino->control;
1807
1808 dprintk("vino_dma_start():\n");
1809 ctrl |= (vcs->channel == VINO_CHANNEL_A) ?
1810 VINO_CTRL_A_DMA_ENBL : VINO_CTRL_B_DMA_ENBL;
1811 vino->control = ctrl;
1812}
1813
1814/* (execute only with vino_lock locked) */
1815static void vino_dma_stop(struct vino_channel_settings *vcs)
1816{
1817 u32 ctrl = vino->control;
1818
1819 ctrl &= (vcs->channel == VINO_CHANNEL_A) ?
1820 ~VINO_CTRL_A_DMA_ENBL : ~VINO_CTRL_B_DMA_ENBL;
1821 vino->control = ctrl;
1822 dprintk("vino_dma_stop():\n");
1823}
1824
1825/*
1826 * Load dummy page to descriptor registers. This prevents generation of
1827 * spurious interrupts. (execute only with vino_lock locked)
1828 */
1829static void vino_clear_interrupt(struct vino_channel_settings *vcs)
1830{
1831 struct sgi_vino_channel *ch;
1832
1833 ch = (vcs->channel == VINO_CHANNEL_A) ? &vino->a : &vino->b;
1834
1835 ch->page_index = 0;
1836 ch->line_count = 0;
1837
1838 ch->start_desc_tbl = vino_drvdata->dummy_desc_table.dma;
1839 ch->next_4_desc = vino_drvdata->dummy_desc_table.dma;
1840
1841 udelay(VINO_DESC_FETCH_DELAY);
1842 dprintk("channel %c clear interrupt condition\n",
1843 (vcs->channel == VINO_CHANNEL_A) ? 'A':'B');
1844}
1845
1846static int vino_capture(struct vino_channel_settings *vcs,
1847 struct vino_framebuffer *fb)
1848{
1849 int err = 0;
1850 unsigned long flags, flags2;
1851
1852 spin_lock_irqsave(&fb->state_lock, flags);
1853
1854 if (fb->state == VINO_FRAMEBUFFER_IN_USE)
1855 err = -EBUSY;
1856 fb->state = VINO_FRAMEBUFFER_IN_USE;
1857
1858 spin_unlock_irqrestore(&fb->state_lock, flags);
1859
1860 if (err)
1861 return err;
1862
1863 spin_lock_irqsave(&vino_drvdata->vino_lock, flags);
1864 spin_lock_irqsave(&vino_drvdata->input_lock, flags2);
1865
1866 vino_dma_setup(vcs, fb);
1867 vino_dma_start(vcs);
1868
1869 spin_unlock_irqrestore(&vino_drvdata->input_lock, flags2);
1870 spin_unlock_irqrestore(&vino_drvdata->vino_lock, flags);
1871
1872 return err;
1873}
1874
1875static
1876struct vino_framebuffer *vino_capture_enqueue(struct
1877 vino_channel_settings *vcs,
1878 unsigned int index)
1879{
1880 struct vino_framebuffer *fb;
1881 unsigned long flags;
1882
1883 dprintk("vino_capture_enqueue():\n");
1884
1885 spin_lock_irqsave(&vcs->capture_lock, flags);
1886
1887 fb = vino_queue_add(&vcs->fb_queue, index);
1888 if (fb == NULL) {
1889 dprintk("vino_capture_enqueue(): vino_queue_add() failed, "
1890 "queue full?\n");
1891 goto out;
1892 }
1893out:
1894 spin_unlock_irqrestore(&vcs->capture_lock, flags);
1895
1896 return fb;
1897}
1898
1899static int vino_capture_next(struct vino_channel_settings *vcs, int start)
1900{
1901 struct vino_framebuffer *fb;
1902 unsigned int incoming, id;
1903 int err = 0;
1904 unsigned long flags, flags2;
1905
1906 dprintk("vino_capture_next():\n");
1907
1908 spin_lock_irqsave(&vcs->capture_lock, flags);
1909
1910 if (start) {
1911 /* start capture only if capture isn't in progress already */
1912 if (vcs->capturing) {
1913 spin_unlock_irqrestore(&vcs->capture_lock, flags);
1914 return 0;
1915 }
1916
1917 } else {
1918 /* capture next frame:
1919 * stop capture if capturing is not set */
1920 if (!vcs->capturing) {
1921 spin_unlock_irqrestore(&vcs->capture_lock, flags);
1922 return 0;
1923 }
1924 }
1925
1926 err = vino_queue_get_incoming(&vcs->fb_queue, &incoming);
1927 if (err) {
1928 dprintk("vino_capture_next(): vino_queue_get_incoming() "
1929 "failed\n");
1930 err = -EINVAL;
1931 goto out;
1932 }
1933 if (incoming == 0) {
1934 dprintk("vino_capture_next(): no buffers available\n");
1935 goto out;
1936 }
1937
1938 fb = vino_queue_peek(&vcs->fb_queue, &id);
1939 if (fb == NULL) {
1940 dprintk("vino_capture_next(): vino_queue_peek() failed\n");
1941 err = -EINVAL;
1942 goto out;
1943 }
1944
1945 spin_lock_irqsave(&fb->state_lock, flags2);
1946 fb->state = VINO_FRAMEBUFFER_UNUSED;
1947 spin_unlock_irqrestore(&fb->state_lock, flags2);
1948
1949 if (start) {
1950 vcs->capturing = 1;
1951 }
1952
1953 spin_unlock_irqrestore(&vcs->capture_lock, flags);
1954
1955 err = vino_capture(vcs, fb);
1956
1957 return err;
1958
1959out:
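	/* queue error or no buffers available: stop capturing */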
1960 vcs->capturing = 0;
1961 spin_unlock_irqrestore(&vcs->capture_lock, flags);
1962
1963 return err;
1964}
1965
1966static int vino_is_capturing(struct vino_channel_settings *vcs)
1967{
1968 int ret;
1969 unsigned long flags;
1970
1971 spin_lock_irqsave(&vcs->capture_lock, flags);
1972
1973 ret = vcs->capturing;
1974
1975 spin_unlock_irqrestore(&vcs->capture_lock, flags);
1976
1977 return ret;
1978}
1979
1980/* waits until a frame is captured */
1981static int vino_wait_for_frame(struct vino_channel_settings *vcs)
1982{
1983 wait_queue_t wait;
1984 int err = 0;
1985
1986 dprintk("vino_wait_for_frame():\n");
1987
1988 init_waitqueue_entry(&wait, current);
1989 /* add ourselves into wait queue */
1990 add_wait_queue(&vcs->fb_queue.frame_wait_queue, &wait);
1991 /* and set current state */
1992 set_current_state(TASK_INTERRUPTIBLE);
1993
1994	/* to ensure that schedule_timeout() returns immediately
1995	 * if the VINO interrupt was triggered in the meantime */
1996 schedule_timeout(HZ / 10);
1997
1998 if (signal_pending(current))
1999 err = -EINTR;
2000
2001 remove_wait_queue(&vcs->fb_queue.frame_wait_queue, &wait);
2002
2003 dprintk("vino_wait_for_frame(): waiting for frame %s\n",
2004 err ? "failed" : "ok");
2005
2006 return err;
2007}
2008
2009/* the function assumes that PAGE_SIZE % 4 == 0 */
2010static void vino_convert_to_rgba(struct vino_framebuffer *fb) {
2011 unsigned char *pageptr;
2012 unsigned int page, i;
2013 unsigned char a;
2014
2015 for (page = 0; page < fb->desc_table.page_count; page++) {
2016 pageptr = (unsigned char *)fb->desc_table.virtual[page];
2017
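		/* reverse the byte order of each 32-bit pixel on this page */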
2018 for (i = 0; i < PAGE_SIZE; i += 4) {
2019			a = pageptr[0];
2020			pageptr[0] = pageptr[3];
2021			pageptr[3] = a;
2022			a = pageptr[1];
2023			pageptr[1] = pageptr[2]; pageptr[2] = a;
2024 pageptr += 4;
2025 }
2026 }
2027}
2028
2029/* checks if the buffer is in correct state and syncs data */
2030static int vino_check_buffer(struct vino_channel_settings *vcs,
2031 struct vino_framebuffer *fb)
2032{
2033 int err = 0;
2034 unsigned long flags;
2035
2036 dprintk("vino_check_buffer():\n");
2037
2038 spin_lock_irqsave(&fb->state_lock, flags);
2039 switch (fb->state) {
2040 case VINO_FRAMEBUFFER_IN_USE:
2041 err = -EIO;
2042 break;
2043 case VINO_FRAMEBUFFER_READY:
2044 vino_sync_buffer(fb);
2045 fb->state = VINO_FRAMEBUFFER_UNUSED;
2046 break;
2047 default:
2048 err = -EINVAL;
2049 }
2050 spin_unlock_irqrestore(&fb->state_lock, flags);
2051
2052 if (!err) {
2053 if (vino_pixel_conversion
2054 && (fb->data_format == VINO_DATA_FMT_RGB32)) {
2055 vino_convert_to_rgba(fb);
2056 }
2057 } else if (err && (err != -EINVAL)) {
2058 dprintk("vino_check_buffer(): buffer not ready\n");
2059
2060 spin_lock_irqsave(&vino_drvdata->vino_lock, flags);
2061 vino_dma_stop(vcs);
2062 vino_clear_interrupt(vcs);
2063 spin_unlock_irqrestore(&vino_drvdata->vino_lock, flags);
2064 }
2065
2066 return err;
2067}
2068
2069/* forcefully terminates capture */
2070static void vino_capture_stop(struct vino_channel_settings *vcs)
2071{
2072 unsigned int incoming = 0, outgoing = 0, id;
2073 unsigned long flags, flags2;
2074
2075 dprintk("vino_capture_stop():\n");
2076
2077 spin_lock_irqsave(&vcs->capture_lock, flags);
2078 /* unset capturing to stop queue processing */
2079 vcs->capturing = 0;
2080
2081 spin_lock_irqsave(&vino_drvdata->vino_lock, flags2);
2082
2083 vino_dma_stop(vcs);
2084 vino_clear_interrupt(vcs);
2085
2086 spin_unlock_irqrestore(&vino_drvdata->vino_lock, flags2);
2087
2088 /* remove all items from the queue */
2089 if (vino_queue_get_incoming(&vcs->fb_queue, &incoming)) {
2090 dprintk("vino_capture_stop(): "
2091 "vino_queue_get_incoming() failed\n");
2092 goto out;
2093 }
2094 while (incoming > 0) {
2095 vino_queue_transfer(&vcs->fb_queue);
2096
2097 if (vino_queue_get_incoming(&vcs->fb_queue, &incoming)) {
2098 dprintk("vino_capture_stop(): "
2099 "vino_queue_get_incoming() failed\n");
2100 goto out;
2101 }
2102 }
2103
2104 if (vino_queue_get_outgoing(&vcs->fb_queue, &outgoing)) {
2105 dprintk("vino_capture_stop(): "
2106 "vino_queue_get_outgoing() failed\n");
2107 goto out;
2108 }
2109 while (outgoing > 0) {
2110 vino_queue_remove(&vcs->fb_queue, &id);
2111
2112 if (vino_queue_get_outgoing(&vcs->fb_queue, &outgoing)) {
2113 dprintk("vino_capture_stop(): "
2114 "vino_queue_get_outgoing() failed\n");
2115 goto out;
2116 }
2117 }
2118
2119out:
2120 spin_unlock_irqrestore(&vcs->capture_lock, flags);
2121}
2122
2123static int vino_capture_failed(struct vino_channel_settings *vcs)
2124{
2125 struct vino_framebuffer *fb;
2126 unsigned long flags;
2127 unsigned int i;
2128 int ret;
2129
2130 dprintk("vino_capture_failed():\n");
2131
2132 spin_lock_irqsave(&vino_drvdata->vino_lock, flags);
2133
2134 vino_dma_stop(vcs);
2135 vino_clear_interrupt(vcs);
2136
2137 spin_unlock_irqrestore(&vino_drvdata->vino_lock, flags);
2138
2139 ret = vino_queue_get_incoming(&vcs->fb_queue, &i);
2140 if (ret == VINO_QUEUE_ERROR) {
2141 dprintk("vino_queue_get_incoming() failed\n");
2142 return -EINVAL;
2143 }
2144 if (i == 0) {
2145 /* no buffers to process */
2146 return 0;
2147 }
2148
2149 fb = vino_queue_peek(&vcs->fb_queue, &i);
2150 if (fb == NULL) {
2151 dprintk("vino_queue_peek() failed\n");
2152 return -EINVAL;
2153 }
2154
2155 spin_lock_irqsave(&fb->state_lock, flags);
2156 if (fb->state == VINO_FRAMEBUFFER_IN_USE) {
2157 fb->state = VINO_FRAMEBUFFER_UNUSED;
2158 vino_queue_transfer(&vcs->fb_queue);
2159 vino_queue_remove(&vcs->fb_queue, &i);
2160 /* we should actually discard the newest frame,
2161 * but who cares ... */
2162 }
2163 spin_unlock_irqrestore(&fb->state_lock, flags);
2164
2165 return 0;
2166}
2167
2168static void vino_frame_done(struct vino_channel_settings *vcs,
2169 unsigned int fc)
2170{
2171 struct vino_framebuffer *fb;
2172 unsigned long flags;
2173
2174 spin_lock_irqsave(&vcs->capture_lock, flags);
2175 fb = vino_queue_transfer(&vcs->fb_queue);
2176 if (!fb) {
2177 spin_unlock_irqrestore(&vcs->capture_lock, flags);
2178 dprintk("vino_frame_done(): vino_queue_transfer() failed!\n");
2179 return;
2180 }
2181 spin_unlock_irqrestore(&vcs->capture_lock, flags);
2182
2183 fb->frame_counter = fc;
2184 do_gettimeofday(&fb->timestamp);
2185
2186 spin_lock_irqsave(&fb->state_lock, flags);
2187 if (fb->state == VINO_FRAMEBUFFER_IN_USE)
2188 fb->state = VINO_FRAMEBUFFER_READY;
2189 spin_unlock_irqrestore(&fb->state_lock, flags);
2190
2191 wake_up(&vcs->fb_queue.frame_wait_queue);
2192
2193 vino_capture_next(vcs, 0);
2194}
2195
2196static irqreturn_t vino_interrupt(int irq, void *dev_id, struct pt_regs *regs)
2197{
2198 u32 intr;
2199 unsigned int fc_a, fc_b;
2200 int done_a = 0;
2201 int done_b = 0;
2202
2203 spin_lock(&vino_drvdata->vino_lock);
2204
2205 intr = vino->intr_status;
2206 fc_a = vino->a.field_counter / 2;
2207 fc_b = vino->b.field_counter / 2;
2208
2209 // TODO: handle error-interrupts in some special way ?
2210
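	/* each channel raises an end-of-field interrupt per field; a frame is
	 * complete (and handed to vino_frame_done()) only after both fields */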
2211 if (intr & VINO_INTSTAT_A) {
2212 if (intr & VINO_INTSTAT_A_EOF) {
2213 vino_drvdata->a.field++;
2214 if (vino_drvdata->a.field > 1) {
2215 vino_dma_stop(&vino_drvdata->a);
2216 vino_clear_interrupt(&vino_drvdata->a);
2217 vino_drvdata->a.field = 0;
2218 done_a = 1;
2219 }
2220 dprintk("intr: channel A end-of-field interrupt: "
2221 "%04x\n", intr);
2222 } else {
2223 vino_dma_stop(&vino_drvdata->a);
2224 vino_clear_interrupt(&vino_drvdata->a);
2225 done_a = 1;
2226 dprintk("channel A error interrupt: %04x\n", intr);
2227 }
2228 }
2229 if (intr & VINO_INTSTAT_B) {
2230 if (intr & VINO_INTSTAT_B_EOF) {
2231 vino_drvdata->b.field++;
2232 if (vino_drvdata->b.field > 1) {
2233 vino_dma_stop(&vino_drvdata->b);
2234 vino_clear_interrupt(&vino_drvdata->b);
2235 vino_drvdata->b.field = 0;
2236 done_b = 1;
2237 }
2238 dprintk("intr: channel B end-of-field interrupt: "
2239 "%04x\n", intr);
2240 } else {
2241 vino_dma_stop(&vino_drvdata->b);
2242 vino_clear_interrupt(&vino_drvdata->b);
2243 done_b = 1;
2244 dprintk("channel B error interrupt: %04x\n", intr);
2245 }
2246 }
2247
2248 /* always remember to clear interrupt status */
2249 vino->intr_status = ~intr;
2250
2251 spin_unlock(&vino_drvdata->vino_lock);
2252
2253 if (done_a) {
2254 vino_frame_done(&vino_drvdata->a, fc_a);
2255 dprintk("channel A frame done, interrupt: %d\n", intr);
2256 }
2257 if (done_b) {
2258 vino_frame_done(&vino_drvdata->b, fc_b);
2259 dprintk("channel B frame done, interrupt: %d\n", intr);
2260 }
 207
 208	return -EINVAL;
 209}
 210
 211static const struct video_device vino_device = {
2261
2262	return IRQ_HANDLED;
2263}
2264
2265/* VINO video input management */
2266
2267static int vino_get_saa7191_input(int input)
2268{
2269 switch (input) {
2270 case VINO_INPUT_COMPOSITE:
2271 return SAA7191_INPUT_COMPOSITE;
2272 case VINO_INPUT_SVIDEO:
2273 return SAA7191_INPUT_SVIDEO;
2274 default:
2275 printk(KERN_ERR "VINO: vino_get_saa7191_input(): "
2276 "invalid input!\n");
2277 return -1;
2278 }
2279}
2280
2281static int vino_get_saa7191_norm(int norm)
2282{
2283 switch (norm) {
2284 case VINO_DATA_NORM_AUTO:
2285 return SAA7191_NORM_AUTO;
2286 case VINO_DATA_NORM_PAL:
2287 return SAA7191_NORM_PAL;
2288 case VINO_DATA_NORM_NTSC:
2289 return SAA7191_NORM_NTSC;
2290 case VINO_DATA_NORM_SECAM:
2291 return SAA7191_NORM_SECAM;
2292 default:
2293 printk(KERN_ERR "VINO: vino_get_saa7191_norm(): "
2294 "invalid norm!\n");
2295 return -1;
2296 }
2297}
2298
2299/* execute with input_lock locked */
2300static int vino_is_input_owner(struct vino_channel_settings *vcs)
2301{
2302 switch(vcs->input) {
2303 case VINO_INPUT_COMPOSITE:
2304 case VINO_INPUT_SVIDEO:
2305 return (vino_drvdata->decoder.owner == vcs->channel);
2306 case VINO_INPUT_D1:
2307 return (vino_drvdata->camera.owner == vcs->channel);
2308 default:
2309 return 0;
2310 }
2311}
2312
2313static int vino_acquire_input(struct vino_channel_settings *vcs)
2314{
2315 int ret = 0;
2316
2317 dprintk("vino_acquire_input():\n");
2318
2319 spin_lock(&vino_drvdata->input_lock);
2320
2321 /* First try D1 and then SAA7191 */
2322 if (vino_drvdata->camera.driver
2323 && (vino_drvdata->camera.owner == VINO_NO_CHANNEL)) {
2324 if (i2c_use_client(vino_drvdata->camera.driver)) {
2325 ret = -ENODEV;
2326 goto out;
2327 }
2328
2329 vino_drvdata->camera.owner = vcs->channel;
2330 vcs->input = VINO_INPUT_D1;
2331 vcs->data_norm = VINO_DATA_NORM_D1;
2332 } else if (vino_drvdata->decoder.driver
2333 && (vino_drvdata->decoder.owner == VINO_NO_CHANNEL)) {
2334 int saa7191_input;
2335 int saa7191_norm;
2336
2337 if (i2c_use_client(vino_drvdata->decoder.driver)) {
2338 ret = -ENODEV;
2339 goto out;
2340 }
2341
2342 vino_drvdata->decoder.owner = vcs->channel;
2343 vcs->input = VINO_INPUT_COMPOSITE;
2344 vcs->data_norm = VINO_DATA_NORM_PAL;
2345
2346 saa7191_input = vino_get_saa7191_input(vcs->input);
2347 i2c_decoder_command(DECODER_SET_INPUT, &saa7191_input);
2348
2349 saa7191_norm = vino_get_saa7191_norm(vcs->data_norm);
2350 i2c_decoder_command(DECODER_SAA7191_SET_NORM, &saa7191_norm);
2351 } else {
2352 vcs->input = (vcs->channel == VINO_CHANNEL_A) ?
2353 vino_drvdata->b.input : vino_drvdata->a.input;
2354 vcs->data_norm = (vcs->channel == VINO_CHANNEL_A) ?
2355 vino_drvdata->b.data_norm : vino_drvdata->a.data_norm;
2356 }
2357
2358 if (vcs->input == VINO_INPUT_NONE) {
2359 ret = -ENODEV;
2360 goto out;
2361 }
2362
2363 if (vino_is_input_owner(vcs)) {
2364 vino_set_default_clipping(vcs);
2365 vino_set_default_framerate(vcs);
2366 }
2367
2368 dprintk("vino_acquire_input(): %s\n", vino_inputs[vcs->input].name);
2369
2370out:
2371 spin_unlock(&vino_drvdata->input_lock);
2372
2373 return ret;
2374}
2375
2376static int vino_set_input(struct vino_channel_settings *vcs, int input)
2377{
2378 struct vino_channel_settings *vcs2 = (vcs->channel == VINO_CHANNEL_A) ?
2379 &vino_drvdata->b : &vino_drvdata->a;
2380 int ret = 0;
2381
2382 dprintk("vino_set_input():\n");
2383
2384 spin_lock(&vino_drvdata->input_lock);
2385
2386 if (vcs->input == input)
2387 goto out;
2388
2389 switch(input) {
2390 case VINO_INPUT_COMPOSITE:
2391 case VINO_INPUT_SVIDEO:
2392 if (!vino_drvdata->decoder.driver) {
2393 ret = -EINVAL;
2394 goto out;
2395 }
2396
2397 if (vino_drvdata->decoder.owner == VINO_NO_CHANNEL) {
2398 if (i2c_use_client(vino_drvdata->decoder.driver)) {
2399 ret = -ENODEV;
2400 goto out;
2401 }
2402 vino_drvdata->decoder.owner = vcs->channel;
2403 }
2404
2405 if (vino_drvdata->decoder.owner == vcs->channel) {
2406 int saa7191_input;
2407 int saa7191_norm;
2408
2409 vcs->input = input;
2410 vcs->data_norm = VINO_DATA_NORM_PAL;
2411
2412 saa7191_input = vino_get_saa7191_input(vcs->input);
2413 i2c_decoder_command(DECODER_SET_INPUT, &saa7191_input);
2414 saa7191_norm = vino_get_saa7191_norm(vcs->data_norm);
2415 i2c_decoder_command(DECODER_SAA7191_SET_NORM,
2416 &saa7191_norm);
2417 } else {
2418 if (vcs2->input != input) {
2419 ret = -EBUSY;
2420 goto out;
2421 }
2422
2423 vcs->input = input;
2424 vcs->data_norm = vcs2->data_norm;
2425 }
2426
2427 if (vino_drvdata->camera.owner == vcs->channel) {
2428 /* Transfer the ownership or release the input */
2429 if (vcs2->input == VINO_INPUT_D1) {
2430 vino_drvdata->camera.owner = vcs2->channel;
2431 } else {
2432 i2c_release_client(vino_drvdata->
2433 camera.driver);
2434 vino_drvdata->camera.owner = VINO_NO_CHANNEL;
2435 }
2436 }
2437 break;
2438 case VINO_INPUT_D1:
2439 if (!vino_drvdata->camera.driver) {
2440 ret = -EINVAL;
2441 goto out;
2442 }
2443
2444 if (vino_drvdata->camera.owner == VINO_NO_CHANNEL) {
2445 if (i2c_use_client(vino_drvdata->camera.driver)) {
2446 ret = -ENODEV;
2447 goto out;
2448 }
2449 vino_drvdata->camera.owner = vcs->channel;
2450 }
2451
2452 if (vino_drvdata->decoder.owner == vcs->channel) {
2453 /* Transfer the ownership or release the input */
2454 if ((vcs2->input == VINO_INPUT_COMPOSITE) ||
2455 (vcs2->input == VINO_INPUT_SVIDEO)) {
2456 vino_drvdata->decoder.owner = vcs2->channel;
2457 } else {
2458 i2c_release_client(vino_drvdata->
2459 decoder.driver);
2460 vino_drvdata->decoder.owner = VINO_NO_CHANNEL;
2461 }
2462 }
2463
2464 vcs->input = input;
2465 vcs->data_norm = VINO_DATA_NORM_D1;
2466 break;
2467 default:
2468 ret = -EINVAL;
2469 goto out;
2470 }
2471
2472 vino_set_default_clipping(vcs);
2473 vino_set_default_framerate(vcs);
2474
2475 dprintk("vino_set_input(): %s\n", vino_inputs[vcs->input].name);
2476
2477out:
2478 spin_unlock(&vino_drvdata->input_lock);
2479
2480 return ret;
2481}
2482
2483static void vino_release_input(struct vino_channel_settings *vcs)
2484{
2485 struct vino_channel_settings *vcs2 = (vcs->channel == VINO_CHANNEL_A) ?
2486 &vino_drvdata->b : &vino_drvdata->a;
2487
2488 dprintk("vino_release_input():\n");
2489
2490 spin_lock(&vino_drvdata->input_lock);
2491
2492 /* Release ownership of the channel
2493 * and if the other channel takes input from
2494 * the same source, transfer the ownership */
2495 if (vino_drvdata->camera.owner == vcs->channel) {
2496 if (vcs2->input == VINO_INPUT_D1) {
2497 vino_drvdata->camera.owner = vcs2->channel;
2498 } else {
2499 i2c_release_client(vino_drvdata->camera.driver);
2500 vino_drvdata->camera.owner = VINO_NO_CHANNEL;
2501 }
2502 } else if (vino_drvdata->decoder.owner == vcs->channel) {
2503 if ((vcs2->input == VINO_INPUT_COMPOSITE) ||
2504 (vcs2->input == VINO_INPUT_SVIDEO)) {
2505 vino_drvdata->decoder.owner = vcs2->channel;
2506 } else {
2507 i2c_release_client(vino_drvdata->decoder.driver);
2508 vino_drvdata->decoder.owner = VINO_NO_CHANNEL;
2509 }
2510 }
2511 vcs->input = VINO_INPUT_NONE;
2512
2513 spin_unlock(&vino_drvdata->input_lock);
2514}
2515
2516/* execute with input_lock locked */
2517static int vino_set_data_norm(struct vino_channel_settings *vcs,
2518 unsigned int data_norm)
2519{
2520 int saa7191_norm;
2521
2522 switch (vcs->input) {
2523 case VINO_INPUT_D1:
2524 /* only one "norm" supported */
2525 if (data_norm != VINO_DATA_NORM_D1)
2526 return -EINVAL;
2527 break;
2528 case VINO_INPUT_COMPOSITE:
2529 case VINO_INPUT_SVIDEO:
2530
2531 saa7191_norm = vino_get_saa7191_norm(data_norm);
2532
2533 i2c_decoder_command(DECODER_SAA7191_SET_NORM, &saa7191_norm);
2534 vcs->data_norm = data_norm;
2535 break;
2536 default:
2537 return -EINVAL;
2538 }
2539
2540 return 0;
2541}
2542
2543/* V4L2 helper functions */
2544
2545static int vino_find_data_format(__u32 pixelformat)
2546{
2547 int i;
2548
2549 for (i = 0; i < VINO_DATA_FMT_COUNT; i++) {
2550 if (vino_data_formats[i].pixelformat == pixelformat)
2551 return i;
2552 }
2553
2554 return VINO_DATA_FMT_NONE;
2555}
2556
2557static int vino_enum_data_norm(struct vino_channel_settings *vcs, __u32 index)
2558{
2559 int data_norm = VINO_DATA_NORM_NONE;
2560
2561 spin_lock(&vino_drvdata->input_lock);
2562 switch(vcs->input) {
2563 case VINO_INPUT_COMPOSITE:
2564 case VINO_INPUT_SVIDEO:
2565 if (index == 0) {
2566 data_norm = VINO_DATA_NORM_PAL;
2567 } else if (index == 1) {
2568 data_norm = VINO_DATA_NORM_NTSC;
2569 } else if (index == 2) {
2570 data_norm = VINO_DATA_NORM_SECAM;
2571 }
2572 break;
2573 case VINO_INPUT_D1:
2574 if (index == 0) {
2575 data_norm = VINO_DATA_NORM_D1;
2576 }
2577 break;
2578 }
2579 spin_unlock(&vino_drvdata->input_lock);
2580
2581 return data_norm;
2582}
2583
2584static int vino_enum_input(struct vino_channel_settings *vcs, __u32 index)
2585{
2586 int input = VINO_INPUT_NONE;
2587
2588 spin_lock(&vino_drvdata->input_lock);
2589 if (vino_drvdata->decoder.driver && vino_drvdata->camera.driver) {
2590 switch (index) {
2591 case 0:
2592 input = VINO_INPUT_COMPOSITE;
2593 break;
2594 case 1:
2595 input = VINO_INPUT_SVIDEO;
2596 break;
2597 case 2:
2598 input = VINO_INPUT_D1;
2599 break;
2600 }
2601 } else if (vino_drvdata->decoder.driver) {
2602 switch (index) {
2603 case 0:
2604 input = VINO_INPUT_COMPOSITE;
2605 break;
2606 case 1:
2607 input = VINO_INPUT_SVIDEO;
2608 break;
2609 }
2610 } else if (vino_drvdata->camera.driver) {
2611 switch (index) {
2612 case 0:
2613 input = VINO_INPUT_D1;
2614 break;
2615 }
2616 }
2617 spin_unlock(&vino_drvdata->input_lock);
2618
2619 return input;
2620}
2621
2622/* execute with input_lock locked */
2623static __u32 vino_find_input_index(struct vino_channel_settings *vcs)
2624{
2625 __u32 index = 0;
2626 // FIXME: detect when no inputs available
2627
2628 if (vino_drvdata->decoder.driver && vino_drvdata->camera.driver) {
2629 switch (vcs->input) {
2630 case VINO_INPUT_COMPOSITE:
2631 index = 0;
2632 break;
2633 case VINO_INPUT_SVIDEO:
2634 index = 1;
2635 break;
2636 case VINO_INPUT_D1:
2637 index = 2;
2638 break;
2639 }
2640 } else if (vino_drvdata->decoder.driver) {
2641 switch (vcs->input) {
2642 case VINO_INPUT_COMPOSITE:
2643 index = 0;
2644 break;
2645 case VINO_INPUT_SVIDEO:
2646 index = 1;
2647 break;
2648 }
2649 } else if (vino_drvdata->camera.driver) {
2650 switch (vcs->input) {
2651 case VINO_INPUT_D1:
2652 index = 0;
2653 break;
2654 }
2655 }
2656
2657 return index;
2658}
2659
2660/* V4L2 ioctls */
2661
2662static void vino_v4l2_querycap(struct v4l2_capability *cap)
2663{
2664 memset(cap, 0, sizeof(struct v4l2_capability));
2665
2666 strcpy(cap->driver, vino_driver_name);
2667 strcpy(cap->card, vino_driver_description);
2668 strcpy(cap->bus_info, vino_bus_name);
2669 cap->version = VINO_VERSION_CODE;
2670 cap->capabilities =
2671 V4L2_CAP_VIDEO_CAPTURE |
2672 V4L2_CAP_STREAMING;
2673 // V4L2_CAP_OVERLAY, V4L2_CAP_READWRITE
2674}
2675
2676static int vino_v4l2_enuminput(struct vino_channel_settings *vcs,
2677 struct v4l2_input *i)
2678{
2679 __u32 index = i->index;
2680 int input;
2681 dprintk("requested index = %d\n", index);
2682
2683 input = vino_enum_input(vcs, index);
2684 if (input == VINO_INPUT_NONE)
2685 return -EINVAL;
2686
2687 memset(i, 0, sizeof(struct v4l2_input));
2688
2689 i->index = index;
2690 i->type = V4L2_INPUT_TYPE_CAMERA;
2691 i->std = vino_inputs[input].std;
2692 strcpy(i->name, vino_inputs[input].name);
2693
2694 if ((input == VINO_INPUT_COMPOSITE)
2695 || (input == VINO_INPUT_SVIDEO)) {
2696 struct saa7191_status status;
2697 i2c_decoder_command(DECODER_SAA7191_GET_STATUS, &status);
2698 i->status |= status.signal ? 0 : V4L2_IN_ST_NO_SIGNAL;
2699 i->status |= status.color ? 0 : V4L2_IN_ST_NO_COLOR;
2700 }
2701
2702 return 0;
2703}
2704
2705static int vino_v4l2_g_input(struct vino_channel_settings *vcs,
2706 struct v4l2_input *i)
2707{
2708 __u32 index;
2709 int input;
2710
2711 spin_lock(&vino_drvdata->input_lock);
2712 input = vcs->input;
2713 index = vino_find_input_index(vcs);
2714 spin_unlock(&vino_drvdata->input_lock);
2715
2716 dprintk("input = %d\n", input);
2717
2718 if (input == VINO_INPUT_NONE) {
2719 return -EINVAL;
2720 }
2721
2722 memset(i, 0, sizeof(struct v4l2_input));
2723
2724 i->index = index;
2725 i->type = V4L2_INPUT_TYPE_CAMERA;
2726 i->std = vino_inputs[input].std;
2727 strcpy(i->name, vino_inputs[input].name);
2728
2729 return 0;
2730}
2731
2732static int vino_v4l2_s_input(struct vino_channel_settings *vcs,
2733 struct v4l2_input *i)
2734{
2735 int input;
2736 dprintk("requested input = %d\n", i->index);
2737
2738 input = vino_enum_input(vcs, i->index);
2739 if (input == VINO_INPUT_NONE)
2740 return -EINVAL;
2741
2742 return vino_set_input(vcs, input);
2743}
2744
2745static int vino_v4l2_enumstd(struct vino_channel_settings *vcs,
2746 struct v4l2_standard *s)
2747{
2748 int index = s->index;
2749 int data_norm = vino_enum_data_norm(vcs, index);
2750 dprintk("standard index = %d\n", index);
2751
2752 if (data_norm == VINO_DATA_NORM_NONE)
2753 return -EINVAL;
2754
2755 dprintk("standard name = %s\n",
2756 vino_data_norms[data_norm].description);
2757
2758 memset(s, 0, sizeof(struct v4l2_standard));
2759 s->index = index;
2760
2761 s->id = vino_data_norms[data_norm].std;
2762 s->frameperiod.numerator = 1;
2763 s->frameperiod.denominator =
2764 vino_data_norms[data_norm].fps_max;
2765 s->framelines =
2766 vino_data_norms[data_norm].framelines;
2767 strcpy(s->name,
2768 vino_data_norms[data_norm].description);
2769
2770 return 0;
2771}
2772
2773static int vino_v4l2_g_std(struct vino_channel_settings *vcs,
2774 v4l2_std_id *std)
2775{
2776 spin_lock(&vino_drvdata->input_lock);
2777 dprintk("current standard = %d\n", vcs->data_norm);
2778 *std = vino_data_norms[vcs->data_norm].std;
2779 spin_unlock(&vino_drvdata->input_lock);
2780
2781 return 0;
2782}
2783
2784static int vino_v4l2_s_std(struct vino_channel_settings *vcs,
2785 v4l2_std_id *std)
2786{
2787 int ret = 0;
2788
2789 spin_lock(&vino_drvdata->input_lock);
2790
2791 /* check if the standard is valid for the current input */
2792 if (vino_is_input_owner(vcs)
2793 && (vino_inputs[vcs->input].std & (*std))) {
2794 dprintk("standard accepted\n");
2795
2796 /* change the video norm for SAA7191
2797 * and accept NTSC for D1 (do nothing) */
2798
2799 if (vcs->input == VINO_INPUT_D1)
2800 goto out;
2801
2802 if ((*std) & V4L2_STD_PAL) {
2803 vino_set_data_norm(vcs, VINO_DATA_NORM_PAL);
2804 vcs->data_norm = VINO_DATA_NORM_PAL;
2805 } else if ((*std) & V4L2_STD_NTSC) {
2806 vino_set_data_norm(vcs, VINO_DATA_NORM_NTSC);
2807 vcs->data_norm = VINO_DATA_NORM_NTSC;
2808 } else if ((*std) & V4L2_STD_SECAM) {
2809 vino_set_data_norm(vcs, VINO_DATA_NORM_SECAM);
2810 vcs->data_norm = VINO_DATA_NORM_SECAM;
2811 } else {
2812 ret = -EINVAL;
2813 }
2814 } else {
2815 ret = -EINVAL;
2816 }
2817
2818out:
2819 spin_unlock(&vino_drvdata->input_lock);
2820
2821 return ret;
2822}
2823
2824static int vino_v4l2_enum_fmt(struct vino_channel_settings *vcs,
2825 struct v4l2_fmtdesc *fd)
2826{
2827 enum v4l2_buf_type type = fd->type;
2828 int index = fd->index;
2829 dprintk("format index = %d\n", index);
2830
2831 switch (fd->type) {
2832 case V4L2_BUF_TYPE_VIDEO_CAPTURE:
2833 if ((fd->index < 0) ||
2834 (fd->index >= VINO_DATA_FMT_COUNT))
2835 return -EINVAL;
2836 dprintk("format name = %s\n",
2837 vino_data_formats[index].description);
2838
2839 memset(fd, 0, sizeof(struct v4l2_fmtdesc));
2840 fd->index = index;
2841 fd->type = type;
2842 fd->pixelformat = vino_data_formats[index].pixelformat;
2843 strcpy(fd->description, vino_data_formats[index].description);
2844 break;
2845 case V4L2_BUF_TYPE_VIDEO_OVERLAY:
2846 default:
2847 return -EINVAL;
2848 }
2849
2850 return 0;
2851}
2852
2853static int vino_v4l2_try_fmt(struct vino_channel_settings *vcs,
2854 struct v4l2_format *f)
2855{
2856 struct vino_channel_settings tempvcs;
2857
2858 switch (f->type) {
2859 case V4L2_BUF_TYPE_VIDEO_CAPTURE: {
2860 struct v4l2_pix_format *pf = &f->fmt.pix;
2861
2862 dprintk("requested: w = %d, h = %d\n",
2863 pf->width, pf->height);
2864
2865 spin_lock(&vino_drvdata->input_lock);
2866 memcpy(&tempvcs, vcs, sizeof(struct vino_channel_settings));
2867 spin_unlock(&vino_drvdata->input_lock);
2868
2869 tempvcs.data_format = vino_find_data_format(pf->pixelformat);
2870 if (tempvcs.data_format == VINO_DATA_FMT_NONE) {
2871 tempvcs.data_format = VINO_DATA_FMT_RGB32;
2872 pf->pixelformat =
2873 vino_data_formats[tempvcs.data_format].
2874 pixelformat;
2875 }
2876
2877 /* data format must be set before clipping/scaling */
2878 vino_set_scaling(&tempvcs, pf->width, pf->height);
2879
2880 dprintk("data format = %s\n",
2881 vino_data_formats[tempvcs.data_format].description);
2882
2883 pf->width = (tempvcs.clipping.right - tempvcs.clipping.left) /
2884 tempvcs.decimation;
2885 pf->height = (tempvcs.clipping.bottom - tempvcs.clipping.top) /
2886 tempvcs.decimation;
2887
2888 pf->field = V4L2_FIELD_INTERLACED;
2889 pf->bytesperline = tempvcs.line_size;
2890 pf->sizeimage = tempvcs.line_size *
2891 (tempvcs.clipping.bottom - tempvcs.clipping.top) /
2892 tempvcs.decimation;
2893 pf->colorspace =
2894 vino_data_formats[tempvcs.data_format].colorspace;
2895
2896 pf->priv = 0;
2897 break;
2898 }
2899 case V4L2_BUF_TYPE_VIDEO_OVERLAY:
2900 default:
2901 return -EINVAL;
2902 }
2903
2904 return 0;
2905}
2906
2907static int vino_v4l2_g_fmt(struct vino_channel_settings *vcs,
2908 struct v4l2_format *f)
2909{
2910 switch (f->type) {
2911 case V4L2_BUF_TYPE_VIDEO_CAPTURE: {
2912 struct v4l2_pix_format *pf = &f->fmt.pix;
2913 spin_lock(&vino_drvdata->input_lock);
2914
2915 pf->width = (vcs->clipping.right - vcs->clipping.left) /
2916 vcs->decimation;
2917 pf->height = (vcs->clipping.bottom - vcs->clipping.top) /
2918 vcs->decimation;
2919 pf->pixelformat =
2920 vino_data_formats[vcs->data_format].pixelformat;
2921
2922 pf->field = V4L2_FIELD_INTERLACED;
2923 pf->bytesperline = vcs->line_size;
2924 pf->sizeimage = vcs->line_size *
2925 (vcs->clipping.bottom - vcs->clipping.top) /
2926 vcs->decimation;
2927 pf->colorspace =
2928 vino_data_formats[vcs->data_format].colorspace;
2929
2930 pf->priv = 0;
2931
2932 spin_unlock(&vino_drvdata->input_lock);
2933 break;
2934 }
2935 case V4L2_BUF_TYPE_VIDEO_OVERLAY:
2936 default:
2937 return -EINVAL;
2938 }
2939
2940 return 0;
2941}
2942
2943static int vino_v4l2_s_fmt(struct vino_channel_settings *vcs,
2944 struct v4l2_format *f)
2945{
2946 int data_format;
2947
2948 switch (f->type) {
2949 case V4L2_BUF_TYPE_VIDEO_CAPTURE: {
2950 struct v4l2_pix_format *pf = &f->fmt.pix;
2951 spin_lock(&vino_drvdata->input_lock);
2952
2953 if (!vino_is_input_owner(vcs)) {
2954 spin_unlock(&vino_drvdata->input_lock);
2955 return -EINVAL;
2956 }
2957
2958 data_format = vino_find_data_format(pf->pixelformat);
2959 if (data_format == VINO_DATA_FMT_NONE) {
2960 vcs->data_format = VINO_DATA_FMT_RGB32;
2961 pf->pixelformat =
2962 vino_data_formats[vcs->data_format].
2963 pixelformat;
2964 } else {
2965 vcs->data_format = data_format;
2966 }
2967
2968 /* data format must be set before clipping/scaling */
2969 vino_set_scaling(vcs, pf->width, pf->height);
2970
2971 dprintk("data format = %s\n",
2972 vino_data_formats[vcs->data_format].description);
2973
2974 pf->width = vcs->clipping.right - vcs->clipping.left;
2975 pf->height = vcs->clipping.bottom - vcs->clipping.top;
2976
2977 pf->field = V4L2_FIELD_INTERLACED;
2978 pf->bytesperline = vcs->line_size;
2979 pf->sizeimage = vcs->line_size *
2980 (vcs->clipping.bottom - vcs->clipping.top) /
2981 vcs->decimation;
2982 pf->colorspace =
2983 vino_data_formats[vcs->data_format].colorspace;
2984
2985 pf->priv = 0;
2986
2987 spin_unlock(&vino_drvdata->input_lock);
2988 break;
2989 }
2990 case V4L2_BUF_TYPE_VIDEO_OVERLAY:
2991 default:
2992 return -EINVAL;
2993 }
2994
2995 return 0;
2996}
2997
2998static int vino_v4l2_cropcap(struct vino_channel_settings *vcs,
2999 struct v4l2_cropcap *ccap)
3000{
3001 const struct vino_data_norm *norm;
3002
3003 switch (ccap->type) {
3004 case V4L2_BUF_TYPE_VIDEO_CAPTURE:
3005 spin_lock(&vino_drvdata->input_lock);
3006 norm = &vino_data_norms[vcs->data_norm];
3007 spin_unlock(&vino_drvdata->input_lock);
3008
3009 ccap->bounds.left = 0;
3010 ccap->bounds.top = 0;
3011 ccap->bounds.width = norm->width;
3012 ccap->bounds.height = norm->height;
3013 memcpy(&ccap->defrect, &ccap->bounds,
3014 sizeof(struct v4l2_rect));
3015
3016 ccap->pixelaspect.numerator = 1;
3017 ccap->pixelaspect.denominator = 1;
3018 break;
3019 case V4L2_BUF_TYPE_VIDEO_OVERLAY:
3020 default:
3021 return -EINVAL;
3022 }
3023
3024 return 0;
3025}
3026
3027static int vino_v4l2_g_crop(struct vino_channel_settings *vcs,
3028 struct v4l2_crop *c)
3029{
3030 switch (c->type) {
3031 case V4L2_BUF_TYPE_VIDEO_CAPTURE:
3032 spin_lock(&vino_drvdata->input_lock);
3033
3034 c->c.left = vcs->clipping.left;
3035 c->c.top = vcs->clipping.top;
3036 c->c.width = vcs->clipping.right - vcs->clipping.left;
3037 c->c.height = vcs->clipping.bottom - vcs->clipping.top;
3038
3039 spin_unlock(&vino_drvdata->input_lock);
3040 break;
3041 case V4L2_BUF_TYPE_VIDEO_OVERLAY:
3042 default:
3043 return -EINVAL;
3044 }
3045
3046 return 0;
3047}
3048
3049static int vino_v4l2_s_crop(struct vino_channel_settings *vcs,
3050 struct v4l2_crop *c)
3051{
3052 switch (c->type) {
3053 case V4L2_BUF_TYPE_VIDEO_CAPTURE:
3054 spin_lock(&vino_drvdata->input_lock);
3055
3056 if (!vino_is_input_owner(vcs)) {
3057 spin_unlock(&vino_drvdata->input_lock);
3058 return -EINVAL;
3059 }
3060 vino_set_clipping(vcs, c->c.left, c->c.top,
3061 c->c.width, c->c.height);
3062
3063 spin_unlock(&vino_drvdata->input_lock);
3064 break;
3065 case V4L2_BUF_TYPE_VIDEO_OVERLAY:
3066 default:
3067 return -EINVAL;
3068 }
3069
3070 return 0;
3071}
3072
3073static int vino_v4l2_g_parm(struct vino_channel_settings *vcs,
3074 struct v4l2_streamparm *sp)
3075{
3076 switch (sp->type) {
3077 case V4L2_BUF_TYPE_VIDEO_CAPTURE: {
3078 struct v4l2_captureparm *cp = &sp->parm.capture;
3079 memset(cp, 0, sizeof(struct v4l2_captureparm));
3080
3081 cp->capability = V4L2_CAP_TIMEPERFRAME;
3082 cp->timeperframe.numerator = 1;
3083
3084 spin_lock(&vino_drvdata->input_lock);
3085 cp->timeperframe.denominator = vcs->fps;
3086 spin_unlock(&vino_drvdata->input_lock);
3087
3088 // TODO: cp->readbuffers = xxx;
3089 break;
3090 }
3091 case V4L2_BUF_TYPE_VIDEO_OVERLAY:
3092 default:
3093 return -EINVAL;
3094 }
3095
3096 return 0;
3097}
3098
3099static int vino_v4l2_s_parm(struct vino_channel_settings *vcs,
3100 struct v4l2_streamparm *sp)
3101{
3102 switch (sp->type) {
3103 case V4L2_BUF_TYPE_VIDEO_CAPTURE: {
3104 struct v4l2_captureparm *cp = &sp->parm.capture;
3105
3106 spin_lock(&vino_drvdata->input_lock);
3107 if (!vino_is_input_owner(vcs)) {
3108 spin_unlock(&vino_drvdata->input_lock);
3109 return -EINVAL;
3110 }
3111
3112 if ((cp->timeperframe.numerator == 0) ||
3113 (cp->timeperframe.denominator == 0)) {
3114 /* reset framerate */
3115 vino_set_default_framerate(vcs);
3116 } else {
3117 vino_set_framerate(vcs, cp->timeperframe.denominator /
3118 cp->timeperframe.numerator);
3119 }
3120 spin_unlock(&vino_drvdata->input_lock);
3121
3122 // TODO: set buffers according to cp->readbuffers
3123 break;
3124 }
3125 case V4L2_BUF_TYPE_VIDEO_OVERLAY:
3126 default:
3127 return -EINVAL;
3128 }
3129
3130 return 0;
3131}
3132
3133static int vino_v4l2_reqbufs(struct vino_channel_settings *vcs,
3134 struct v4l2_requestbuffers *rb)
3135{
3136 if (vcs->reading)
3137 return -EBUSY;
3138
3139 switch (rb->type) {
3140 case V4L2_BUF_TYPE_VIDEO_CAPTURE: {
3141 // TODO: check queue type
3142 if (rb->memory != V4L2_MEMORY_MMAP) {
3143 dprintk("type not mmap\n");
3144 return -EINVAL;
3145 }
3146
3147 if (vino_is_capturing(vcs)) {
3148 dprintk("busy, capturing\n");
3149 return -EBUSY;
3150 }
3151
3152 dprintk("count = %d\n", rb->count);
3153 if (rb->count > 0) {
3154 if (vino_queue_has_mapped_buffers(&vcs->fb_queue)) {
3155 dprintk("busy, buffers still mapped\n");
3156 return -EBUSY;
3157 } else {
3158 vino_queue_free(&vcs->fb_queue);
3159 vino_queue_init(&vcs->fb_queue, &rb->count);
3160 }
3161 } else {
3162 vino_capture_stop(vcs);
3163 vino_queue_free(&vcs->fb_queue);
3164 }
3165 break;
3166 }
3167 case V4L2_BUF_TYPE_VIDEO_OVERLAY:
3168 default:
3169 return -EINVAL;
3170 }
3171
3172 return 0;
3173}
3174
3175static void vino_v4l2_get_buffer_status(struct vino_channel_settings *vcs,
3176 struct vino_framebuffer *fb,
3177 struct v4l2_buffer *b)
3178{
3179 if (vino_queue_outgoing_contains(&vcs->fb_queue,
3180 fb->id)) {
3181 b->flags &= ~V4L2_BUF_FLAG_QUEUED;
3182 b->flags |= V4L2_BUF_FLAG_DONE;
3183 } else if (vino_queue_incoming_contains(&vcs->fb_queue,
3184 fb->id)) {
3185 b->flags &= ~V4L2_BUF_FLAG_DONE;
3186 b->flags |= V4L2_BUF_FLAG_QUEUED;
3187 } else {
3188 b->flags &= ~(V4L2_BUF_FLAG_DONE |
3189 V4L2_BUF_FLAG_QUEUED);
3190 }
3191
3192 b->flags &= ~(V4L2_BUF_FLAG_TIMECODE);
3193
3194 if (fb->map_count > 0)
3195 b->flags |= V4L2_BUF_FLAG_MAPPED;
3196
3197 b->index = fb->id;
3198 b->memory = (vcs->fb_queue.type == VINO_MEMORY_MMAP) ?
3199 V4L2_MEMORY_MMAP : V4L2_MEMORY_USERPTR;
3200 b->m.offset = fb->offset;
3201 b->bytesused = fb->data_size;
3202 b->length = fb->size;
3203 b->field = V4L2_FIELD_INTERLACED;
3204 b->sequence = fb->frame_counter;
3205 memcpy(&b->timestamp, &fb->timestamp,
3206 sizeof(struct timeval));
3207 // b->input ?
3208
3209 dprintk("buffer %d: length = %d, bytesused = %d, offset = %d\n",
3210 fb->id, fb->size, fb->data_size, fb->offset);
3211}
3212
3213static int vino_v4l2_querybuf(struct vino_channel_settings *vcs,
3214 struct v4l2_buffer *b)
3215{
3216 if (vcs->reading)
3217 return -EBUSY;
3218
3219 switch (b->type) {
3220 case V4L2_BUF_TYPE_VIDEO_CAPTURE: {
3221 struct vino_framebuffer *fb;
3222
3223 // TODO: check queue type
3224 if (b->index >= vino_queue_get_length(&vcs->fb_queue)) {
3225 dprintk("invalid index = %d\n",
3226 b->index);
3227 return -EINVAL;
3228 }
3229
3230 fb = vino_queue_get_buffer(&vcs->fb_queue,
3231 b->index);
3232 if (fb == NULL) {
3233 dprintk("vino_queue_get_buffer() failed");
3234 return -EINVAL;
3235 }
3236
3237 vino_v4l2_get_buffer_status(vcs, fb, b);
3238 break;
3239 }
3240 case V4L2_BUF_TYPE_VIDEO_OVERLAY:
3241 default:
3242 return -EINVAL;
3243 }
3244
3245 return 0;
3246}
3247
3248static int vino_v4l2_qbuf(struct vino_channel_settings *vcs,
3249 struct v4l2_buffer *b)
3250{
3251 if (vcs->reading)
3252 return -EBUSY;
3253
3254 switch (b->type) {
3255 case V4L2_BUF_TYPE_VIDEO_CAPTURE: {
3256 struct vino_framebuffer *fb;
3257 int ret;
3258
3259 // TODO: check queue type
3260 if (b->memory != V4L2_MEMORY_MMAP) {
3261 dprintk("type not mmap\n");
3262 return -EINVAL;
3263 }
3264
3265 fb = vino_capture_enqueue(vcs, b->index);
3266 if (fb == NULL)
3267 return -EINVAL;
3268
3269 vino_v4l2_get_buffer_status(vcs, fb, b);
3270
3271 if (vcs->streaming) {
3272 ret = vino_capture_next(vcs, 1);
3273 if (ret)
3274 return ret;
3275 }
3276 break;
3277 }
3278 case V4L2_BUF_TYPE_VIDEO_OVERLAY:
3279 default:
3280 return -EINVAL;
3281 }
3282
3283 return 0;
3284}
3285
3286static int vino_v4l2_dqbuf(struct vino_channel_settings *vcs,
3287 struct v4l2_buffer *b,
3288 unsigned int nonblocking)
3289{
3290 if (vcs->reading)
3291 return -EBUSY;
3292
3293 switch (b->type) {
3294 case V4L2_BUF_TYPE_VIDEO_CAPTURE: {
3295 struct vino_framebuffer *fb;
3296 unsigned int incoming, outgoing;
3297 int err;
3298
3299 // TODO: check queue type
3300
3301 err = vino_queue_get_incoming(&vcs->fb_queue, &incoming);
3302 if (err) {
3303 dprintk("vino_queue_get_incoming() failed\n");
3304 return -EIO;
3305 }
3306 err = vino_queue_get_outgoing(&vcs->fb_queue, &outgoing);
3307 if (err) {
3308 dprintk("vino_queue_get_outgoing() failed\n");
3309 return -EIO;
3310 }
3311
3312 dprintk("incoming = %d, outgoing = %d\n", incoming, outgoing);
3313
3314 if (outgoing == 0) {
3315 if (incoming == 0) {
3316 dprintk("no incoming or outgoing buffers\n");
3317 return -EINVAL;
3318 }
3319 if (nonblocking) {
3320 dprintk("non-blocking I/O was selected and "
3321 "there are no buffers to dequeue\n");
3322 return -EAGAIN;
3323 }
3324
3325 err = vino_wait_for_frame(vcs);
3326 if (err) {
3327 err = vino_wait_for_frame(vcs);
3328 if (err) {
3329 /* interrupted */
3330 vino_capture_failed(vcs);
3331 return -EIO;
3332 }
3333 }
3334 }
3335
3336 fb = vino_queue_remove(&vcs->fb_queue, &b->index);
3337 if (fb == NULL) {
3338 dprintk("vino_queue_remove() failed\n");
3339 return -EINVAL;
3340 }
3341
3342 err = vino_check_buffer(vcs, fb);
3343 if (err)
3344 return -EIO;
3345
3346 vino_v4l2_get_buffer_status(vcs, fb, b);
3347 break;
3348 }
3349 case V4L2_BUF_TYPE_VIDEO_OVERLAY:
3350 default:
3351 return -EINVAL;
3352 }
3353
3354 return 0;
3355}
3356
3357static int vino_v4l2_streamon(struct vino_channel_settings *vcs)
3358{
3359 unsigned int incoming;
3360 int ret;
3361 if (vcs->reading)
3362 return -EBUSY;
3363
3364 if (vcs->streaming)
3365 return 0;
3366
3367 // TODO: check queue type
3368
3369 if (vino_queue_get_length(&vcs->fb_queue) < 1) {
3370 dprintk("no buffers allocated\n");
3371 return -EINVAL;
3372 }
3373
3374 ret = vino_queue_get_incoming(&vcs->fb_queue, &incoming);
3375 if (ret) {
3376 dprintk("vino_queue_get_incoming() failed\n");
3377 return -EINVAL;
3378 }
3379
3380 vcs->streaming = 1;
3381
3382 if (incoming > 0) {
3383 ret = vino_capture_next(vcs, 1);
3384 if (ret) {
3385 vcs->streaming = 0;
3386
3387 dprintk("couldn't start capture\n");
3388 return -EINVAL;
3389 }
3390 }
3391
3392 return 0;
3393}
3394
3395static int vino_v4l2_streamoff(struct vino_channel_settings *vcs)
3396{
3397 if (vcs->reading)
3398 return -EBUSY;
3399
3400 if (!vcs->streaming)
3401 return 0;
3402
3403 vino_capture_stop(vcs);
3404 vcs->streaming = 0;
3405
3406 return 0;
3407}
3408
3409static int vino_v4l2_queryctrl(struct vino_channel_settings *vcs,
3410 struct v4l2_queryctrl *queryctrl)
3411{
3412 int i;
3413 int err = 0;
3414
3415 spin_lock(&vino_drvdata->input_lock);
3416
3417 switch (vcs->input) {
3418 case VINO_INPUT_D1:
3419 for (i = 0; i < VINO_INDYCAM_V4L2_CONTROL_COUNT; i++) {
3420 if (vino_indycam_v4l2_controls[i].id ==
3421 queryctrl->id) {
3422 memcpy(queryctrl,
3423 &vino_indycam_v4l2_controls[i],
3424 sizeof(struct v4l2_queryctrl));
3425 goto found;
3426 }
3427 }
3428
3429 err = -EINVAL;
3430 break;
3431 case VINO_INPUT_COMPOSITE:
3432 case VINO_INPUT_SVIDEO:
3433 for (i = 0; i < VINO_SAA7191_V4L2_CONTROL_COUNT; i++) {
3434 if (vino_saa7191_v4l2_controls[i].id ==
3435 queryctrl->id) {
3436 memcpy(queryctrl,
3437 &vino_saa7191_v4l2_controls[i],
3438 sizeof(struct v4l2_queryctrl));
3439 goto found;
3440 }
3441 }
3442
3443 err = -EINVAL;
3444 break;
3445 default:
3446 err = -EINVAL;
3447 }
3448
3449 found:
3450 spin_unlock(&vino_drvdata->input_lock);
3451
3452 return err;
3453}
3454
3455static int vino_v4l2_g_ctrl(struct vino_channel_settings *vcs,
3456 struct v4l2_control *control)
3457{
3458 struct indycam_control indycam_ctrl;
3459 struct saa7191_control saa7191_ctrl;
3460 int err = 0;
3461
3462 spin_lock(&vino_drvdata->input_lock);
3463
3464 switch (vcs->input) {
3465 case VINO_INPUT_D1:
3466 i2c_camera_command(DECODER_INDYCAM_GET_CONTROLS,
3467 &indycam_ctrl);
3468
3469 switch(control->id) {
3470 case V4L2_CID_AUTOGAIN:
3471 control->value = indycam_ctrl.agc;
3472 break;
3473 case V4L2_CID_AUTO_WHITE_BALANCE:
3474 control->value = indycam_ctrl.awb;
3475 break;
3476 case V4L2_CID_GAIN:
3477 control->value = indycam_ctrl.gain;
3478 break;
3479 case V4L2_CID_PRIVATE_BASE:
3480 control->value = indycam_ctrl.red_saturation;
3481 break;
3482 case V4L2_CID_PRIVATE_BASE + 1:
3483 control->value = indycam_ctrl.blue_saturation;
3484 break;
3485 case V4L2_CID_RED_BALANCE:
3486 control->value = indycam_ctrl.red_balance;
3487 break;
3488 case V4L2_CID_BLUE_BALANCE:
3489 control->value = indycam_ctrl.blue_balance;
3490 break;
3491 case V4L2_CID_EXPOSURE:
3492 control->value = indycam_ctrl.shutter;
3493 break;
3494 case V4L2_CID_GAMMA:
3495 control->value = indycam_ctrl.gamma;
3496 break;
3497 default:
3498 err = -EINVAL;
3499 }
3500 break;
3501 case VINO_INPUT_COMPOSITE:
3502 case VINO_INPUT_SVIDEO:
3503 i2c_decoder_command(DECODER_SAA7191_GET_CONTROLS,
3504 &saa7191_ctrl);
3505
3506 switch(control->id) {
3507 case V4L2_CID_HUE:
3508 control->value = saa7191_ctrl.hue;
3509 break;
3510 case V4L2_CID_PRIVATE_BASE:
3511 control->value = saa7191_ctrl.vtrc;
3512 break;
3513 default:
3514 err = -EINVAL;
3515 }
3516 break;
3517 default:
3518 err = -EINVAL;
3519 }
3520
3521 spin_unlock(&vino_drvdata->input_lock);
3522
3523 return err;
3524}
3525
3526static int vino_v4l2_s_ctrl(struct vino_channel_settings *vcs,
3527 struct v4l2_control *control)
3528{
3529 struct indycam_control indycam_ctrl;
3530 struct saa7191_control saa7191_ctrl;
3531 int i;
3532 int err = 0;
3533
3534 spin_lock(&vino_drvdata->input_lock);
3535
3536 switch (vcs->input) {
3537 case VINO_INPUT_D1:
3538 for (i = 0; i < VINO_INDYCAM_V4L2_CONTROL_COUNT; i++) {
3539 if (vino_indycam_v4l2_controls[i].id ==
3540 control->id) {
3541 if ((control->value >=
3542 vino_indycam_v4l2_controls[i].minimum)
3543 && (control->value <=
3544 vino_indycam_v4l2_controls[i].
3545 maximum)) {
3546 goto ok1;
3547 } else {
3548 err = -ERANGE;
3549 goto error;
3550 }
3551 }
3552 }
3553 err = -EINVAL;
3554 goto error;
3555
3556ok1:
3557 indycam_ctrl.agc = INDYCAM_VALUE_UNCHANGED;
3558 indycam_ctrl.awb = INDYCAM_VALUE_UNCHANGED;
3559 indycam_ctrl.shutter = INDYCAM_VALUE_UNCHANGED;
3560 indycam_ctrl.gain = INDYCAM_VALUE_UNCHANGED;
3561 indycam_ctrl.red_balance = INDYCAM_VALUE_UNCHANGED;
3562 indycam_ctrl.blue_balance = INDYCAM_VALUE_UNCHANGED;
3563 indycam_ctrl.red_saturation = INDYCAM_VALUE_UNCHANGED;
3564 indycam_ctrl.blue_saturation = INDYCAM_VALUE_UNCHANGED;
3565 indycam_ctrl.gamma = INDYCAM_VALUE_UNCHANGED;
3566
3567 switch(control->id) {
3568 case V4L2_CID_AUTOGAIN:
3569 indycam_ctrl.agc = control->value;
3570 break;
3571 case V4L2_CID_AUTO_WHITE_BALANCE:
3572 indycam_ctrl.awb = control->value;
3573 break;
3574 case V4L2_CID_GAIN:
3575 indycam_ctrl.gain = control->value;
3576 break;
3577 case V4L2_CID_PRIVATE_BASE:
3578 indycam_ctrl.red_saturation = control->value;
3579 break;
3580 case V4L2_CID_PRIVATE_BASE + 1:
3581 indycam_ctrl.blue_saturation = control->value;
3582 break;
3583 case V4L2_CID_RED_BALANCE:
3584 indycam_ctrl.red_balance = control->value;
3585 break;
3586 case V4L2_CID_BLUE_BALANCE:
3587 indycam_ctrl.blue_balance = control->value;
3588 break;
3589 case V4L2_CID_EXPOSURE:
3590 indycam_ctrl.shutter = control->value;
3591 break;
3592 case V4L2_CID_GAMMA:
3593 indycam_ctrl.gamma = control->value;
3594 break;
3595 default:
3596 err = -EINVAL;
3597 }
3598
3599 if (!err)
3600 i2c_camera_command(DECODER_INDYCAM_SET_CONTROLS,
3601 &indycam_ctrl);
3602 break;
3603 case VINO_INPUT_COMPOSITE:
3604 case VINO_INPUT_SVIDEO:
3605 for (i = 0; i < VINO_SAA7191_V4L2_CONTROL_COUNT; i++) {
3606 if (vino_saa7191_v4l2_controls[i].id ==
3607 control->id) {
3608 if ((control->value >=
3609 vino_saa7191_v4l2_controls[i].minimum)
3610 && (control->value <=
3611 vino_saa7191_v4l2_controls[i].
3612 maximum)) {
3613 goto ok2;
3614 } else {
3615 err = -ERANGE;
3616 goto error;
3617 }
3618 }
3619 }
3620 err = -EINVAL;
3621 goto error;
3622
3623ok2:
3624 saa7191_ctrl.hue = SAA7191_VALUE_UNCHANGED;
3625 saa7191_ctrl.vtrc = SAA7191_VALUE_UNCHANGED;
3626
3627 switch(control->id) {
3628 case V4L2_CID_HUE:
3629 saa7191_ctrl.hue = control->value;
3630 break;
3631 case V4L2_CID_PRIVATE_BASE:
3632 saa7191_ctrl.vtrc = control->value;
3633 break;
3634 default:
3635 err = -EINVAL;
3636 }
3637
3638 if (!err)
3639 i2c_decoder_command(DECODER_SAA7191_SET_CONTROLS,
3640 &saa7191_ctrl);
3641 break;
3642 default:
3643 err = -EINVAL;
3644 }
3645
3646error:
3647 spin_unlock(&vino_drvdata->input_lock);
3648
3649 return err;
3650}
3651
3652/* File operations */
3653
3654static int vino_open(struct inode *inode, struct file *file)
3655{
3656 struct video_device *dev = video_devdata(file);
3657 struct vino_channel_settings *vcs = video_get_drvdata(dev);
3658 int ret = 0;
3659 dprintk("open(): channel = %c\n",
3660 (vcs->channel == VINO_CHANNEL_A) ? 'A' : 'B');
3661
3662 down(&vcs->sem);
3663
3664 if (vcs->users) {
3665 dprintk("open(): driver busy\n");
3666 ret = -EBUSY;
3667 goto out;
3668 }
3669
3670 ret = vino_acquire_input(vcs);
3671 if (ret) {
3672 dprintk("open(): vino_acquire_input() failed\n");
3673 goto out;
3674 }
3675
3676 vcs->users++;
3677
3678 out:
3679 up(&vcs->sem);
3680
3681 dprintk("open(): %s!\n", ret ? "failed" : "complete");
3682
3683 return ret;
3684}
3685
3686static int vino_close(struct inode *inode, struct file *file)
3687{
3688 struct video_device *dev = video_devdata(file);
3689 struct vino_channel_settings *vcs = video_get_drvdata(dev);
3690 dprintk("close():\n");
3691
3692 down(&vcs->sem);
3693
3694 vcs->users--;
3695
3696 if (!vcs->users) {
3697 vino_release_input(vcs);
3698
3699 /* stop DMA and free buffers */
3700 vino_capture_stop(vcs);
3701 vino_queue_free(&vcs->fb_queue);
3702 }
3703
3704 up(&vcs->sem);
3705
3706 return 0;
3707}
3708
3709static void vino_vm_open(struct vm_area_struct *vma)
3710{
3711 struct vino_framebuffer *fb = vma->vm_private_data;
3712
3713 fb->map_count++;
3714 dprintk("vino_vm_open(): count = %d\n", fb->map_count);
3715}
3716
3717static void vino_vm_close(struct vm_area_struct *vma)
3718{
3719 struct vino_framebuffer *fb = vma->vm_private_data;
3720
3721 fb->map_count--;
3722 dprintk("vino_vm_close(): count = %d\n", fb->map_count);
3723}
3724
3725static struct vm_operations_struct vino_vm_ops = {
3726 .open = vino_vm_open,
3727 .close = vino_vm_close,
3728};
3729
3730static int vino_mmap(struct file *file, struct vm_area_struct *vma)
3731{
3732 struct video_device *dev = video_devdata(file);
3733 struct vino_channel_settings *vcs = video_get_drvdata(dev);
3734
3735 unsigned long start = vma->vm_start;
3736 unsigned long size = vma->vm_end - vma->vm_start;
3737 unsigned long offset = vma->vm_pgoff << PAGE_SHIFT;
3738
3739 struct vino_framebuffer *fb = NULL;
3740 unsigned int i, length;
3741 int ret = 0;
3742
3743 dprintk("mmap():\n");
3744
3745 // TODO: reject mmap if already mapped
3746
3747 if (down_interruptible(&vcs->sem))
3748 return -EINTR;
3749
3750 if (vcs->reading) {
3751 ret = -EBUSY;
3752 goto out;
3753 }
3754
3755 // TODO: check queue type
3756
3757 if (!(vma->vm_flags & VM_WRITE)) {
3758 dprintk("mmap(): app bug: PROT_WRITE please\n");
3759 ret = -EINVAL;
3760 goto out;
3761 }
3762 if (!(vma->vm_flags & VM_SHARED)) {
3763 dprintk("mmap(): app bug: MAP_SHARED please\n");
3764 ret = -EINVAL;
3765 goto out;
3766 }
3767
3768 /* find the correct buffer using offset */
3769 length = vino_queue_get_length(&vcs->fb_queue);
3770 if (length == 0) {
3771 dprintk("mmap(): queue not initialized\n");
3772 ret = -EINVAL;
3773 goto out;
3774 }
3775
3776 for (i = 0; i < length; i++) {
3777 fb = vino_queue_get_buffer(&vcs->fb_queue, i);
3778 if (fb == NULL) {
3779 dprintk("mmap(): vino_queue_get_buffer() failed\n");
3780 ret = -EINVAL;
3781 goto out;
3782 }
3783
3784 if (fb->offset == offset)
3785 goto found;
3786 }
3787
3788 dprintk("mmap(): invalid offset = %lu\n", offset);
3789 ret = -EINVAL;
3790 goto out;
3791
3792found:
3793 dprintk("mmap(): buffer = %d\n", i);
3794
3795 if (size > (fb->desc_table.page_count * PAGE_SIZE)) {
3796 dprintk("mmap(): failed: size = %lu > %lu\n",
3797 size, fb->desc_table.page_count * PAGE_SIZE);
3798 ret = -EINVAL;
3799 goto out;
3800 }
3801
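	/* the buffer is made of separately allocated pages, so remap it into
	 * user space one page at a time */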
3802 for (i = 0; i < fb->desc_table.page_count; i++) {
3803 unsigned long pfn =
3804 virt_to_phys((void *)fb->desc_table.virtual[i]) >>
3805 PAGE_SHIFT;
3806
3807 if (size < PAGE_SIZE)
3808 break;
3809
3810 // protection was: PAGE_READONLY
3811 if (remap_pfn_range(vma, start, pfn, PAGE_SIZE,
3812 vma->vm_page_prot)) {
3813 dprintk("mmap(): remap_pfn_range() failed\n");
3814 ret = -EAGAIN;
3815 goto out;
3816 }
3817
3818 start += PAGE_SIZE;
3819 size -= PAGE_SIZE;
3820 }
3821
3822 fb->map_count = 1;
3823
3824 vma->vm_flags |= VM_DONTEXPAND | VM_RESERVED;
3825 vma->vm_flags &= ~VM_IO;
3826 vma->vm_private_data = fb;
3827 vma->vm_file = file;
3828 vma->vm_ops = &vino_vm_ops;
3829
3830out:
3831 up(&vcs->sem);
3832
3833 return ret;
3834}
3835
3836static unsigned int vino_poll(struct file *file, poll_table *pt)
3837{
3838 struct video_device *dev = video_devdata(file);
3839 struct vino_channel_settings *vcs = video_get_drvdata(dev);
3840 unsigned int outgoing;
3841 unsigned int ret = 0;
3842
3843 // lock mutex (?)
3844 // TODO: this has to be corrected for different read modes
3845
3846 dprintk("poll():\n");
3847
3848 if (vino_queue_get_outgoing(&vcs->fb_queue, &outgoing)) {
3849 dprintk("poll(): vino_queue_get_outgoing() failed\n");
3850 ret = POLLERR;
3851 goto error;
3852 }
3853 if (outgoing > 0)
3854 goto over;
3855
3856 poll_wait(file, &vcs->fb_queue.frame_wait_queue, pt);
3857
3858 if (vino_queue_get_outgoing(&vcs->fb_queue, &outgoing)) {
3859 dprintk("poll(): vino_queue_get_outgoing() failed\n");
3860 ret = POLLERR;
3861 goto error;
3862 }
3863
3864over:
3865 dprintk("poll(): data %savailable\n",
3866 (outgoing > 0) ? "" : "not ");
3867 if (outgoing > 0) {
3868 ret = POLLIN | POLLRDNORM;
3869 }
3870
3871error:
3872
3873 return ret;
3874}
3875
3876static int vino_do_ioctl(struct inode *inode, struct file *file,
3877 unsigned int cmd, void *arg)
3878{
3879 struct video_device *dev = video_devdata(file);
3880 struct vino_channel_settings *vcs = video_get_drvdata(dev);
3881
3882 switch (_IOC_TYPE(cmd)) {
3883 case 'v':
3884 dprintk("ioctl(): V4L1 unsupported (0x%08x)\n", cmd);
3885 break;
3886 case 'V':
3887 dprintk("ioctl(): V4L2 %s (0x%08x)\n",
3888 v4l2_ioctl_names[_IOC_NR(cmd)], cmd);
3889 break;
3890 default:
3891 dprintk("ioctl(): unsupported command 0x%08x\n", cmd);
3892 }
3893
3894 switch (cmd) {
3895 /* TODO: V4L1 interface (use compatibility layer?) */
3896 /* V4L2 interface */
3897 case VIDIOC_QUERYCAP: {
3898 vino_v4l2_querycap(arg);
3899 break;
3900 }
3901 case VIDIOC_ENUMINPUT: {
3902 return vino_v4l2_enuminput(vcs, arg);
3903 }
3904 case VIDIOC_G_INPUT: {
3905 return vino_v4l2_g_input(vcs, arg);
3906 }
3907 case VIDIOC_S_INPUT: {
3908 return vino_v4l2_s_input(vcs, arg);
3909 }
3910 case VIDIOC_ENUMSTD: {
3911 return vino_v4l2_enumstd(vcs, arg);
3912 }
3913 case VIDIOC_G_STD: {
3914 return vino_v4l2_g_std(vcs, arg);
3915 }
3916 case VIDIOC_S_STD: {
3917 return vino_v4l2_s_std(vcs, arg);
3918 }
3919 case VIDIOC_ENUM_FMT: {
3920 return vino_v4l2_enum_fmt(vcs, arg);
3921 }
3922 case VIDIOC_TRY_FMT: {
3923 return vino_v4l2_try_fmt(vcs, arg);
3924 }
3925 case VIDIOC_G_FMT: {
3926 return vino_v4l2_g_fmt(vcs, arg);
3927 }
3928 case VIDIOC_S_FMT: {
3929 return vino_v4l2_s_fmt(vcs, arg);
3930 }
3931 case VIDIOC_CROPCAP: {
3932 return vino_v4l2_cropcap(vcs, arg);
3933 }
3934 case VIDIOC_G_CROP: {
3935 return vino_v4l2_g_crop(vcs, arg);
3936 }
3937 case VIDIOC_S_CROP: {
3938 return vino_v4l2_s_crop(vcs, arg);
3939 }
3940 case VIDIOC_G_PARM: {
3941 return vino_v4l2_g_parm(vcs, arg);
3942 }
3943 case VIDIOC_S_PARM: {
3944 return vino_v4l2_s_parm(vcs, arg);
3945 }
3946 case VIDIOC_REQBUFS: {
3947 return vino_v4l2_reqbufs(vcs, arg);
3948 }
3949 case VIDIOC_QUERYBUF: {
3950 return vino_v4l2_querybuf(vcs, arg);
3951 }
3952 case VIDIOC_QBUF: {
3953 return vino_v4l2_qbuf(vcs, arg);
3954 }
3955 case VIDIOC_DQBUF: {
3956 return vino_v4l2_dqbuf(vcs, arg, file->f_flags & O_NONBLOCK);
3957 }
3958 case VIDIOC_STREAMON: {
3959 return vino_v4l2_streamon(vcs);
3960 }
3961 case VIDIOC_STREAMOFF: {
3962 return vino_v4l2_streamoff(vcs);
3963 }
3964 case VIDIOC_QUERYCTRL: {
3965 return vino_v4l2_queryctrl(vcs, arg);
3966 }
3967 case VIDIOC_G_CTRL: {
3968 return vino_v4l2_g_ctrl(vcs, arg);
3969 }
3970 case VIDIOC_S_CTRL: {
3971 return vino_v4l2_s_ctrl(vcs, arg);
3972 }
3973 default:
3974 return -ENOIOCTLCMD;
3975 }
3976
3977 return 0;
3978}
3979
3980static int vino_ioctl(struct inode *inode, struct file *file,
3981 unsigned int cmd, unsigned long arg)
3982{
3983 struct video_device *dev = video_devdata(file);
3984 struct vino_channel_settings *vcs = video_get_drvdata(dev);
3985 int ret;
3986
3987 if (down_interruptible(&vcs->sem))
3988 return -EINTR;
3989
3990 ret = video_usercopy(inode, file, cmd, arg, vino_do_ioctl);
3991
3992 up(&vcs->sem);
3993
3994 return ret;
3995}
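/*
 * Illustrative sketch only -- not part of this driver.  Assuming a
 * /dev/video0 node bound to one VINO channel, it shows roughly how a V4L2
 * application would exercise the mmap/streaming ioctls handled above.
 * Error checking is omitted for brevity; the #if 0 keeps it out of the build.
 */
#if 0
#include <fcntl.h>
#include <string.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <sys/mman.h>
#include <linux/videodev2.h>

static int vino_capture_one_frame_example(void)
{
	struct v4l2_requestbuffers req;
	struct v4l2_buffer buf;
	enum v4l2_buf_type type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
	void *data;
	int fd = open("/dev/video0", O_RDWR);	/* assumed device node */

	if (fd < 0)
		return -1;

	/* ask the driver for memory-mapped capture buffers (vino_v4l2_reqbufs) */
	memset(&req, 0, sizeof(req));
	req.count = 4;
	req.type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
	req.memory = V4L2_MEMORY_MMAP;
	ioctl(fd, VIDIOC_REQBUFS, &req);

	/* query buffer 0 and map it (vino_v4l2_querybuf / vino_mmap) */
	memset(&buf, 0, sizeof(buf));
	buf.type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
	buf.memory = V4L2_MEMORY_MMAP;
	buf.index = 0;
	ioctl(fd, VIDIOC_QUERYBUF, &buf);
	data = mmap(NULL, buf.length, PROT_READ | PROT_WRITE, MAP_SHARED,
		    fd, buf.m.offset);

	/* queue the buffer, start streaming and wait for a captured frame */
	ioctl(fd, VIDIOC_QBUF, &buf);		/* vino_v4l2_qbuf */
	ioctl(fd, VIDIOC_STREAMON, &type);	/* vino_v4l2_streamon */
	ioctl(fd, VIDIOC_DQBUF, &buf);		/* blocks in vino_v4l2_dqbuf */

	/* buf.bytesused bytes of image data are now available at 'data' */

	ioctl(fd, VIDIOC_STREAMOFF, &type);
	munmap(data, buf.length);
	close(fd);
	return 0;
}
#endif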
3996
3997/* Initialization and cleanup */
3998
3999// __initdata
4000static int vino_init_stage = 0;
4001
4002static struct file_operations vino_fops = {
4003	.owner = THIS_MODULE,
4004	.open = vino_open,
4005	.release = vino_close,
4006	.ioctl = vino_ioctl,
4007	.mmap = vino_mmap,
4008	.poll = vino_poll,
4009	.llseek = no_llseek,
4010};
212	.owner = THIS_MODULE,
213	.type = VID_TYPE_CAPTURE | VID_TYPE_SUBCAPTURE,
214	.hardware = VID_HARDWARE_VINO,
215	.name = "VINO",
216	.open = vino_open,
217	.close = vino_close,
218	.ioctl = vino_ioctl,
219	.mmap = vino_mmap,
220};
221
222static int __init vino_init(void)
4011
4012static struct video_device v4l_device_template = {
4013 .name = "NOT SET",
4014 //.type = VID_TYPE_CAPTURE | VID_TYPE_SUBCAPTURE |
4015 // VID_TYPE_CLIPPING | VID_TYPE_SCALES, VID_TYPE_OVERLAY
4016 .hardware = VID_HARDWARE_VINO,
4017 .fops = &vino_fops,
4018 .minor = -1,
4019};
4020
4021static void vino_module_cleanup(int stage)
4022{
4023 switch(stage) {
4024 case 10:
4025 video_unregister_device(vino_drvdata->b.v4l_device);
4026 vino_drvdata->b.v4l_device = NULL;
4027 case 9:
4028 video_unregister_device(vino_drvdata->a.v4l_device);
4029 vino_drvdata->a.v4l_device = NULL;
4030 case 8:
4031 vino_i2c_del_bus();
4032 case 7:
4033 free_irq(SGI_VINO_IRQ, NULL);
4034 case 6:
4035 if (vino_drvdata->b.v4l_device) {
4036 video_device_release(vino_drvdata->b.v4l_device);
4037 vino_drvdata->b.v4l_device = NULL;
4038 }
4039 case 5:
4040 if (vino_drvdata->a.v4l_device) {
4041 video_device_release(vino_drvdata->a.v4l_device);
4042 vino_drvdata->a.v4l_device = NULL;
4043 }
4044 case 4:
4045 /* all entries in dma_cpu dummy table have the same address */
4046 dma_unmap_single(NULL,
4047 vino_drvdata->dummy_desc_table.dma_cpu[0],
4048 PAGE_SIZE, DMA_FROM_DEVICE);
4049 dma_free_coherent(NULL, VINO_DUMMY_DESC_COUNT
4050 * sizeof(dma_addr_t),
4051 (void *)vino_drvdata->
4052 dummy_desc_table.dma_cpu,
4053 vino_drvdata->dummy_desc_table.dma);
4054 case 3:
4055 free_page(vino_drvdata->dummy_page);
4056 case 2:
4057 kfree(vino_drvdata);
4058 case 1:
4059 iounmap(vino);
4060 case 0:
4061 break;
4062 default:
4063 dprintk("vino_module_cleanup(): invalid cleanup stage = %d\n",
4064 stage);
4065 }
4066}
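
The cleanup function above relies on deliberate switch fall-through: calling it with the highest completed init stage unwinds every earlier stage in reverse order, so each failure path only has to pass the current stage counter. A minimal user-space sketch of the same pattern (the stage actions here are invented placeholders, not the driver's real resources):

#include <stdio.h>

/* Unwind in reverse: entering at stage N falls through every lower case. */
static void cleanup(int stage)
{
	switch (stage) {
	case 3:
		puts("undo stage 3: unregister device");
	case 2:
		puts("undo stage 2: free DMA table");
	case 1:
		puts("undo stage 1: unmap registers");
	case 0:
		break;
	default:
		fprintf(stderr, "invalid cleanup stage %d\n", stage);
	}
}

int main(void)
{
	int stage = 0;

	stage++;		/* stage 1 completed */
	stage++;		/* stage 2 completed */
	/* stage 3 failed here, so only stages 1-2 get undone */
	cleanup(stage);
	return 0;
}
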
4067
4068static int vino_probe(void)
223{ 4069{
224 unsigned long rev; 4070 unsigned long rev_id;
225 int i, ret = 0;
226 4071
227 /* VINO is Indy specific beast */ 4072 if (ip22_is_fullhouse()) {
228 if (ip22_is_fullhouse()) 4073 printk(KERN_ERR "VINO doesn't exist in IP22 Fullhouse\n");
229 return -ENODEV; 4074 return -ENODEV;
4075 }
230 4076
231 /*
232 * VINO is in the EISA address space, so the sysid register will tell
233 * us if the EISA_PRESENT pin on MC has been pulled low.
234 *
235 * If EISA_PRESENT is not set we definitely don't have a VINO equiped
236 * system.
237 */
238 if (!(sgimc->systemid & SGIMC_SYSID_EPRESENT)) { 4077 if (!(sgimc->systemid & SGIMC_SYSID_EPRESENT)) {
239 printk(KERN_ERR "VINO not found\n"); 4078 printk(KERN_ERR "VINO is not found (EISA BUS not present)\n");
240 return -ENODEV; 4079 return -ENODEV;
241 } 4080 }
242 4081
243 vino = (struct sgi_vino *)ioremap(VINO_BASE, sizeof(struct sgi_vino)); 4082 vino = (struct sgi_vino *)ioremap(VINO_BASE, sizeof(struct sgi_vino));
244 if (!vino) 4083 if (!vino) {
4084 printk(KERN_ERR "VINO: ioremap() failed\n");
245 return -EIO; 4085 return -EIO;
4086 }
4087 vino_init_stage++;
246 4088
247 /* Okay, once we know that VINO is present we'll read its revision 4089 if (get_dbe(rev_id, &(vino->rev_id))) {
248 * safe way. One never knows... */ 4090 printk(KERN_ERR "Failed to read VINO revision register\n");
249 if (get_dbe(rev, &(vino->rev_id))) { 4091 vino_module_cleanup(vino_init_stage);
250 printk(KERN_ERR "VINO: failed to read revision register\n"); 4092 return -ENODEV;
251 ret = -ENODEV;
252 goto out_unmap;
253 } 4093 }
254 if (VINO_ID_VALUE(rev) != VINO_CHIP_ID) { 4094
255 printk(KERN_ERR "VINO is not VINO (Rev/ID: 0x%04lx)\n", rev); 4095 if (VINO_ID_VALUE(rev_id) != VINO_CHIP_ID) {
256 ret = -ENODEV; 4096 printk(KERN_ERR "Unknown VINO chip ID (Rev/ID: 0x%02lx)\n",
257 goto out_unmap; 4097 rev_id);
4098 vino_module_cleanup(vino_init_stage);
4099 return -ENODEV;
258 } 4100 }
259 printk(KERN_INFO "VINO Rev: 0x%02lx\n", VINO_REV_NUM(rev));
260 4101
261 Vino = (struct vino_video *) 4102 printk(KERN_INFO "VINO with chip ID %ld, revision %ld found\n",
262 kmalloc(sizeof(struct vino_video), GFP_KERNEL); 4103 VINO_ID_VALUE(rev_id), VINO_REV_NUM(rev_id));
263 if (!Vino) { 4104
264 ret = -ENOMEM; 4105 return 0;
265 goto out_unmap; 4106}
4107
4108static int vino_init(void)
4109{
4110 dma_addr_t dma_dummy_address;
4111 int i;
4112
4113 vino_drvdata = (struct vino_settings *)
4114 kmalloc(sizeof(struct vino_settings), GFP_KERNEL);
4115 if (!vino_drvdata) {
4116 vino_module_cleanup(vino_init_stage);
4117 return -ENOMEM;
266 } 4118 }
4119 memset(vino_drvdata, 0, sizeof(struct vino_settings));
4120 vino_init_stage++;
267 4121
268 Vino->dummy_page = get_zeroed_page(GFP_KERNEL | GFP_DMA); 4122 /* create a dummy dma descriptor */
269 if (!Vino->dummy_page) { 4123 vino_drvdata->dummy_page = get_zeroed_page(GFP_KERNEL | GFP_DMA);
270 ret = -ENOMEM; 4124 if (!vino_drvdata->dummy_page) {
271 goto out_free_vino; 4125 vino_module_cleanup(vino_init_stage);
4126 return -ENOMEM;
272 } 4127 }
273 for (i = 0; i < 4; i++) 4128 vino_init_stage++;
274 Vino->dummy_buf[i] = PHYSADDR(Vino->dummy_page); 4129
4130 // TODO: use page_count in dummy_desc_table
4131
4132 vino_drvdata->dummy_desc_table.dma_cpu =
4133 dma_alloc_coherent(NULL,
4134 VINO_DUMMY_DESC_COUNT * sizeof(dma_addr_t),
4135 &vino_drvdata->dummy_desc_table.dma,
4136 GFP_KERNEL | GFP_DMA);
4137 if (!vino_drvdata->dummy_desc_table.dma_cpu) {
4138 vino_module_cleanup(vino_init_stage);
4139 return -ENOMEM;
4140 }
4141 vino_init_stage++;
4142
4143 dma_dummy_address = dma_map_single(NULL,
4144 (void *)vino_drvdata->dummy_page,
4145 PAGE_SIZE, DMA_FROM_DEVICE);
4146 for (i = 0; i < VINO_DUMMY_DESC_COUNT; i++) {
4147 vino_drvdata->dummy_desc_table.dma_cpu[i] = dma_dummy_address;
4148 }
4149
4150 /* initialize VINO */
275 4151
276 vino->control = 0; 4152 vino->control = 0;
277 /* prevent VINO from throwing spurious interrupts */ 4153 vino->a.next_4_desc = vino_drvdata->dummy_desc_table.dma;
278 vino->a.next_4_desc = PHYSADDR(Vino->dummy_buf); 4154 vino->b.next_4_desc = vino_drvdata->dummy_desc_table.dma;
279 vino->b.next_4_desc = PHYSADDR(Vino->dummy_buf); 4155 udelay(VINO_DESC_FETCH_DELAY);
280 udelay(5); 4156
281 vino->intr_status = 0; 4157 vino->intr_status = 0;
282 /* set threshold level */
283 vino->a.fifo_thres = threshold_a;
284 vino->b.fifo_thres = threshold_b;
285 4158
286 init_MUTEX(&Vino->input_lock); 4159 vino->a.fifo_thres = VINO_FIFO_THRESHOLD_DEFAULT;
4160 vino->b.fifo_thres = VINO_FIFO_THRESHOLD_DEFAULT;
4161
4162 return 0;
4163}
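
vino_init() builds a "dummy" descriptor table in which every entry carries the DMA address of one zeroed scratch page, so whichever descriptor the hardware fetches next, spurious writes land somewhere harmless; that is also why the cleanup path unmaps only the first entry. A rough user-space analogue of the idea (plain pointers stand in for DMA addresses, names are hypothetical):

#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

#define DUMMY_DESC_COUNT	4	/* stand-in for VINO_DUMMY_DESC_COUNT */
#define SCRATCH_PAGE_SIZE	4096

int main(void)
{
	void *dummy_page = calloc(1, SCRATCH_PAGE_SIZE);
	uintptr_t *desc_table = malloc(DUMMY_DESC_COUNT * sizeof(*desc_table));
	int i;

	if (!dummy_page || !desc_table) {
		free(desc_table);
		free(dummy_page);
		return 1;
	}

	/* every descriptor carries the same harmless target, so whatever
	 * entry the engine fetches next, its writes hit the scratch page */
	for (i = 0; i < DUMMY_DESC_COUNT; i++)
		desc_table[i] = (uintptr_t)dummy_page;

	printf("all %d descriptors -> %p\n", DUMMY_DESC_COUNT, dummy_page);

	/* teardown mirrors the driver: the addresses are identical, so one
	 * "unmap" of entry 0 covers them all, then table and page are freed */
	free(desc_table);
	free(dummy_page);
	return 0;
}
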
4164
4165static int vino_init_channel_settings(struct vino_channel_settings *vcs,
4166 unsigned int channel, const char *name)
4167{
4168 vcs->channel = channel;
4169 vcs->input = VINO_INPUT_NONE;
4170 vcs->alpha = 0;
4171 vcs->users = 0;
4172 vcs->data_format = VINO_DATA_FMT_GREY;
4173 vcs->data_norm = VINO_DATA_NORM_NTSC;
4174 vcs->decimation = 1;
4175 vino_set_default_clipping(vcs);
4176 vino_set_default_framerate(vcs);
4177
4178 vcs->capturing = 0;
4179
4180 init_MUTEX(&vcs->sem);
4181 spin_lock_init(&vcs->capture_lock);
4182
4183 init_MUTEX(&vcs->fb_queue.queue_sem);
4184 spin_lock_init(&vcs->fb_queue.queue_lock);
4185 init_waitqueue_head(&vcs->fb_queue.frame_wait_queue);
4186
4187 vcs->v4l_device = video_device_alloc();
4188 if (!vcs->v4l_device) {
4189 vino_module_cleanup(vino_init_stage);
4190 return -ENOMEM;
4191 }
4192 vino_init_stage++;
4193
4194 memcpy(vcs->v4l_device, &v4l_device_template,
4195 sizeof(struct video_device));
4196 strcpy(vcs->v4l_device->name, name);
4197 vcs->v4l_device->release = video_device_release;
4198
4199 video_set_drvdata(vcs->v4l_device, vcs);
4200
4201 return 0;
4202}
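
Each channel's video_device is created by copying the static v4l_device_template and then filling in only the per-channel name, release hook and drvdata, which keeps the shared fops wiring in one place. A small sketch of that copy-a-template pattern, with invented struct and field names:

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

struct vdev {			/* invented stand-in for struct video_device */
	char name[32];
	int minor;
	void *drvdata;
};

/* shared defaults, analogous to v4l_device_template */
static const struct vdev vdev_template = {
	.name  = "NOT SET",
	.minor = -1,
};

static struct vdev *vdev_create(const char *name, void *drvdata)
{
	struct vdev *v = malloc(sizeof(*v));

	if (!v)
		return NULL;
	memcpy(v, &vdev_template, sizeof(*v));		/* start from the template */
	snprintf(v->name, sizeof(v->name), "%s", name);	/* per-instance fields */
	v->drvdata = drvdata;
	return v;
}

int main(void)
{
	int priv = 42;
	struct vdev *a = vdev_create("VINO A", &priv);

	if (!a)
		return 1;
	printf("%s minor=%d\n", a->name, a->minor);
	free(a);
	return 0;
}
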
4203
4204static int __init vino_module_init(void)
4205{
4206 int ret;
4207
4208 printk(KERN_INFO "SGI VINO driver version %s\n",
4209 VINO_MODULE_VERSION);
4210
4211 ret = vino_probe();
4212 if (ret)
4213 return ret;
4214
4215 ret = vino_init();
4216 if (ret)
4217 return ret;
4218
4219 /* initialize data structures */
287 4220
288 if (request_irq(SGI_VINO_IRQ, vino_interrupt, 0, vinostr, NULL)) { 4221 spin_lock_init(&vino_drvdata->vino_lock);
289 printk(KERN_ERR "VINO: irq%02d registration failed\n", 4222 spin_lock_init(&vino_drvdata->input_lock);
4223
4224 ret = vino_init_channel_settings(&vino_drvdata->a, VINO_CHANNEL_A,
4225 vino_v4l_device_name_a);
4226 if (ret)
4227 return ret;
4228
4229 ret = vino_init_channel_settings(&vino_drvdata->b, VINO_CHANNEL_B,
4230 vino_v4l_device_name_b);
4231 if (ret)
4232 return ret;
4233
4234 /* initialize hardware and register V4L devices */
4235
4236 ret = request_irq(SGI_VINO_IRQ, vino_interrupt, 0,
4237 vino_driver_description, NULL);
4238 if (ret) {
4239 printk(KERN_ERR "VINO: requesting IRQ %02d failed\n",
290 SGI_VINO_IRQ); 4240 SGI_VINO_IRQ);
291 ret = -EAGAIN; 4241 vino_module_cleanup(vino_init_stage);
292 goto out_free_page; 4242 return -EAGAIN;
293 } 4243 }
4244 vino_init_stage++;
294 4245
295 ret = vino_i2c_add_bus(); 4246 ret = vino_i2c_add_bus();
296 if (ret) { 4247 if (ret) {
297 printk(KERN_ERR "VINO: I2C bus registration failed\n"); 4248 printk(KERN_ERR "VINO I2C bus registration failed\n");
298 goto out_free_irq; 4249 vino_module_cleanup(vino_init_stage);
4250 return ret;
299 } 4251 }
4252 vino_init_stage++;
300 4253
301 if (video_register_device(&Vino->chA.vdev, VFL_TYPE_GRABBER, -1) < 0) { 4254 ret = video_register_device(vino_drvdata->a.v4l_device,
302 printk("%s, chnl %d: device registration failed.\n", 4255 VFL_TYPE_GRABBER, -1);
303 Vino->chA.vdev.name, Vino->chA.chan); 4256 if (ret < 0) {
304 ret = -EINVAL; 4257 printk(KERN_ERR "VINO channel A Video4Linux-device "
305 goto out_i2c_del_bus; 4258 "registration failed\n");
4259 vino_module_cleanup(vino_init_stage);
4260 return -EINVAL;
306 } 4261 }
307 if (video_register_device(&Vino->chB.vdev, VFL_TYPE_GRABBER, -1) < 0) { 4262 vino_init_stage++;
308 printk("%s, chnl %d: device registration failed.\n", 4263
309 Vino->chB.vdev.name, Vino->chB.chan); 4264 ret = video_register_device(vino_drvdata->b.v4l_device,
310 ret = -EINVAL; 4265 VFL_TYPE_GRABBER, -1);
311 goto out_unregister_vdev; 4266 if (ret < 0) {
4267 printk(KERN_ERR "VINO channel B Video4Linux-device "
4268 "registration failed\n");
4269 vino_module_cleanup(vino_init_stage);
4270 return -EINVAL;
312 } 4271 }
4272 vino_init_stage++;
313 4273
314 return 0; 4274#if defined(CONFIG_KMOD) && defined(MODULE)
4275 request_module("saa7191");
4276 request_module("indycam");
4277#endif
315 4278
316out_unregister_vdev: 4279 dprintk("init complete!\n");
317 video_unregister_device(&Vino->chA.vdev);
318out_i2c_del_bus:
319 vino_i2c_del_bus();
320out_free_irq:
321 free_irq(SGI_VINO_IRQ, NULL);
322out_free_page:
323 free_page(Vino->dummy_page);
324out_free_vino:
325 kfree(Vino);
326out_unmap:
327 iounmap(vino);
328 4280
329 return ret; 4281 return 0;
330} 4282}
331 4283
332static void __exit vino_exit(void) 4284static void __exit vino_module_exit(void)
333{ 4285{
334 video_unregister_device(&Vino->chA.vdev); 4286 dprintk("exiting, stage = %d ...\n", vino_init_stage);
335 video_unregister_device(&Vino->chB.vdev); 4287 vino_module_cleanup(vino_init_stage);
336 vino_i2c_del_bus(); 4288 dprintk("cleanup complete, exit!\n");
337 free_irq(SGI_VINO_IRQ, NULL);
338 free_page(Vino->dummy_page);
339 kfree(Vino);
340 iounmap(vino);
341} 4289}
342 4290
343module_init(vino_init); 4291module_init(vino_module_init);
344module_exit(vino_exit); 4292module_exit(vino_module_exit);
345
346MODULE_DESCRIPTION("Video4Linux driver for SGI Indy VINO (IndyCam)");
347MODULE_LICENSE("GPL");
diff --git a/drivers/media/video/vino.h b/drivers/media/video/vino.h
index d2fce472f35a..de2d615ae7c9 100644
--- a/drivers/media/video/vino.h
+++ b/drivers/media/video/vino.h
@@ -1,13 +1,19 @@
1/* 1/*
2 * Driver for the VINO (Video In No Out) system found in SGI Indys.
3 *
4 * This file is subject to the terms and conditions of the GNU General Public
5 * License version 2 as published by the Free Software Foundation.
6 *
2 * Copyright (C) 1999 Ulf Karlsson <ulfc@bun.falkenberg.se> 7 * Copyright (C) 1999 Ulf Karlsson <ulfc@bun.falkenberg.se>
3 * Copyright (C) 2003 Ladislav Michl <ladis@linux-mips.org> 8 * Copyright (C) 2003 Ladislav Michl <ladis@linux-mips.org>
4 */ 9 */
5 10
6#ifndef VINO_H 11#ifndef _VINO_H_
7#define VINO_H 12#define _VINO_H_
8 13
9#define VINO_BASE 0x00080000 /* Vino is in the EISA address space, 14#define VINO_BASE 0x00080000 /* Vino is in the EISA address space,
10 * but it is not an EISA bus card */ 15 * but it is not an EISA bus card */
16#define VINO_PAGE_SIZE 4096
11 17
12struct sgi_vino_channel { 18struct sgi_vino_channel {
13 u32 _pad_alpha; 19 u32 _pad_alpha;
@@ -21,8 +27,9 @@ struct sgi_vino_channel {
21 u32 _pad_clip_end; 27 u32 _pad_clip_end;
22 volatile u32 clip_end; 28 volatile u32 clip_end;
23 29
30#define VINO_FRAMERT_FULL 0xfff
24#define VINO_FRAMERT_PAL (1<<0) /* 0=NTSC 1=PAL */ 31#define VINO_FRAMERT_PAL (1<<0) /* 0=NTSC 1=PAL */
25#define VINO_FRAMERT_RT(x) (((x) & 0x1fff) << 1) /* bits 1:12 */ 32#define VINO_FRAMERT_RT(x) (((x) & 0xfff) << 1) /* bits 1:12 */
26 u32 _pad_frame_rate; 33 u32 _pad_frame_rate;
27 volatile u32 frame_rate; 34 volatile u32 frame_rate;
28 35
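
The VINO_FRAMERT_RT() change narrows the mask from 0x1fff to 0xfff: the rate field occupies bits 1:12, i.e. twelve bits, so a 13-bit mask lets an oversized argument spill into bit 13. A quick stand-alone check of the difference (macro names are local to this example):

#include <assert.h>
#include <stdio.h>

/* old and new encodings of the rate value living in bits 1..12 */
#define FRAMERT_RT_OLD(x)	(((x) & 0x1fff) << 1)
#define FRAMERT_RT_NEW(x)	(((x) & 0xfff) << 1)

int main(void)
{
	unsigned field_mask = 0x1ffe;	/* bits 1..12 of the register */

	/* an argument wider than 12 bits leaks into bit 13 with the old mask */
	printf("old: 0x%x  new: 0x%x\n",
	       FRAMERT_RT_OLD(0x1000), FRAMERT_RT_NEW(0x1000));
	assert((FRAMERT_RT_NEW(0x1000) & ~field_mask) == 0);
	assert((FRAMERT_RT_OLD(0x1000) & ~field_mask) != 0);
	return 0;
}
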
@@ -67,18 +74,18 @@ struct sgi_vino {
67 volatile u32 rev_id; 74 volatile u32 rev_id;
68 75
69#define VINO_CTRL_LITTLE_ENDIAN (1<<0) 76#define VINO_CTRL_LITTLE_ENDIAN (1<<0)
70#define VINO_CTRL_A_FIELD_TRANS_INT (1<<1) /* Field transferred int */ 77#define VINO_CTRL_A_EOF_INT (1<<1) /* Field transferred int */
71#define VINO_CTRL_A_FIFO_OF_INT (1<<2) /* FIFO overflow int */ 78#define VINO_CTRL_A_FIFO_INT (1<<2) /* FIFO overflow int */
72#define VINO_CTRL_A_END_DESC_TBL_INT (1<<3) /* End of desc table int */ 79#define VINO_CTRL_A_EOD_INT (1<<3) /* End of desc table int */
73#define VINO_CTRL_A_INT (VINO_CTRL_A_FIELD_TRANS_INT | \ 80#define VINO_CTRL_A_INT (VINO_CTRL_A_EOF_INT | \
74 VINO_CTRL_A_FIFO_OF_INT | \ 81 VINO_CTRL_A_FIFO_INT | \
75 VINO_CTRL_A_END_DESC_TBL_INT) 82 VINO_CTRL_A_EOD_INT)
76#define VINO_CTRL_B_FIELD_TRANS_INT (1<<4) /* Field transferred int */ 83#define VINO_CTRL_B_EOF_INT (1<<4) /* Field transferred int */
77#define VINO_CTRL_B_FIFO_OF_INT (1<<5) /* FIFO overflow int */ 84#define VINO_CTRL_B_FIFO_INT (1<<5) /* FIFO overflow int */
78#define VINO_CTRL_B_END_DESC_TBL_INT (1<<6) /* End of desc table int */ 85#define VINO_CTRL_B_EOD_INT (1<<6) /* End of desc table int */
79#define VINO_CTRL_B_INT (VINO_CTRL_B_FIELD_TRANS_INT | \ 86#define VINO_CTRL_B_INT (VINO_CTRL_B_EOF_INT | \
80 VINO_CTRL_B_FIFO_OF_INT | \ 87 VINO_CTRL_B_FIFO_INT | \
81 VINO_CTRL_B_END_DESC_TBL_INT) 88 VINO_CTRL_B_EOD_INT)
82#define VINO_CTRL_A_DMA_ENBL (1<<7) 89#define VINO_CTRL_A_DMA_ENBL (1<<7)
83#define VINO_CTRL_A_INTERLEAVE_ENBL (1<<8) 90#define VINO_CTRL_A_INTERLEAVE_ENBL (1<<8)
84#define VINO_CTRL_A_SYNC_ENBL (1<<9) 91#define VINO_CTRL_A_SYNC_ENBL (1<<9)
@@ -104,18 +111,18 @@ struct sgi_vino {
104 u32 _pad_control; 111 u32 _pad_control;
105 volatile u32 control; 112 volatile u32 control;
106 113
107#define VINO_INTSTAT_A_FIELD_TRANS (1<<0) /* Field transferred int */ 114#define VINO_INTSTAT_A_EOF (1<<0) /* Field transferred int */
108#define VINO_INTSTAT_A_FIFO_OF (1<<1) /* FIFO overflow int */ 115#define VINO_INTSTAT_A_FIFO (1<<1) /* FIFO overflow int */
109#define VINO_INTSTAT_A_END_DESC_TBL (1<<2) /* End of desc table int */ 116#define VINO_INTSTAT_A_EOD (1<<2) /* End of desc table int */
110#define VINO_INTSTAT_A (VINO_INTSTAT_A_FIELD_TRANS | \ 117#define VINO_INTSTAT_A (VINO_INTSTAT_A_EOF | \
111 VINO_INTSTAT_A_FIFO_OF | \ 118 VINO_INTSTAT_A_FIFO | \
112 VINO_INTSTAT_A_END_DESC_TBL) 119 VINO_INTSTAT_A_EOD)
113#define VINO_INTSTAT_B_FIELD_TRANS (1<<3) /* Field transferred int */ 120#define VINO_INTSTAT_B_EOF (1<<3) /* Field transferred int */
114#define VINO_INTSTAT_B_FIFO_OF (1<<4) /* FIFO overflow int */ 121#define VINO_INTSTAT_B_FIFO (1<<4) /* FIFO overflow int */
115#define VINO_INTSTAT_B_END_DESC_TBL (1<<5) /* End of desc table int */ 122#define VINO_INTSTAT_B_EOD (1<<5) /* End of desc table int */
116#define VINO_INTSTAT_B (VINO_INTSTAT_B_FIELD_TRANS | \ 123#define VINO_INTSTAT_B (VINO_INTSTAT_B_EOF | \
117 VINO_INTSTAT_B_FIFO_OF | \ 124 VINO_INTSTAT_B_FIFO | \
118 VINO_INTSTAT_B_END_DESC_TBL) 125 VINO_INTSTAT_B_EOD)
119 u32 _pad_intr_status; 126 u32 _pad_intr_status;
120 volatile u32 intr_status; 127 volatile u32 intr_status;
121 128
diff --git a/drivers/misc/Kconfig b/drivers/misc/Kconfig
index dea6589d1533..7fc692a8f5b0 100644
--- a/drivers/misc/Kconfig
+++ b/drivers/misc/Kconfig
@@ -6,7 +6,7 @@ menu "Misc devices"
6 6
7config IBM_ASM 7config IBM_ASM
8 tristate "Device driver for IBM RSA service processor" 8 tristate "Device driver for IBM RSA service processor"
9 depends on X86 && PCI && EXPERIMENTAL && BROKEN 9 depends on X86 && PCI && EXPERIMENTAL
10 ---help--- 10 ---help---
11 This option enables device driver support for in-band access to the 11 This option enables device driver support for in-band access to the
12 IBM RSA (Condor) service processor in eServer xSeries systems. 12 IBM RSA (Condor) service processor in eServer xSeries systems.
diff --git a/drivers/misc/ibmasm/uart.c b/drivers/misc/ibmasm/uart.c
index 914804512dba..7e98434cfa37 100644
--- a/drivers/misc/ibmasm/uart.c
+++ b/drivers/misc/ibmasm/uart.c
@@ -25,15 +25,15 @@
25#include <linux/termios.h> 25#include <linux/termios.h>
26#include <linux/tty.h> 26#include <linux/tty.h>
27#include <linux/serial_core.h> 27#include <linux/serial_core.h>
28#include <linux/serial.h>
29#include <linux/serial_reg.h> 28#include <linux/serial_reg.h>
29#include <linux/serial_8250.h>
30#include "ibmasm.h" 30#include "ibmasm.h"
31#include "lowlevel.h" 31#include "lowlevel.h"
32 32
33 33
34void ibmasm_register_uart(struct service_processor *sp) 34void ibmasm_register_uart(struct service_processor *sp)
35{ 35{
36 struct serial_struct serial; 36 struct uart_port uport;
37 void __iomem *iomem_base; 37 void __iomem *iomem_base;
38 38
39 iomem_base = sp->base_address + SCOUT_COM_B_BASE; 39 iomem_base = sp->base_address + SCOUT_COM_B_BASE;
@@ -47,14 +47,14 @@ void ibmasm_register_uart(struct service_processor *sp)
47 return; 47 return;
48 } 48 }
49 49
50 memset(&serial, 0, sizeof(serial)); 50 memset(&uport, 0, sizeof(struct uart_port));
51 serial.irq = sp->irq; 51 uport.irq = sp->irq;
52 serial.baud_base = 3686400 / 16; 52 uport.uartclk = 3686400;
53 serial.flags = UPF_AUTOPROBE | UPF_SHARE_IRQ; 53 uport.flags = UPF_AUTOPROBE | UPF_SHARE_IRQ;
54 serial.io_type = UPIO_MEM; 54 uport.iotype = UPIO_MEM;
55 serial.iomem_base = iomem_base; 55 uport.membase = iomem_base;
56 56
57 sp->serial_line = register_serial(&serial); 57 sp->serial_line = serial8250_register_port(&uport);
58 if (sp->serial_line < 0) { 58 if (sp->serial_line < 0) {
59 dev_err(sp->dev, "Failed to register serial port\n"); 59 dev_err(sp->dev, "Failed to register serial port\n");
60 return; 60 return;
@@ -68,5 +68,5 @@ void ibmasm_unregister_uart(struct service_processor *sp)
68 return; 68 return;
69 69
70 disable_uart_interrupts(sp->base_address); 70 disable_uart_interrupts(sp->base_address);
71 unregister_serial(sp->serial_line); 71 serial8250_unregister_port(sp->serial_line);
72} 72}
diff --git a/drivers/mmc/mmc.c b/drivers/mmc/mmc.c
index 0a8165974ba7..0a117c61cd18 100644
--- a/drivers/mmc/mmc.c
+++ b/drivers/mmc/mmc.c
@@ -2,6 +2,8 @@
2 * linux/drivers/mmc/mmc.c 2 * linux/drivers/mmc/mmc.c
3 * 3 *
4 * Copyright (C) 2003-2004 Russell King, All Rights Reserved. 4 * Copyright (C) 2003-2004 Russell King, All Rights Reserved.
5 * SD support Copyright (C) 2004 Ian Molton, All Rights Reserved.
6 * SD support Copyright (C) 2005 Pierre Ossman, All Rights Reserved.
5 * 7 *
6 * This program is free software; you can redistribute it and/or modify 8 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License version 2 as 9 * it under the terms of the GNU General Public License version 2 as
@@ -16,6 +18,8 @@
16#include <linux/delay.h> 18#include <linux/delay.h>
17#include <linux/pagemap.h> 19#include <linux/pagemap.h>
18#include <linux/err.h> 20#include <linux/err.h>
21#include <asm/scatterlist.h>
22#include <linux/scatterlist.h>
19 23
20#include <linux/mmc/card.h> 24#include <linux/mmc/card.h>
21#include <linux/mmc/host.h> 25#include <linux/mmc/host.h>
@@ -172,7 +176,81 @@ int mmc_wait_for_cmd(struct mmc_host *host, struct mmc_command *cmd, int retries
172 176
173EXPORT_SYMBOL(mmc_wait_for_cmd); 177EXPORT_SYMBOL(mmc_wait_for_cmd);
174 178
179/**
180 * mmc_wait_for_app_cmd - start an application command and wait for
181 completion
182 * @host: MMC host to start command
183 * @rca: RCA to send MMC_APP_CMD to
184 * @cmd: MMC command to start
185 * @retries: maximum number of retries
186 *
187 * Sends a MMC_APP_CMD, checks the card response, sends the command
188 * in the parameter and waits for it to complete. Return any error
189 * that occurred while the command was executing. Do not attempt to
190 * parse the response.
191 */
192int mmc_wait_for_app_cmd(struct mmc_host *host, unsigned int rca,
193 struct mmc_command *cmd, int retries)
194{
195 struct mmc_request mrq;
196 struct mmc_command appcmd;
197
198 int i, err;
199
200 BUG_ON(host->card_busy == NULL);
201 BUG_ON(retries < 0);
202
203 err = MMC_ERR_INVALID;
204
205 /*
206 * We have to resend MMC_APP_CMD for each attempt so
207 * we cannot use the retries field in mmc_command.
208 */
209 for (i = 0;i <= retries;i++) {
210 memset(&mrq, 0, sizeof(struct mmc_request));
211
212 appcmd.opcode = MMC_APP_CMD;
213 appcmd.arg = rca << 16;
214 appcmd.flags = MMC_RSP_R1;
215 appcmd.retries = 0;
216 memset(appcmd.resp, 0, sizeof(appcmd.resp));
217 appcmd.data = NULL;
218
219 mrq.cmd = &appcmd;
220 appcmd.data = NULL;
221
222 mmc_wait_for_req(host, &mrq);
223
224 if (appcmd.error) {
225 err = appcmd.error;
226 continue;
227 }
228
229 /* Check that card supported application commands */
230 if (!(appcmd.resp[0] & R1_APP_CMD))
231 return MMC_ERR_FAILED;
232
233 memset(&mrq, 0, sizeof(struct mmc_request));
234
235 memset(cmd->resp, 0, sizeof(cmd->resp));
236 cmd->retries = 0;
237
238 mrq.cmd = cmd;
239 cmd->data = NULL;
175 240
241 mmc_wait_for_req(host, &mrq);
242
243 err = cmd->error;
244 if (cmd->error == MMC_ERR_NONE)
245 break;
246 }
247
248 return err;
249}
250
251EXPORT_SYMBOL(mmc_wait_for_app_cmd);
252
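
Every SD application command has to be prefixed with MMC_APP_CMD (CMD55) addressed to the card's RCA, and because the prefix must be resent on each attempt the helper above wraps the whole CMD55-plus-ACMD pair in its own retry loop instead of using cmd->retries. A compact user-space model of that control flow (send_cmd() and the constants are stand-ins, not the real request path):

#include <stdio.h>

#define ERR_NONE	0
#define ERR_FAILED	1
#define R1_APP_CMD	(1u << 5)	/* "next command is an ACMD" ack bit */

/* stub standing in for the real request path */
static int send_cmd(int opcode, unsigned *resp)
{
	(void)opcode;
	*resp = R1_APP_CMD;		/* pretend the card accepted CMD55 */
	return ERR_NONE;
}

static int wait_for_app_cmd(int opcode, int retries)
{
	int i, err = ERR_FAILED;
	unsigned resp;

	/* CMD55 must be repeated for every attempt, so the retry loop
	 * wraps the whole prefix + command pair */
	for (i = 0; i <= retries; i++) {
		err = send_cmd(55, &resp);		/* MMC_APP_CMD */
		if (err != ERR_NONE)
			continue;
		if (!(resp & R1_APP_CMD))
			return ERR_FAILED;		/* card ignored CMD55 */

		err = send_cmd(opcode, &resp);		/* the actual ACMD */
		if (err == ERR_NONE)
			break;
	}
	return err;
}

int main(void)
{
	printf("ACMD41 -> %d\n", wait_for_app_cmd(41, 3));
	return 0;
}
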
253static int mmc_select_card(struct mmc_host *host, struct mmc_card *card);
176 254
177/** 255/**
178 * __mmc_claim_host - exclusively claim a host 256 * __mmc_claim_host - exclusively claim a host
@@ -206,16 +284,10 @@ int __mmc_claim_host(struct mmc_host *host, struct mmc_card *card)
206 spin_unlock_irqrestore(&host->lock, flags); 284 spin_unlock_irqrestore(&host->lock, flags);
207 remove_wait_queue(&host->wq, &wait); 285 remove_wait_queue(&host->wq, &wait);
208 286
209 if (card != (void *)-1 && host->card_selected != card) { 287 if (card != (void *)-1) {
210 struct mmc_command cmd; 288 err = mmc_select_card(host, card);
211 289 if (err != MMC_ERR_NONE)
212 host->card_selected = card; 290 return err;
213
214 cmd.opcode = MMC_SELECT_CARD;
215 cmd.arg = card->rca << 16;
216 cmd.flags = MMC_RSP_R1;
217
218 err = mmc_wait_for_cmd(host, &cmd, CMD_RETRIES);
219 } 291 }
220 292
221 return err; 293 return err;
@@ -245,6 +317,63 @@ void mmc_release_host(struct mmc_host *host)
245 317
246EXPORT_SYMBOL(mmc_release_host); 318EXPORT_SYMBOL(mmc_release_host);
247 319
320static int mmc_select_card(struct mmc_host *host, struct mmc_card *card)
321{
322 int err;
323 struct mmc_command cmd;
324
325 BUG_ON(host->card_busy == NULL);
326
327 if (host->card_selected == card)
328 return MMC_ERR_NONE;
329
330 host->card_selected = card;
331
332 cmd.opcode = MMC_SELECT_CARD;
333 cmd.arg = card->rca << 16;
334 cmd.flags = MMC_RSP_R1;
335
336 err = mmc_wait_for_cmd(host, &cmd, CMD_RETRIES);
337 if (err != MMC_ERR_NONE)
338 return err;
339
340 /*
341 * Default bus width is 1 bit.
342 */
343 host->ios.bus_width = MMC_BUS_WIDTH_1;
344
345 /*
346 * We can only change the bus width of the selected
347 * card so therefore we have to put the handling
348 * here.
349 */
350 if (host->caps & MMC_CAP_4_BIT_DATA) {
351 /*
352 * The card is in 1 bit mode by default so
353 * we only need to change if it supports the
354 * wider version.
355 */
356 if (mmc_card_sd(card) &&
357 (card->scr.bus_widths & SD_SCR_BUS_WIDTH_4)) {
358 struct mmc_command cmd;
359 cmd.opcode = SD_APP_SET_BUS_WIDTH;
360 cmd.arg = SD_BUS_WIDTH_4;
361 cmd.flags = MMC_RSP_R1;
362
363 err = mmc_wait_for_app_cmd(host, card->rca, &cmd,
364 CMD_RETRIES);
365 if (err != MMC_ERR_NONE)
366 return err;
367
368 host->ios.bus_width = MMC_BUS_WIDTH_4;
369 }
370 }
371
372 host->ops->set_ios(host, &host->ios);
373
374 return MMC_ERR_NONE;
375}
376
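
mmc_select_card() only switches a card to 4-bit transfers when the host advertises MMC_CAP_4_BIT_DATA and the card's SCR reports 4-bit support; otherwise it stays at the 1-bit default set during power-up. A tiny sketch of that decision (the flag values below are assumed for illustration, not taken from the headers):

#include <stdio.h>

#define HOST_CAP_4BIT	(1u << 0)	/* assumed host capability flag */
#define SCR_BUS_WIDTH_4	(1u << 2)	/* assumed 4-bit bit in the SCR nibble */

/* default is 1-bit; widen only if both host and card agree */
static int pick_bus_width(unsigned host_caps, unsigned scr_bus_widths)
{
	if ((host_caps & HOST_CAP_4BIT) && (scr_bus_widths & SCR_BUS_WIDTH_4))
		return 4;
	return 1;
}

int main(void)
{
	printf("capable host, 4-bit card: %d-bit\n",
	       pick_bus_width(HOST_CAP_4BIT, 0x5));
	printf("1-bit-only host:          %d-bit\n",
	       pick_bus_width(0, 0x5));
	return 0;
}
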
248/* 377/*
249 * Ensure that no card is selected. 378 * Ensure that no card is selected.
250 */ 379 */
@@ -322,48 +451,69 @@ static void mmc_decode_cid(struct mmc_card *card)
322 451
323 memset(&card->cid, 0, sizeof(struct mmc_cid)); 452 memset(&card->cid, 0, sizeof(struct mmc_cid));
324 453
325 /* 454 if (mmc_card_sd(card)) {
326 * The selection of the format here is guesswork based upon 455 /*
327 * information people have sent to date. 456 * SD doesn't currently have a version field so we will
328 */ 457 * have to assume we can parse this.
329 switch (card->csd.mmca_vsn) { 458 */
330 case 0: /* MMC v1.? */ 459 card->cid.manfid = UNSTUFF_BITS(resp, 120, 8);
331 case 1: /* MMC v1.4 */ 460 card->cid.oemid = UNSTUFF_BITS(resp, 104, 16);
332 card->cid.manfid = UNSTUFF_BITS(resp, 104, 24); 461 card->cid.prod_name[0] = UNSTUFF_BITS(resp, 96, 8);
333 card->cid.prod_name[0] = UNSTUFF_BITS(resp, 96, 8); 462 card->cid.prod_name[1] = UNSTUFF_BITS(resp, 88, 8);
334 card->cid.prod_name[1] = UNSTUFF_BITS(resp, 88, 8); 463 card->cid.prod_name[2] = UNSTUFF_BITS(resp, 80, 8);
335 card->cid.prod_name[2] = UNSTUFF_BITS(resp, 80, 8); 464 card->cid.prod_name[3] = UNSTUFF_BITS(resp, 72, 8);
336 card->cid.prod_name[3] = UNSTUFF_BITS(resp, 72, 8); 465 card->cid.prod_name[4] = UNSTUFF_BITS(resp, 64, 8);
337 card->cid.prod_name[4] = UNSTUFF_BITS(resp, 64, 8); 466 card->cid.hwrev = UNSTUFF_BITS(resp, 60, 4);
338 card->cid.prod_name[5] = UNSTUFF_BITS(resp, 56, 8); 467 card->cid.fwrev = UNSTUFF_BITS(resp, 56, 4);
339 card->cid.prod_name[6] = UNSTUFF_BITS(resp, 48, 8); 468 card->cid.serial = UNSTUFF_BITS(resp, 24, 32);
340 card->cid.hwrev = UNSTUFF_BITS(resp, 44, 4); 469 card->cid.year = UNSTUFF_BITS(resp, 12, 8);
341 card->cid.fwrev = UNSTUFF_BITS(resp, 40, 4); 470 card->cid.month = UNSTUFF_BITS(resp, 8, 4);
342 card->cid.serial = UNSTUFF_BITS(resp, 16, 24); 471
343 card->cid.month = UNSTUFF_BITS(resp, 12, 4); 472 card->cid.year += 2000; /* SD cards year offset */
344 card->cid.year = UNSTUFF_BITS(resp, 8, 4) + 1997; 473 } else {
345 break; 474 /*
346 475 * The selection of the format here is based upon published
347 case 2: /* MMC v2.x ? */ 476 * specs from sandisk and from what people have reported.
348 case 3: /* MMC v3.x ? */ 477 */
349 card->cid.manfid = UNSTUFF_BITS(resp, 120, 8); 478 switch (card->csd.mmca_vsn) {
350 card->cid.oemid = UNSTUFF_BITS(resp, 104, 16); 479 case 0: /* MMC v1.0 - v1.2 */
351 card->cid.prod_name[0] = UNSTUFF_BITS(resp, 96, 8); 480 case 1: /* MMC v1.4 */
352 card->cid.prod_name[1] = UNSTUFF_BITS(resp, 88, 8); 481 card->cid.manfid = UNSTUFF_BITS(resp, 104, 24);
353 card->cid.prod_name[2] = UNSTUFF_BITS(resp, 80, 8); 482 card->cid.prod_name[0] = UNSTUFF_BITS(resp, 96, 8);
354 card->cid.prod_name[3] = UNSTUFF_BITS(resp, 72, 8); 483 card->cid.prod_name[1] = UNSTUFF_BITS(resp, 88, 8);
355 card->cid.prod_name[4] = UNSTUFF_BITS(resp, 64, 8); 484 card->cid.prod_name[2] = UNSTUFF_BITS(resp, 80, 8);
356 card->cid.prod_name[5] = UNSTUFF_BITS(resp, 56, 8); 485 card->cid.prod_name[3] = UNSTUFF_BITS(resp, 72, 8);
357 card->cid.serial = UNSTUFF_BITS(resp, 16, 32); 486 card->cid.prod_name[4] = UNSTUFF_BITS(resp, 64, 8);
358 card->cid.month = UNSTUFF_BITS(resp, 12, 4); 487 card->cid.prod_name[5] = UNSTUFF_BITS(resp, 56, 8);
359 card->cid.year = UNSTUFF_BITS(resp, 8, 4) + 1997; 488 card->cid.prod_name[6] = UNSTUFF_BITS(resp, 48, 8);
360 break; 489 card->cid.hwrev = UNSTUFF_BITS(resp, 44, 4);
361 490 card->cid.fwrev = UNSTUFF_BITS(resp, 40, 4);
362 default: 491 card->cid.serial = UNSTUFF_BITS(resp, 16, 24);
363 printk("%s: card has unknown MMCA version %d\n", 492 card->cid.month = UNSTUFF_BITS(resp, 12, 4);
364 mmc_hostname(card->host), card->csd.mmca_vsn); 493 card->cid.year = UNSTUFF_BITS(resp, 8, 4) + 1997;
365 mmc_card_set_bad(card); 494 break;
366 break; 495
496 case 2: /* MMC v2.0 - v2.2 */
497 case 3: /* MMC v3.1 - v3.3 */
498 card->cid.manfid = UNSTUFF_BITS(resp, 120, 8);
499 card->cid.oemid = UNSTUFF_BITS(resp, 104, 16);
500 card->cid.prod_name[0] = UNSTUFF_BITS(resp, 96, 8);
501 card->cid.prod_name[1] = UNSTUFF_BITS(resp, 88, 8);
502 card->cid.prod_name[2] = UNSTUFF_BITS(resp, 80, 8);
503 card->cid.prod_name[3] = UNSTUFF_BITS(resp, 72, 8);
504 card->cid.prod_name[4] = UNSTUFF_BITS(resp, 64, 8);
505 card->cid.prod_name[5] = UNSTUFF_BITS(resp, 56, 8);
506 card->cid.serial = UNSTUFF_BITS(resp, 16, 32);
507 card->cid.month = UNSTUFF_BITS(resp, 12, 4);
508 card->cid.year = UNSTUFF_BITS(resp, 8, 4) + 1997;
509 break;
510
511 default:
512 printk("%s: card has unknown MMCA version %d\n",
513 mmc_hostname(card->host), card->csd.mmca_vsn);
514 mmc_card_set_bad(card);
515 break;
516 }
367 } 517 }
368} 518}
369 519
@@ -376,34 +526,86 @@ static void mmc_decode_csd(struct mmc_card *card)
376 unsigned int e, m, csd_struct; 526 unsigned int e, m, csd_struct;
377 u32 *resp = card->raw_csd; 527 u32 *resp = card->raw_csd;
378 528
379 /* 529 if (mmc_card_sd(card)) {
380 * We only understand CSD structure v1.1 and v2. 530 csd_struct = UNSTUFF_BITS(resp, 126, 2);
381 * v2 has extra information in bits 15, 11 and 10. 531 if (csd_struct != 0) {
382 */ 532 printk("%s: unrecognised CSD structure version %d\n",
383 csd_struct = UNSTUFF_BITS(resp, 126, 2); 533 mmc_hostname(card->host), csd_struct);
384 if (csd_struct != 1 && csd_struct != 2) { 534 mmc_card_set_bad(card);
385 printk("%s: unrecognised CSD structure version %d\n", 535 return;
386 mmc_hostname(card->host), csd_struct); 536 }
387 mmc_card_set_bad(card); 537
388 return; 538 m = UNSTUFF_BITS(resp, 115, 4);
539 e = UNSTUFF_BITS(resp, 112, 3);
540 csd->tacc_ns = (tacc_exp[e] * tacc_mant[m] + 9) / 10;
541 csd->tacc_clks = UNSTUFF_BITS(resp, 104, 8) * 100;
542
543 m = UNSTUFF_BITS(resp, 99, 4);
544 e = UNSTUFF_BITS(resp, 96, 3);
545 csd->max_dtr = tran_exp[e] * tran_mant[m];
546 csd->cmdclass = UNSTUFF_BITS(resp, 84, 12);
547
548 e = UNSTUFF_BITS(resp, 47, 3);
549 m = UNSTUFF_BITS(resp, 62, 12);
550 csd->capacity = (1 + m) << (e + 2);
551
552 csd->read_blkbits = UNSTUFF_BITS(resp, 80, 4);
553 } else {
554 /*
555 * We only understand CSD structure v1.1 and v1.2.
556 * v1.2 has extra information in bits 15, 11 and 10.
557 */
558 csd_struct = UNSTUFF_BITS(resp, 126, 2);
559 if (csd_struct != 1 && csd_struct != 2) {
560 printk("%s: unrecognised CSD structure version %d\n",
561 mmc_hostname(card->host), csd_struct);
562 mmc_card_set_bad(card);
563 return;
564 }
565
566 csd->mmca_vsn = UNSTUFF_BITS(resp, 122, 4);
567 m = UNSTUFF_BITS(resp, 115, 4);
568 e = UNSTUFF_BITS(resp, 112, 3);
569 csd->tacc_ns = (tacc_exp[e] * tacc_mant[m] + 9) / 10;
570 csd->tacc_clks = UNSTUFF_BITS(resp, 104, 8) * 100;
571
572 m = UNSTUFF_BITS(resp, 99, 4);
573 e = UNSTUFF_BITS(resp, 96, 3);
574 csd->max_dtr = tran_exp[e] * tran_mant[m];
575 csd->cmdclass = UNSTUFF_BITS(resp, 84, 12);
576
577 e = UNSTUFF_BITS(resp, 47, 3);
578 m = UNSTUFF_BITS(resp, 62, 12);
579 csd->capacity = (1 + m) << (e + 2);
580
581 csd->read_blkbits = UNSTUFF_BITS(resp, 80, 4);
389 } 582 }
583}
390 584
391 csd->mmca_vsn = UNSTUFF_BITS(resp, 122, 4); 585/*
392 m = UNSTUFF_BITS(resp, 115, 4); 586 * Given a 64-bit response, decode to our card SCR structure.
393 e = UNSTUFF_BITS(resp, 112, 3); 587 */
394 csd->tacc_ns = (tacc_exp[e] * tacc_mant[m] + 9) / 10; 588static void mmc_decode_scr(struct mmc_card *card)
395 csd->tacc_clks = UNSTUFF_BITS(resp, 104, 8) * 100; 589{
590 struct sd_scr *scr = &card->scr;
591 unsigned int scr_struct;
592 u32 resp[4];
593
594 BUG_ON(!mmc_card_sd(card));
396 595
397 m = UNSTUFF_BITS(resp, 99, 4); 596 resp[3] = card->raw_scr[1];
398 e = UNSTUFF_BITS(resp, 96, 3); 597 resp[2] = card->raw_scr[0];
399 csd->max_dtr = tran_exp[e] * tran_mant[m];
400 csd->cmdclass = UNSTUFF_BITS(resp, 84, 12);
401 598
402 e = UNSTUFF_BITS(resp, 47, 3); 599 scr_struct = UNSTUFF_BITS(resp, 60, 4);
403 m = UNSTUFF_BITS(resp, 62, 12); 600 if (scr_struct != 0) {
404 csd->capacity = (1 + m) << (e + 2); 601 printk("%s: unrecognised SCR structure version %d\n",
602 mmc_hostname(card->host), scr_struct);
603 mmc_card_set_bad(card);
604 return;
605 }
405 606
406 csd->read_blkbits = UNSTUFF_BITS(resp, 80, 4); 607 scr->sda_vsn = UNSTUFF_BITS(resp, 56, 4);
608 scr->bus_widths = UNSTUFF_BITS(resp, 48, 4);
407} 609}
408 610
409/* 611/*
@@ -487,6 +689,7 @@ static void mmc_power_up(struct mmc_host *host)
487 host->ios.bus_mode = MMC_BUSMODE_OPENDRAIN; 689 host->ios.bus_mode = MMC_BUSMODE_OPENDRAIN;
488 host->ios.chip_select = MMC_CS_DONTCARE; 690 host->ios.chip_select = MMC_CS_DONTCARE;
489 host->ios.power_mode = MMC_POWER_UP; 691 host->ios.power_mode = MMC_POWER_UP;
692 host->ios.bus_width = MMC_BUS_WIDTH_1;
490 host->ops->set_ios(host, &host->ios); 693 host->ops->set_ios(host, &host->ios);
491 694
492 mmc_delay(1); 695 mmc_delay(1);
@@ -505,6 +708,7 @@ static void mmc_power_off(struct mmc_host *host)
505 host->ios.bus_mode = MMC_BUSMODE_OPENDRAIN; 708 host->ios.bus_mode = MMC_BUSMODE_OPENDRAIN;
506 host->ios.chip_select = MMC_CS_DONTCARE; 709 host->ios.chip_select = MMC_CS_DONTCARE;
507 host->ios.power_mode = MMC_POWER_OFF; 710 host->ios.power_mode = MMC_POWER_OFF;
711 host->ios.bus_width = MMC_BUS_WIDTH_1;
508 host->ops->set_ios(host, &host->ios); 712 host->ops->set_ios(host, &host->ios);
509} 713}
510 714
@@ -536,6 +740,34 @@ static int mmc_send_op_cond(struct mmc_host *host, u32 ocr, u32 *rocr)
536 return err; 740 return err;
537} 741}
538 742
743static int mmc_send_app_op_cond(struct mmc_host *host, u32 ocr, u32 *rocr)
744{
745 struct mmc_command cmd;
746 int i, err = 0;
747
748 cmd.opcode = SD_APP_OP_COND;
749 cmd.arg = ocr;
750 cmd.flags = MMC_RSP_R3;
751
752 for (i = 100; i; i--) {
753 err = mmc_wait_for_app_cmd(host, 0, &cmd, CMD_RETRIES);
754 if (err != MMC_ERR_NONE)
755 break;
756
757 if (cmd.resp[0] & MMC_CARD_BUSY || ocr == 0)
758 break;
759
760 err = MMC_ERR_TIMEOUT;
761
762 mmc_delay(10);
763 }
764
765 if (rocr)
766 *rocr = cmd.resp[0];
767
768 return err;
769}
770
539/* 771/*
540 * Discover cards by requesting their CID. If this command 772 * Discover cards by requesting their CID. If this command
541 * times out, it is not an error; there are no further cards 773 * times out, it is not an error; there are no further cards
@@ -579,13 +811,38 @@ static void mmc_discover_cards(struct mmc_host *host)
579 811
580 card->state &= ~MMC_STATE_DEAD; 812 card->state &= ~MMC_STATE_DEAD;
581 813
582 cmd.opcode = MMC_SET_RELATIVE_ADDR; 814 if (host->mode == MMC_MODE_SD) {
583 cmd.arg = card->rca << 16; 815 mmc_card_set_sd(card);
584 cmd.flags = MMC_RSP_R1;
585 816
586 err = mmc_wait_for_cmd(host, &cmd, CMD_RETRIES); 817 cmd.opcode = SD_SEND_RELATIVE_ADDR;
587 if (err != MMC_ERR_NONE) 818 cmd.arg = 0;
588 mmc_card_set_dead(card); 819 cmd.flags = MMC_RSP_R1;
820
821 err = mmc_wait_for_cmd(host, &cmd, CMD_RETRIES);
822 if (err != MMC_ERR_NONE)
823 mmc_card_set_dead(card);
824 else {
825 card->rca = cmd.resp[0] >> 16;
826
827 if (!host->ops->get_ro) {
828 printk(KERN_WARNING "%s: host does not "
829 "support reading read-only "
830 "switch. assuming write-enable.\n",
831 mmc_hostname(host));
832 } else {
833 if (host->ops->get_ro(host))
834 mmc_card_set_readonly(card);
835 }
836 }
837 } else {
838 cmd.opcode = MMC_SET_RELATIVE_ADDR;
839 cmd.arg = card->rca << 16;
840 cmd.flags = MMC_RSP_R1;
841
842 err = mmc_wait_for_cmd(host, &cmd, CMD_RETRIES);
843 if (err != MMC_ERR_NONE)
844 mmc_card_set_dead(card);
845 }
589 } 846 }
590} 847}
591 848
@@ -617,6 +874,79 @@ static void mmc_read_csds(struct mmc_host *host)
617 } 874 }
618} 875}
619 876
877static void mmc_read_scrs(struct mmc_host *host)
878{
879 int err;
880 struct mmc_card *card;
881
882 struct mmc_request mrq;
883 struct mmc_command cmd;
884 struct mmc_data data;
885
886 struct scatterlist sg;
887
888 list_for_each_entry(card, &host->cards, node) {
889 if (card->state & (MMC_STATE_DEAD|MMC_STATE_PRESENT))
890 continue;
891 if (!mmc_card_sd(card))
892 continue;
893
894 err = mmc_select_card(host, card);
895 if (err != MMC_ERR_NONE) {
896 mmc_card_set_dead(card);
897 continue;
898 }
899
900 memset(&cmd, 0, sizeof(struct mmc_command));
901
902 cmd.opcode = MMC_APP_CMD;
903 cmd.arg = card->rca << 16;
904 cmd.flags = MMC_RSP_R1;
905
906 err = mmc_wait_for_cmd(host, &cmd, 0);
907 if ((err != MMC_ERR_NONE) || !(cmd.resp[0] & R1_APP_CMD)) {
908 mmc_card_set_dead(card);
909 continue;
910 }
911
912 memset(&cmd, 0, sizeof(struct mmc_command));
913
914 cmd.opcode = SD_APP_SEND_SCR;
915 cmd.arg = 0;
916 cmd.flags = MMC_RSP_R1;
917
918 memset(&data, 0, sizeof(struct mmc_data));
919
920 data.timeout_ns = card->csd.tacc_ns * 10;
921 data.timeout_clks = card->csd.tacc_clks * 10;
922 data.blksz_bits = 3;
923 data.blocks = 1;
924 data.flags = MMC_DATA_READ;
925 data.sg = &sg;
926 data.sg_len = 1;
927
928 memset(&mrq, 0, sizeof(struct mmc_request));
929
930 mrq.cmd = &cmd;
931 mrq.data = &data;
932
933 sg_init_one(&sg, (u8*)card->raw_scr, 8);
934
935 err = mmc_wait_for_req(host, &mrq);
936 if (err != MMC_ERR_NONE) {
937 mmc_card_set_dead(card);
938 continue;
939 }
940
941 card->raw_scr[0] = ntohl(card->raw_scr[0]);
942 card->raw_scr[1] = ntohl(card->raw_scr[1]);
943
944 mmc_decode_scr(card);
945 }
946
947 mmc_deselect_cards(host);
948}
949
620static unsigned int mmc_calculate_clock(struct mmc_host *host) 950static unsigned int mmc_calculate_clock(struct mmc_host *host)
621{ 951{
622 struct mmc_card *card; 952 struct mmc_card *card;
@@ -669,12 +999,24 @@ static void mmc_setup(struct mmc_host *host)
669 int err; 999 int err;
670 u32 ocr; 1000 u32 ocr;
671 1001
1002 host->mode = MMC_MODE_SD;
1003
672 mmc_power_up(host); 1004 mmc_power_up(host);
673 mmc_idle_cards(host); 1005 mmc_idle_cards(host);
674 1006
675 err = mmc_send_op_cond(host, 0, &ocr); 1007 err = mmc_send_app_op_cond(host, 0, &ocr);
676 if (err != MMC_ERR_NONE) 1008
677 return; 1009 /*
1010 * If we fail to detect any SD cards then try
1011 * searching for MMC cards.
1012 */
1013 if (err != MMC_ERR_NONE) {
1014 host->mode = MMC_MODE_MMC;
1015
1016 err = mmc_send_op_cond(host, 0, &ocr);
1017 if (err != MMC_ERR_NONE)
1018 return;
1019 }
678 1020
679 host->ocr = mmc_select_voltage(host, ocr); 1021 host->ocr = mmc_select_voltage(host, ocr);
680 1022
@@ -714,7 +1056,10 @@ static void mmc_setup(struct mmc_host *host)
714 * all get the idea that they should be ready for CMD2. 1056 * all get the idea that they should be ready for CMD2.
715 * (My SanDisk card seems to need this.) 1057 * (My SanDisk card seems to need this.)
716 */ 1058 */
717 mmc_send_op_cond(host, host->ocr, NULL); 1059 if (host->mode == MMC_MODE_SD)
1060 mmc_send_app_op_cond(host, host->ocr, NULL);
1061 else
1062 mmc_send_op_cond(host, host->ocr, NULL);
718 1063
719 mmc_discover_cards(host); 1064 mmc_discover_cards(host);
720 1065
@@ -725,6 +1070,9 @@ static void mmc_setup(struct mmc_host *host)
725 host->ops->set_ios(host, &host->ios); 1070 host->ops->set_ios(host, &host->ios);
726 1071
727 mmc_read_csds(host); 1072 mmc_read_csds(host);
1073
1074 if (host->mode == MMC_MODE_SD)
1075 mmc_read_scrs(host);
728} 1076}
729 1077
730 1078
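
mmc_setup() now always scans in SD mode first: if ACMD41 gets no answer it flips host->mode to MMC and retries with the native CMD1, and the same mode flag later chooses SD_SEND_RELATIVE_ADDR versus MMC_SET_RELATIVE_ADDR and whether SCRs are read. A condensed model of the fallback (the two send functions are stubs that fake "no SD card present"):

#include <stdio.h>

#define ERR_NONE	0
#define ERR_TIMEOUT	1

enum card_mode { MODE_SD, MODE_MMC };

/* stubs: fake a bus with an MMC card but no SD card on it */
static int send_app_op_cond(unsigned *ocr) { (void)ocr; return ERR_TIMEOUT; }
static int send_op_cond(unsigned *ocr)     { *ocr = 0x00ff8000u; return ERR_NONE; }

static int probe_bus(enum card_mode *mode, unsigned *ocr)
{
	*mode = MODE_SD;			/* optimistic: try SD (ACMD41) first */
	if (send_app_op_cond(ocr) != ERR_NONE) {
		*mode = MODE_MMC;		/* fall back to native MMC (CMD1) */
		if (send_op_cond(ocr) != ERR_NONE)
			return ERR_TIMEOUT;	/* nothing answered at all */
	}
	return ERR_NONE;
}

int main(void)
{
	enum card_mode mode;
	unsigned ocr = 0;

	if (probe_bus(&mode, &ocr) == ERR_NONE)
		printf("found %s card, ocr=0x%08x\n",
		       mode == MODE_SD ? "SD" : "MMC", ocr);
	return 0;
}
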
diff --git a/drivers/mmc/mmc_block.c b/drivers/mmc/mmc_block.c
index d4eee99c2bf6..fa83f15fdf16 100644
--- a/drivers/mmc/mmc_block.c
+++ b/drivers/mmc/mmc_block.c
@@ -95,6 +95,10 @@ static int mmc_blk_open(struct inode *inode, struct file *filp)
95 if (md->usage == 2) 95 if (md->usage == 2)
96 check_disk_change(inode->i_bdev); 96 check_disk_change(inode->i_bdev);
97 ret = 0; 97 ret = 0;
98
99 if ((filp->f_mode & FMODE_WRITE) &&
100 mmc_card_readonly(md->queue.card))
101 ret = -EROFS;
98 } 102 }
99 103
100 return ret; 104 return ret;
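
mmc_blk_open() now refuses write opens of a card whose write-protect switch was detected via the new get_ro() host hook, returning -EROFS while still allowing read-only opens. A trivial sketch of that check (the flag and errno values are illustrative):

#include <stdio.h>

#define FMODE_WRITE	0x2	/* illustrative value */
#define EROFS		30	/* illustrative errno value */

/* refuse write opens of a card whose write-protect switch is set */
static int blk_open_check(unsigned f_mode, int card_readonly)
{
	if ((f_mode & FMODE_WRITE) && card_readonly)
		return -EROFS;
	return 0;
}

int main(void)
{
	printf("ro card, opened for write: %d\n", blk_open_check(FMODE_WRITE, 1));
	printf("ro card, opened read-only: %d\n", blk_open_check(0, 1));
	return 0;
}
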
@@ -403,9 +407,10 @@ static int mmc_blk_probe(struct mmc_card *card)
403 if (err) 407 if (err)
404 goto out; 408 goto out;
405 409
406 printk(KERN_INFO "%s: %s %s %dKiB\n", 410 printk(KERN_INFO "%s: %s %s %dKiB %s\n",
407 md->disk->disk_name, mmc_card_id(card), mmc_card_name(card), 411 md->disk->disk_name, mmc_card_id(card), mmc_card_name(card),
408 (card->csd.capacity << card->csd.read_blkbits) / 1024); 412 (card->csd.capacity << card->csd.read_blkbits) / 1024,
413 mmc_card_readonly(card)?"(ro)":"");
409 414
410 mmc_set_drvdata(card, md); 415 mmc_set_drvdata(card, md);
411 add_disk(md->disk); 416 add_disk(md->disk);
diff --git a/drivers/mmc/mmc_sysfs.c b/drivers/mmc/mmc_sysfs.c
index ad8949810fc5..3f4a66ca9555 100644
--- a/drivers/mmc/mmc_sysfs.c
+++ b/drivers/mmc/mmc_sysfs.c
@@ -34,6 +34,7 @@ MMC_ATTR(cid, "%08x%08x%08x%08x\n", card->raw_cid[0], card->raw_cid[1],
34 card->raw_cid[2], card->raw_cid[3]); 34 card->raw_cid[2], card->raw_cid[3]);
35MMC_ATTR(csd, "%08x%08x%08x%08x\n", card->raw_csd[0], card->raw_csd[1], 35MMC_ATTR(csd, "%08x%08x%08x%08x\n", card->raw_csd[0], card->raw_csd[1],
36 card->raw_csd[2], card->raw_csd[3]); 36 card->raw_csd[2], card->raw_csd[3]);
37MMC_ATTR(scr, "%08x%08x\n", card->raw_scr[0], card->raw_scr[1]);
37MMC_ATTR(date, "%02d/%04d\n", card->cid.month, card->cid.year); 38MMC_ATTR(date, "%02d/%04d\n", card->cid.month, card->cid.year);
38MMC_ATTR(fwrev, "0x%x\n", card->cid.fwrev); 39MMC_ATTR(fwrev, "0x%x\n", card->cid.fwrev);
39MMC_ATTR(hwrev, "0x%x\n", card->cid.hwrev); 40MMC_ATTR(hwrev, "0x%x\n", card->cid.hwrev);
@@ -57,6 +58,8 @@ static struct device_attribute mmc_dev_attrs[] = {
57 __ATTR_NULL 58 __ATTR_NULL
58}; 59};
59 60
61static struct device_attribute mmc_dev_attr_scr = MMC_ATTR_RO(scr);
62
60 63
61static void mmc_release_card(struct device *dev) 64static void mmc_release_card(struct device *dev)
62{ 65{
@@ -207,10 +210,20 @@ void mmc_init_card(struct mmc_card *card, struct mmc_host *host)
207 */ 210 */
208int mmc_register_card(struct mmc_card *card) 211int mmc_register_card(struct mmc_card *card)
209{ 212{
213 int ret;
214
210 snprintf(card->dev.bus_id, sizeof(card->dev.bus_id), 215 snprintf(card->dev.bus_id, sizeof(card->dev.bus_id),
211 "%s:%04x", mmc_hostname(card->host), card->rca); 216 "%s:%04x", mmc_hostname(card->host), card->rca);
212 217
213 return device_add(&card->dev); 218 ret = device_add(&card->dev);
219 if (ret == 0) {
220 if (mmc_card_sd(card)) {
221 ret = device_create_file(&card->dev, &mmc_dev_attr_scr);
222 if (ret)
223 device_del(&card->dev);
224 }
225 }
226 return ret;
214} 227}
215 228
216/* 229/*
@@ -219,8 +232,12 @@ int mmc_register_card(struct mmc_card *card)
219 */ 232 */
220void mmc_remove_card(struct mmc_card *card) 233void mmc_remove_card(struct mmc_card *card)
221{ 234{
222 if (mmc_card_present(card)) 235 if (mmc_card_present(card)) {
236 if (mmc_card_sd(card))
237 device_remove_file(&card->dev, &mmc_dev_attr_scr);
238
223 device_del(&card->dev); 239 device_del(&card->dev);
240 }
224 241
225 put_device(&card->dev); 242 put_device(&card->dev);
226} 243}
diff --git a/drivers/mmc/pxamci.c b/drivers/mmc/pxamci.c
index b78beb1b0159..e99a53b09e32 100644
--- a/drivers/mmc/pxamci.c
+++ b/drivers/mmc/pxamci.c
@@ -362,6 +362,16 @@ static void pxamci_request(struct mmc_host *mmc, struct mmc_request *mrq)
362 pxamci_start_cmd(host, mrq->cmd, cmdat); 362 pxamci_start_cmd(host, mrq->cmd, cmdat);
363} 363}
364 364
365static int pxamci_get_ro(struct mmc_host *mmc)
366{
367 struct pxamci_host *host = mmc_priv(mmc);
368
369 if (host->pdata && host->pdata->get_ro)
370 return host->pdata->get_ro(mmc->dev);
371 /* Host doesn't support read only detection so assume writeable */
372 return 0;
373}
374
365static void pxamci_set_ios(struct mmc_host *mmc, struct mmc_ios *ios) 375static void pxamci_set_ios(struct mmc_host *mmc, struct mmc_ios *ios)
366{ 376{
367 struct pxamci_host *host = mmc_priv(mmc); 377 struct pxamci_host *host = mmc_priv(mmc);
@@ -401,6 +411,7 @@ static void pxamci_set_ios(struct mmc_host *mmc, struct mmc_ios *ios)
401 411
402static struct mmc_host_ops pxamci_ops = { 412static struct mmc_host_ops pxamci_ops = {
403 .request = pxamci_request, 413 .request = pxamci_request,
414 .get_ro = pxamci_get_ro,
404 .set_ios = pxamci_set_ios, 415 .set_ios = pxamci_set_ios,
405}; 416};
406 417
diff --git a/drivers/mmc/wbsd.c b/drivers/mmc/wbsd.c
index 08ae22aed9e8..dec01d38c782 100644
--- a/drivers/mmc/wbsd.c
+++ b/drivers/mmc/wbsd.c
@@ -720,11 +720,28 @@ static void wbsd_prepare_data(struct wbsd_host* host, struct mmc_data* data)
720 * calculate CRC. 720 * calculate CRC.
721 * 721 *
722 * Space for CRC must be included in the size. 722 * Space for CRC must be included in the size.
723 * Two bytes are needed for each data line.
723 */ 724 */
724 blksize = (1 << data->blksz_bits) + 2; 725 if (host->bus_width == MMC_BUS_WIDTH_1)
726 {
727 blksize = (1 << data->blksz_bits) + 2;
728
729 wbsd_write_index(host, WBSD_IDX_PBSMSB, (blksize >> 4) & 0xF0);
730 wbsd_write_index(host, WBSD_IDX_PBSLSB, blksize & 0xFF);
731 }
732 else if (host->bus_width == MMC_BUS_WIDTH_4)
733 {
734 blksize = (1 << data->blksz_bits) + 2 * 4;
725 735
726 wbsd_write_index(host, WBSD_IDX_PBSMSB, (blksize >> 4) & 0xF0); 736 wbsd_write_index(host, WBSD_IDX_PBSMSB, ((blksize >> 4) & 0xF0)
727 wbsd_write_index(host, WBSD_IDX_PBSLSB, blksize & 0xFF); 737 | WBSD_DATA_WIDTH);
738 wbsd_write_index(host, WBSD_IDX_PBSLSB, blksize & 0xFF);
739 }
740 else
741 {
742 data->error = MMC_ERR_INVALID;
743 return;
744 }
728 745
729 /* 746 /*
730 * Clear the FIFO. This is needed even for DMA 747 * Clear the FIFO. This is needed even for DMA
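
The controller clocks out a CRC16 per active data line with every block, so the block size programmed into the chip is the payload plus 2 bytes in 1-bit mode and plus 8 bytes in 4-bit mode, exactly as the two branches above compute. A quick check of that arithmetic:

#include <stdio.h>

/* block size programmed into the controller: payload plus one CRC16
 * (2 bytes) per active data line */
static unsigned prog_blksize(unsigned blksz_bits, unsigned bus_width)
{
	return (1u << blksz_bits) + 2 * bus_width;
}

int main(void)
{
	printf("512-byte block, 1-bit bus: %u bytes\n", prog_blksize(9, 1));
	printf("512-byte block, 4-bit bus: %u bytes\n", prog_blksize(9, 4));
	return 0;
}
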
@@ -960,9 +977,9 @@ static void wbsd_set_ios(struct mmc_host* mmc, struct mmc_ios* ios)
960 struct wbsd_host* host = mmc_priv(mmc); 977 struct wbsd_host* host = mmc_priv(mmc);
961 u8 clk, setup, pwr; 978 u8 clk, setup, pwr;
962 979
963 DBGF("clock %uHz busmode %u powermode %u cs %u Vdd %u\n", 980 DBGF("clock %uHz busmode %u powermode %u cs %u Vdd %u width %u\n",
964 ios->clock, ios->bus_mode, ios->power_mode, ios->chip_select, 981 ios->clock, ios->bus_mode, ios->power_mode, ios->chip_select,
965 ios->vdd); 982 ios->vdd, ios->bus_width);
966 983
967 spin_lock_bh(&host->lock); 984 spin_lock_bh(&host->lock);
968 985
@@ -1010,6 +1027,7 @@ static void wbsd_set_ios(struct mmc_host* mmc, struct mmc_ios* ios)
1010 setup = wbsd_read_index(host, WBSD_IDX_SETUP); 1027 setup = wbsd_read_index(host, WBSD_IDX_SETUP);
1011 if (ios->chip_select == MMC_CS_HIGH) 1028 if (ios->chip_select == MMC_CS_HIGH)
1012 { 1029 {
1030 BUG_ON(ios->bus_width != MMC_BUS_WIDTH_1);
1013 setup |= WBSD_DAT3_H; 1031 setup |= WBSD_DAT3_H;
1014 host->flags |= WBSD_FIGNORE_DETECT; 1032 host->flags |= WBSD_FIGNORE_DETECT;
1015 } 1033 }
@@ -1025,12 +1043,41 @@ static void wbsd_set_ios(struct mmc_host* mmc, struct mmc_ios* ios)
1025 } 1043 }
1026 wbsd_write_index(host, WBSD_IDX_SETUP, setup); 1044 wbsd_write_index(host, WBSD_IDX_SETUP, setup);
1027 1045
1046 /*
1047 * Store bus width for later. Will be used when
1048 * setting up the data transfer.
1049 */
1050 host->bus_width = ios->bus_width;
1051
1028 spin_unlock_bh(&host->lock); 1052 spin_unlock_bh(&host->lock);
1029} 1053}
1030 1054
1055static int wbsd_get_ro(struct mmc_host* mmc)
1056{
1057 struct wbsd_host* host = mmc_priv(mmc);
1058 u8 csr;
1059
1060 spin_lock_bh(&host->lock);
1061
1062 csr = inb(host->base + WBSD_CSR);
1063 csr |= WBSD_MSLED;
1064 outb(csr, host->base + WBSD_CSR);
1065
1066 mdelay(1);
1067
1068 csr = inb(host->base + WBSD_CSR);
1069 csr &= ~WBSD_MSLED;
1070 outb(csr, host->base + WBSD_CSR);
1071
1072 spin_unlock_bh(&host->lock);
1073
1074 return csr & WBSD_WRPT;
1075}
1076
1031static struct mmc_host_ops wbsd_ops = { 1077static struct mmc_host_ops wbsd_ops = {
1032 .request = wbsd_request, 1078 .request = wbsd_request,
1033 .set_ios = wbsd_set_ios, 1079 .set_ios = wbsd_set_ios,
1080 .get_ro = wbsd_get_ro,
1034}; 1081};
1035 1082
1036/*****************************************************************************\ 1083/*****************************************************************************\
@@ -1355,6 +1402,7 @@ static int __devinit wbsd_alloc_mmc(struct device* dev)
1355 mmc->f_min = 375000; 1402 mmc->f_min = 375000;
1356 mmc->f_max = 24000000; 1403 mmc->f_max = 24000000;
1357 mmc->ocr_avail = MMC_VDD_32_33|MMC_VDD_33_34; 1404 mmc->ocr_avail = MMC_VDD_32_33|MMC_VDD_33_34;
1405 mmc->caps = MMC_CAP_4_BIT_DATA;
1358 1406
1359 spin_lock_init(&host->lock); 1407 spin_lock_init(&host->lock);
1360 1408
diff --git a/drivers/mmc/wbsd.h b/drivers/mmc/wbsd.h
index 8af43549f5d5..9005b5241b3c 100644
--- a/drivers/mmc/wbsd.h
+++ b/drivers/mmc/wbsd.h
@@ -106,6 +106,8 @@
106#define WBSD_CLK_16M 0x02 106#define WBSD_CLK_16M 0x02
107#define WBSD_CLK_24M 0x03 107#define WBSD_CLK_24M 0x03
108 108
109#define WBSD_DATA_WIDTH 0x01
110
109#define WBSD_DAT3_H 0x08 111#define WBSD_DAT3_H 0x08
110#define WBSD_FIFO_RESET 0x04 112#define WBSD_FIFO_RESET 0x04
111#define WBSD_SOFT_RESET 0x02 113#define WBSD_SOFT_RESET 0x02
@@ -164,6 +166,7 @@ struct wbsd_host
164 int firsterr; /* See fifo functions */ 166 int firsterr; /* See fifo functions */
165 167
166 u8 clk; /* Current clock speed */ 168 u8 clk; /* Current clock speed */
169 unsigned char bus_width; /* Current bus width */
167 170
168 int config; /* Config port */ 171 int config; /* Config port */
169 u8 unlock_code; /* Code to unlock config */ 172 u8 unlock_code; /* Code to unlock config */
diff --git a/drivers/mtd/nand/nand_base.c b/drivers/mtd/nand/nand_base.c
index eee5115658c8..04e54318bc6a 100644
--- a/drivers/mtd/nand/nand_base.c
+++ b/drivers/mtd/nand/nand_base.c
@@ -526,6 +526,7 @@ static void nand_wait_ready(struct mtd_info *mtd)
526 do { 526 do {
527 if (this->dev_ready(mtd)) 527 if (this->dev_ready(mtd))
528 return; 528 return;
529 touch_softlockup_watchdog();
529 } while (time_before(jiffies, timeo)); 530 } while (time_before(jiffies, timeo));
530} 531}
531 532
diff --git a/drivers/net/3c59x.c b/drivers/net/3c59x.c
index 07746b95fd83..455ba915ede7 100644
--- a/drivers/net/3c59x.c
+++ b/drivers/net/3c59x.c
@@ -973,6 +973,11 @@ static int vortex_suspend (struct pci_dev *pdev, pm_message_t state)
973 netif_device_detach(dev); 973 netif_device_detach(dev);
974 vortex_down(dev, 1); 974 vortex_down(dev, 1);
975 } 975 }
976 pci_save_state(pdev);
977 pci_enable_wake(pdev, pci_choose_state(pdev, state), 0);
978 free_irq(dev->irq, dev);
979 pci_disable_device(pdev);
980 pci_set_power_state(pdev, pci_choose_state(pdev, state));
976 } 981 }
977 return 0; 982 return 0;
978} 983}
@@ -980,8 +985,19 @@ static int vortex_suspend (struct pci_dev *pdev, pm_message_t state)
980static int vortex_resume (struct pci_dev *pdev) 985static int vortex_resume (struct pci_dev *pdev)
981{ 986{
982 struct net_device *dev = pci_get_drvdata(pdev); 987 struct net_device *dev = pci_get_drvdata(pdev);
988 struct vortex_private *vp = netdev_priv(dev);
983 989
984 if (dev && dev->priv) { 990 if (dev && vp) {
991 pci_set_power_state(pdev, PCI_D0);
992 pci_restore_state(pdev);
993 pci_enable_device(pdev);
994 pci_set_master(pdev);
995 if (request_irq(dev->irq, vp->full_bus_master_rx ?
996 &boomerang_interrupt : &vortex_interrupt, SA_SHIRQ, dev->name, dev)) {
997 printk(KERN_WARNING "%s: Could not reserve IRQ %d\n", dev->name, dev->irq);
998 pci_disable_device(pdev);
999 return -EBUSY;
1000 }
985 if (netif_running(dev)) { 1001 if (netif_running(dev)) {
986 vortex_up(dev); 1002 vortex_up(dev);
987 netif_device_attach(dev); 1003 netif_device_attach(dev);
@@ -1873,6 +1889,7 @@ vortex_timer(unsigned long data)
1873 { 1889 {
1874 spin_lock_bh(&vp->lock); 1890 spin_lock_bh(&vp->lock);
1875 mii_status = mdio_read(dev, vp->phys[0], 1); 1891 mii_status = mdio_read(dev, vp->phys[0], 1);
1892 mii_status = mdio_read(dev, vp->phys[0], 1);
1876 ok = 1; 1893 ok = 1;
1877 if (vortex_debug > 2) 1894 if (vortex_debug > 2)
1878 printk(KERN_DEBUG "%s: MII transceiver has status %4.4x.\n", 1895 printk(KERN_DEBUG "%s: MII transceiver has status %4.4x.\n",
diff --git a/drivers/net/Kconfig b/drivers/net/Kconfig
index ae9e7a579b94..6bb9232514b4 100644
--- a/drivers/net/Kconfig
+++ b/drivers/net/Kconfig
@@ -2058,6 +2058,13 @@ config BNX2
2058 To compile this driver as a module, choose M here: the module 2058 To compile this driver as a module, choose M here: the module
2059 will be called bnx2. This is recommended. 2059 will be called bnx2. This is recommended.
2060 2060
2061config SPIDER_NET
2062 tristate "Spider Gigabit Ethernet driver"
2063 depends on PCI && PPC_BPA
2064 help
2065 This driver supports the Gigabit Ethernet chips present on the
2066 Cell Processor-Based Blades from IBM.
2067
2061config GIANFAR 2068config GIANFAR
2062 tristate "Gianfar Ethernet" 2069 tristate "Gianfar Ethernet"
2063 depends on 85xx || 83xx 2070 depends on 85xx || 83xx
diff --git a/drivers/net/Makefile b/drivers/net/Makefile
index 5baafcd55610..8645c843cf4d 100644
--- a/drivers/net/Makefile
+++ b/drivers/net/Makefile
@@ -54,6 +54,8 @@ obj-$(CONFIG_STNIC) += stnic.o 8390.o
54obj-$(CONFIG_FEALNX) += fealnx.o 54obj-$(CONFIG_FEALNX) += fealnx.o
55obj-$(CONFIG_TIGON3) += tg3.o 55obj-$(CONFIG_TIGON3) += tg3.o
56obj-$(CONFIG_BNX2) += bnx2.o 56obj-$(CONFIG_BNX2) += bnx2.o
57spidernet-y += spider_net.o spider_net_ethtool.o sungem_phy.o
58obj-$(CONFIG_SPIDER_NET) += spidernet.o
57obj-$(CONFIG_TC35815) += tc35815.o 59obj-$(CONFIG_TC35815) += tc35815.o
58obj-$(CONFIG_SKGE) += skge.o 60obj-$(CONFIG_SKGE) += skge.o
59obj-$(CONFIG_SK98LIN) += sk98lin/ 61obj-$(CONFIG_SK98LIN) += sk98lin/
diff --git a/drivers/net/ac3200.c b/drivers/net/ac3200.c
index 91791ba37769..8a0af5453e21 100644
--- a/drivers/net/ac3200.c
+++ b/drivers/net/ac3200.c
@@ -275,7 +275,7 @@ static int __init ac_probe1(int ioaddr, struct net_device *dev)
275 return 0; 275 return 0;
276out2: 276out2:
277 if (ei_status.reg0) 277 if (ei_status.reg0)
278 iounmap((void *)dev->mem_start); 278 iounmap(ei_status.mem);
279out1: 279out1:
280 free_irq(dev->irq, dev); 280 free_irq(dev->irq, dev);
281out: 281out:
diff --git a/drivers/net/arcnet/arcnet.c b/drivers/net/arcnet/arcnet.c
index 4f9f69e22c1b..12ef52c193a3 100644
--- a/drivers/net/arcnet/arcnet.c
+++ b/drivers/net/arcnet/arcnet.c
@@ -597,7 +597,7 @@ static int arcnet_send_packet(struct sk_buff *skb, struct net_device *dev)
597 struct ArcProto *proto; 597 struct ArcProto *proto;
598 int txbuf; 598 int txbuf;
599 unsigned long flags; 599 unsigned long flags;
600 int freeskb = 0; 600 int freeskb, retval;
601 601
602 BUGMSG(D_DURING, 602 BUGMSG(D_DURING,
603 "transmit requested (status=%Xh, txbufs=%d/%d, len=%d, protocol %x)\n", 603 "transmit requested (status=%Xh, txbufs=%d/%d, len=%d, protocol %x)\n",
@@ -615,7 +615,7 @@ static int arcnet_send_packet(struct sk_buff *skb, struct net_device *dev)
615 if (skb->len - ARC_HDR_SIZE > XMTU && !proto->continue_tx) { 615 if (skb->len - ARC_HDR_SIZE > XMTU && !proto->continue_tx) {
616 BUGMSG(D_NORMAL, "fixme: packet too large: compensating badly!\n"); 616 BUGMSG(D_NORMAL, "fixme: packet too large: compensating badly!\n");
617 dev_kfree_skb(skb); 617 dev_kfree_skb(skb);
618 return 0; /* don't try again */ 618 return NETDEV_TX_OK; /* don't try again */
619 } 619 }
620 620
621 /* We're busy transmitting a packet... */ 621 /* We're busy transmitting a packet... */
@@ -623,8 +623,11 @@ static int arcnet_send_packet(struct sk_buff *skb, struct net_device *dev)
623 623
624 spin_lock_irqsave(&lp->lock, flags); 624 spin_lock_irqsave(&lp->lock, flags);
625 AINTMASK(0); 625 AINTMASK(0);
626 626 if(lp->next_tx == -1)
627 txbuf = get_arcbuf(dev); 627 txbuf = get_arcbuf(dev);
628 else {
629 txbuf = -1;
630 }
628 if (txbuf != -1) { 631 if (txbuf != -1) {
629 if (proto->prepare_tx(dev, pkt, skb->len, txbuf) && 632 if (proto->prepare_tx(dev, pkt, skb->len, txbuf) &&
630 !proto->ack_tx) { 633 !proto->ack_tx) {
@@ -638,6 +641,8 @@ static int arcnet_send_packet(struct sk_buff *skb, struct net_device *dev)
638 lp->outgoing.skb = skb; 641 lp->outgoing.skb = skb;
639 lp->outgoing.pkt = pkt; 642 lp->outgoing.pkt = pkt;
640 643
644 freeskb = 0;
645
641 if (proto->continue_tx && 646 if (proto->continue_tx &&
642 proto->continue_tx(dev, txbuf)) { 647 proto->continue_tx(dev, txbuf)) {
643 BUGMSG(D_NORMAL, 648 BUGMSG(D_NORMAL,
@@ -645,10 +650,12 @@ static int arcnet_send_packet(struct sk_buff *skb, struct net_device *dev)
645 "(proto='%c')\n", proto->suffix); 650 "(proto='%c')\n", proto->suffix);
646 } 651 }
647 } 652 }
648 653 retval = NETDEV_TX_OK;
654 dev->trans_start = jiffies;
649 lp->next_tx = txbuf; 655 lp->next_tx = txbuf;
650 } else { 656 } else {
651 freeskb = 1; 657 retval = NETDEV_TX_BUSY;
658 freeskb = 0;
652 } 659 }
653 660
654 BUGMSG(D_DEBUG, "%s: %d: %s, status: %x\n",__FILE__,__LINE__,__FUNCTION__,ASTATUS()); 661 BUGMSG(D_DEBUG, "%s: %d: %s, status: %x\n",__FILE__,__LINE__,__FUNCTION__,ASTATUS());
@@ -664,7 +671,7 @@ static int arcnet_send_packet(struct sk_buff *skb, struct net_device *dev)
664 if (freeskb) { 671 if (freeskb) {
665 dev_kfree_skb(skb); 672 dev_kfree_skb(skb);
666 } 673 }
667 return 0; /* no need to try again */ 674 return retval; /* no need to try again */
668} 675}
669 676
670 677
@@ -690,7 +697,6 @@ static int go_tx(struct net_device *dev)
690 /* start sending */ 697 /* start sending */
691 ACOMMAND(TXcmd | (lp->cur_tx << 3)); 698 ACOMMAND(TXcmd | (lp->cur_tx << 3));
692 699
693 dev->trans_start = jiffies;
694 lp->stats.tx_packets++; 700 lp->stats.tx_packets++;
695 lp->lasttrans_dest = lp->lastload_dest; 701 lp->lasttrans_dest = lp->lastload_dest;
696 lp->lastload_dest = 0; 702 lp->lastload_dest = 0;
@@ -917,6 +923,9 @@ irqreturn_t arcnet_interrupt(int irq, void *dev_id, struct pt_regs *regs)
917 923
918 BUGMSG(D_RECON, "Network reconfiguration detected (status=%Xh)\n", 924 BUGMSG(D_RECON, "Network reconfiguration detected (status=%Xh)\n",
919 status); 925 status);
926 /* MYRECON bit is at bit 7 of diagstatus */
927 if(diagstatus & 0x80)
928 BUGMSG(D_RECON,"Put out that recon myself\n");
920 929
921 /* is the RECON info empty or old? */ 930 /* is the RECON info empty or old? */
922 if (!lp->first_recon || !lp->last_recon || 931 if (!lp->first_recon || !lp->last_recon ||
diff --git a/drivers/net/atarilance.c b/drivers/net/atarilance.c
index ad011214c7f2..e01b6a78ec63 100644
--- a/drivers/net/atarilance.c
+++ b/drivers/net/atarilance.c
@@ -235,7 +235,7 @@ struct lance_private {
235#define MEM lp->mem 235#define MEM lp->mem
236#define DREG IO->data 236#define DREG IO->data
237#define AREG IO->addr 237#define AREG IO->addr
238#define REGA(a) ( AREG = (a), DREG ) 238#define REGA(a) (*( AREG = (a), &DREG ))
239 239
240/* Definitions for packet buffer access: */ 240/* Definitions for packet buffer access: */
241#define PKT_BUF_SZ 1544 241#define PKT_BUF_SZ 1544
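
The atarilance REGA() change is about lvalue-ness: a plain comma expression such as ( AREG = (a), DREG ) is not assignable in ISO C, so writes through the old macro relied on a since-removed GCC extension. Taking the address inside the comma expression and dereferencing the result yields a real lvalue. A standalone illustration (struct and names invented for the example):

struct fake_io { volatile unsigned short data, addr; };
static struct fake_io io_instance, *IO = &io_instance;

#define DREG	IO->data
#define AREG	IO->addr
/* old form:  #define REGA(a) ( AREG = (a), DREG )   -- reads fine, cannot be assigned to */
#define REGA(a)	(*( AREG = (a), &DREG ))	/* new form: a proper lvalue */

int main(void)
{
	REGA(0) = 0x1234;		/* select register 0, then write its data port */
	return REGA(0) != 0x1234;	/* 0 on success */
}
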
diff --git a/drivers/net/dm9000.c b/drivers/net/dm9000.c
index 6440a892bb81..e54fc10f6846 100644
--- a/drivers/net/dm9000.c
+++ b/drivers/net/dm9000.c
@@ -1140,7 +1140,7 @@ dm9000_phy_write(struct net_device *dev, int phyaddr_unused, int reg, int value)
1140} 1140}
1141 1141
1142static int 1142static int
1143dm9000_drv_suspend(struct device *dev, u32 state, u32 level) 1143dm9000_drv_suspend(struct device *dev, pm_message_t state, u32 level)
1144{ 1144{
1145 struct net_device *ndev = dev_get_drvdata(dev); 1145 struct net_device *ndev = dev_get_drvdata(dev);
1146 1146
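
The dm9000 hunk only changes the suspend hook's second argument from a raw u32 to the driver model's pm_message_t; the level argument is still present in this kernel generation. A hedged sketch of a callback with the same signature (not the dm9000 body):

#include <linux/device.h>
#include <linux/netdevice.h>

static int example_drv_suspend(struct device *dev, pm_message_t state, u32 level)
{
	struct net_device *ndev = dev_get_drvdata(dev);

	if (ndev)
		netif_device_detach(ndev);	/* quiesce the interface across suspend */
	return 0;
}
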
diff --git a/drivers/net/forcedeth.c b/drivers/net/forcedeth.c
index 7d93948aec83..d6eefdb71c17 100644
--- a/drivers/net/forcedeth.c
+++ b/drivers/net/forcedeth.c
@@ -1372,7 +1372,7 @@ static int nv_change_mtu(struct net_device *dev, int new_mtu)
1372 1372
1373 /* synchronized against open : rtnl_lock() held by caller */ 1373 /* synchronized against open : rtnl_lock() held by caller */
1374 if (netif_running(dev)) { 1374 if (netif_running(dev)) {
1375 u8 *base = get_hwbase(dev); 1375 u8 __iomem *base = get_hwbase(dev);
1376 /* 1376 /*
1377 * It seems that the nic preloads valid ring entries into an 1377 * It seems that the nic preloads valid ring entries into an
1378 * internal buffer. The procedure for flushing everything is 1378 * internal buffer. The procedure for flushing everything is
@@ -1423,7 +1423,7 @@ static int nv_change_mtu(struct net_device *dev, int new_mtu)
1423 1423
1424static void nv_copy_mac_to_hw(struct net_device *dev) 1424static void nv_copy_mac_to_hw(struct net_device *dev)
1425{ 1425{
1426 u8 *base = get_hwbase(dev); 1426 u8 __iomem *base = get_hwbase(dev);
1427 u32 mac[2]; 1427 u32 mac[2];
1428 1428
1429 mac[0] = (dev->dev_addr[0] << 0) + (dev->dev_addr[1] << 8) + 1429 mac[0] = (dev->dev_addr[0] << 0) + (dev->dev_addr[1] << 8) +
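
The forcedeth hunks add the __iomem annotation to the mapped register base. __iomem marks a cookie returned by ioremap() that must only be used through readl()/writel() and friends; sparse then warns when it is mixed with ordinary pointers or dereferenced directly. Minimal sketch (names invented):

#include <linux/types.h>
#include <asm/io.h>		/* ioremap/readl/iounmap on kernels of this vintage */

static u32 example_read_reg(u8 __iomem *base, unsigned int offset)
{
	return readl(base + offset);	/* never dereference base directly */
}

static void example_map_probe(unsigned long phys)
{
	u8 __iomem *base = ioremap(phys, 0x1000);

	if (base) {
		(void) example_read_reg(base, 0);
		iounmap(base);
	}
}
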
diff --git a/drivers/net/hamachi.c b/drivers/net/hamachi.c
index d9df1d9a5739..bc9a3bf8d560 100644
--- a/drivers/net/hamachi.c
+++ b/drivers/net/hamachi.c
@@ -204,6 +204,10 @@ KERN_INFO " Further modifications by Keith Underwood <keithu@parl.clemson.edu>
204 204
205#define RUN_AT(x) (jiffies + (x)) 205#define RUN_AT(x) (jiffies + (x))
206 206
207#ifndef ADDRLEN
208#define ADDRLEN 32
209#endif
210
207/* Condensed bus+endian portability operations. */ 211/* Condensed bus+endian portability operations. */
208#if ADDRLEN == 64 212#if ADDRLEN == 64
209#define cpu_to_leXX(addr) cpu_to_le64(addr) 213#define cpu_to_leXX(addr) cpu_to_le64(addr)
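
The hamachi hunk simply gives ADDRLEN a default of 32 when the build did not define it, so the #if ADDRLEN == 64 switch below always sees an explicit value and a Makefile can still override it with -DADDRLEN=64. The same guard pattern in a standalone, runnable form (the typedefs here stand in for the driver's descriptor address fields):

#include <stdint.h>
#include <stdio.h>

#ifndef ADDRLEN
#define ADDRLEN 32			/* default unless the build says otherwise */
#endif

#if ADDRLEN == 64
typedef uint64_t example_desc_addr_t;
#else
typedef uint32_t example_desc_addr_t;
#endif

int main(void)
{
	printf("ADDRLEN=%d -> descriptor address field is %zu bytes\n",
	       ADDRLEN, sizeof(example_desc_addr_t));
	return 0;
}

Compiling with "cc -DADDRLEN=64" flips the typedef, mirroring how the driver picks cpu_to_le64() over cpu_to_le32().
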
diff --git a/drivers/net/irda/smsc-ircc2.c b/drivers/net/irda/smsc-ircc2.c
index 10125a1dba22..dd89bda1f131 100644
--- a/drivers/net/irda/smsc-ircc2.c
+++ b/drivers/net/irda/smsc-ircc2.c
@@ -4,10 +4,10 @@
4 * Description: Driver for the SMC Infrared Communications Controller 4 * Description: Driver for the SMC Infrared Communications Controller
5 * Status: Experimental. 5 * Status: Experimental.
6 * Author: Daniele Peri (peri@csai.unipa.it) 6 * Author: Daniele Peri (peri@csai.unipa.it)
7 * Created at: 7 * Created at:
8 * Modified at: 8 * Modified at:
9 * Modified by: 9 * Modified by:
10 * 10 *
11 * Copyright (c) 2002 Daniele Peri 11 * Copyright (c) 2002 Daniele Peri
12 * All Rights Reserved. 12 * All Rights Reserved.
13 * Copyright (c) 2002 Jean Tourrilhes 13 * Copyright (c) 2002 Jean Tourrilhes
@@ -17,26 +17,26 @@
17 * 17 *
18 * Copyright (c) 2001 Stefani Seibold 18 * Copyright (c) 2001 Stefani Seibold
19 * Copyright (c) 1999-2001 Dag Brattli 19 * Copyright (c) 1999-2001 Dag Brattli
20 * Copyright (c) 1998-1999 Thomas Davis, 20 * Copyright (c) 1998-1999 Thomas Davis,
21 * 21 *
22 * and irport.c: 22 * and irport.c:
23 * 23 *
24 * Copyright (c) 1997, 1998, 1999-2000 Dag Brattli, All Rights Reserved. 24 * Copyright (c) 1997, 1998, 1999-2000 Dag Brattli, All Rights Reserved.
25 * 25 *
26 * 26 *
27 * This program is free software; you can redistribute it and/or 27 * This program is free software; you can redistribute it and/or
28 * modify it under the terms of the GNU General Public License as 28 * modify it under the terms of the GNU General Public License as
29 * published by the Free Software Foundation; either version 2 of 29 * published by the Free Software Foundation; either version 2 of
30 * the License, or (at your option) any later version. 30 * the License, or (at your option) any later version.
31 * 31 *
32 * This program is distributed in the hope that it will be useful, 32 * This program is distributed in the hope that it will be useful,
33 * but WITHOUT ANY WARRANTY; without even the implied warranty of 33 * but WITHOUT ANY WARRANTY; without even the implied warranty of
34 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the 34 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
35 * GNU General Public License for more details. 35 * GNU General Public License for more details.
36 * 36 *
37 * You should have received a copy of the GNU General Public License 37 * You should have received a copy of the GNU General Public License
38 * along with this program; if not, write to the Free Software 38 * along with this program; if not, write to the Free Software
39 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, 39 * Foundation, Inc., 59 Temple Place, Suite 330, Boston,
40 * MA 02111-1307 USA 40 * MA 02111-1307 USA
41 * 41 *
42 ********************************************************************/ 42 ********************************************************************/
@@ -68,24 +68,42 @@
68#include "smsc-ircc2.h" 68#include "smsc-ircc2.h"
69#include "smsc-sio.h" 69#include "smsc-sio.h"
70 70
71
72MODULE_AUTHOR("Daniele Peri <peri@csai.unipa.it>");
73MODULE_DESCRIPTION("SMC IrCC SIR/FIR controller driver");
74MODULE_LICENSE("GPL");
75
76static int ircc_dma = 255;
77module_param(ircc_dma, int, 0);
78MODULE_PARM_DESC(ircc_dma, "DMA channel");
79
80static int ircc_irq = 255;
81module_param(ircc_irq, int, 0);
82MODULE_PARM_DESC(ircc_irq, "IRQ line");
83
84static int ircc_fir;
85module_param(ircc_fir, int, 0);
86MODULE_PARM_DESC(ircc_fir, "FIR Base Address");
87
88static int ircc_sir;
89module_param(ircc_sir, int, 0);
90MODULE_PARM_DESC(ircc_sir, "SIR Base Address");
91
92static int ircc_cfg;
93module_param(ircc_cfg, int, 0);
94MODULE_PARM_DESC(ircc_cfg, "Configuration register base address");
95
96static int ircc_transceiver;
97module_param(ircc_transceiver, int, 0);
98MODULE_PARM_DESC(ircc_transceiver, "Transceiver type");
99
71/* Types */ 100/* Types */
72 101
73struct smsc_transceiver { 102struct smsc_transceiver {
74 char *name; 103 char *name;
75 void (*set_for_speed)(int fir_base, u32 speed); 104 void (*set_for_speed)(int fir_base, u32 speed);
76 int (*probe)(int fir_base); 105 int (*probe)(int fir_base);
77}; 106};
78typedef struct smsc_transceiver smsc_transceiver_t;
79
80#if 0
81struct smc_chip {
82 char *name;
83 u16 flags;
84 u8 devid;
85 u8 rev;
86};
87typedef struct smc_chip smc_chip_t;
88#endif
89 107
90struct smsc_chip { 108struct smsc_chip {
91 char *name; 109 char *name;
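
The hunk above moves the smsc-ircc2 load-time options from old-style globals at the bottom of the file to module_param()/MODULE_PARM_DESC declarations next to their variables. The same pattern for a hypothetical module (permission 0 means load-time only, no sysfs node):

#include <linux/module.h>
#include <linux/moduleparam.h>

static int example_irq = 255;		/* 255 = "use whatever the chip reports" */
module_param(example_irq, int, 0);
MODULE_PARM_DESC(example_irq, "IRQ line");

MODULE_AUTHOR("Example Author");
MODULE_DESCRIPTION("module_param() usage sketch");
MODULE_LICENSE("GPL");

Loading such a module with "modprobe example example_irq=7" overrides the default, just as ircc_irq above overrides the chip's reported IRQ.
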
@@ -96,20 +114,18 @@ struct smsc_chip {
96 u8 devid; 114 u8 devid;
97 u8 rev; 115 u8 rev;
98}; 116};
99typedef struct smsc_chip smsc_chip_t;
100 117
101struct smsc_chip_address { 118struct smsc_chip_address {
102 unsigned int cfg_base; 119 unsigned int cfg_base;
103 unsigned int type; 120 unsigned int type;
104}; 121};
105typedef struct smsc_chip_address smsc_chip_address_t;
106 122
107/* Private data for each instance */ 123/* Private data for each instance */
108struct smsc_ircc_cb { 124struct smsc_ircc_cb {
109 struct net_device *netdev; /* Yes! we are some kind of netdevice */ 125 struct net_device *netdev; /* Yes! we are some kind of netdevice */
110 struct net_device_stats stats; 126 struct net_device_stats stats;
111 struct irlap_cb *irlap; /* The link layer we are binded to */ 127 struct irlap_cb *irlap; /* The link layer we are binded to */
112 128
113 chipio_t io; /* IrDA controller information */ 129 chipio_t io; /* IrDA controller information */
114 iobuff_t tx_buff; /* Transmit buffer */ 130 iobuff_t tx_buff; /* Transmit buffer */
115 iobuff_t rx_buff; /* Receive buffer */ 131 iobuff_t rx_buff; /* Receive buffer */
@@ -119,7 +135,7 @@ struct smsc_ircc_cb {
119 struct qos_info qos; /* QoS capabilities for this device */ 135 struct qos_info qos; /* QoS capabilities for this device */
120 136
121 spinlock_t lock; /* For serializing operations */ 137 spinlock_t lock; /* For serializing operations */
122 138
123 __u32 new_speed; 139 __u32 new_speed;
124 __u32 flags; /* Interface flags */ 140 __u32 flags; /* Interface flags */
125 141
@@ -127,18 +143,20 @@ struct smsc_ircc_cb {
127 int tx_len; /* Number of frames in tx_buff */ 143 int tx_len; /* Number of frames in tx_buff */
128 144
129 int transceiver; 145 int transceiver;
130 struct pm_dev *pmdev; 146 struct platform_device *pldev;
131}; 147};
132 148
133/* Constants */ 149/* Constants */
134 150
135static const char *driver_name = "smsc-ircc2"; 151#define SMSC_IRCC2_DRIVER_NAME "smsc-ircc2"
136#define DIM(x) (sizeof(x)/(sizeof(*(x)))) 152
137#define SMSC_IRCC2_C_IRDA_FALLBACK_SPEED 9600 153#define SMSC_IRCC2_C_IRDA_FALLBACK_SPEED 9600
138#define SMSC_IRCC2_C_DEFAULT_TRANSCEIVER 1 154#define SMSC_IRCC2_C_DEFAULT_TRANSCEIVER 1
139#define SMSC_IRCC2_C_NET_TIMEOUT 0 155#define SMSC_IRCC2_C_NET_TIMEOUT 0
140#define SMSC_IRCC2_C_SIR_STOP 0 156#define SMSC_IRCC2_C_SIR_STOP 0
141 157
158static const char *driver_name = SMSC_IRCC2_DRIVER_NAME;
159
142/* Prototypes */ 160/* Prototypes */
143 161
144static int smsc_ircc_open(unsigned int firbase, unsigned int sirbase, u8 dma, u8 irq); 162static int smsc_ircc_open(unsigned int firbase, unsigned int sirbase, u8 dma, u8 irq);
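
This hunk drops the file-private DIM() macro in favour of the kernel's ARRAY_SIZE() (later hunks switch the users over). Both expand to sizeof(array)/sizeof(element); a standalone check, using the NULL-terminated table shape the driver's transceiver list has:

#include <stdio.h>

#define ARRAY_SIZE(x) (sizeof(x) / sizeof((x)[0]))	/* expansion as in linux/kernel.h */

static const char *names[] = { "one", "two", "three", NULL };

int main(void)
{
	/* mirrors SMSC_IRCC2_C_NUMBER_OF_TRANSCEIVERS: the NULL sentinel
	 * is excluded from the usable count */
	printf("%zu usable entries\n", ARRAY_SIZE(names) - 1);
	return 0;
}
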
@@ -147,15 +165,15 @@ static void smsc_ircc_setup_io(struct smsc_ircc_cb *self, unsigned int fir_base,
147static void smsc_ircc_setup_qos(struct smsc_ircc_cb *self); 165static void smsc_ircc_setup_qos(struct smsc_ircc_cb *self);
148static void smsc_ircc_init_chip(struct smsc_ircc_cb *self); 166static void smsc_ircc_init_chip(struct smsc_ircc_cb *self);
149static int __exit smsc_ircc_close(struct smsc_ircc_cb *self); 167static int __exit smsc_ircc_close(struct smsc_ircc_cb *self);
150static int smsc_ircc_dma_receive(struct smsc_ircc_cb *self, int iobase); 168static int smsc_ircc_dma_receive(struct smsc_ircc_cb *self);
151static void smsc_ircc_dma_receive_complete(struct smsc_ircc_cb *self, int iobase); 169static void smsc_ircc_dma_receive_complete(struct smsc_ircc_cb *self);
152static void smsc_ircc_sir_receive(struct smsc_ircc_cb *self); 170static void smsc_ircc_sir_receive(struct smsc_ircc_cb *self);
153static int smsc_ircc_hard_xmit_sir(struct sk_buff *skb, struct net_device *dev); 171static int smsc_ircc_hard_xmit_sir(struct sk_buff *skb, struct net_device *dev);
154static int smsc_ircc_hard_xmit_fir(struct sk_buff *skb, struct net_device *dev); 172static int smsc_ircc_hard_xmit_fir(struct sk_buff *skb, struct net_device *dev);
155static void smsc_ircc_dma_xmit(struct smsc_ircc_cb *self, int iobase, int bofs); 173static void smsc_ircc_dma_xmit(struct smsc_ircc_cb *self, int bofs);
156static void smsc_ircc_dma_xmit_complete(struct smsc_ircc_cb *self, int iobase); 174static void smsc_ircc_dma_xmit_complete(struct smsc_ircc_cb *self);
157static void smsc_ircc_change_speed(void *priv, u32 speed); 175static void smsc_ircc_change_speed(struct smsc_ircc_cb *self, u32 speed);
158static void smsc_ircc_set_sir_speed(void *priv, u32 speed); 176static void smsc_ircc_set_sir_speed(struct smsc_ircc_cb *self, u32 speed);
159static irqreturn_t smsc_ircc_interrupt(int irq, void *dev_id, struct pt_regs *regs); 177static irqreturn_t smsc_ircc_interrupt(int irq, void *dev_id, struct pt_regs *regs);
160static irqreturn_t smsc_ircc_interrupt_sir(struct net_device *dev); 178static irqreturn_t smsc_ircc_interrupt_sir(struct net_device *dev);
161static void smsc_ircc_sir_start(struct smsc_ircc_cb *self); 179static void smsc_ircc_sir_start(struct smsc_ircc_cb *self);
@@ -171,7 +189,6 @@ static int smsc_ircc_net_ioctl(struct net_device *dev, struct ifreq *rq, int cm
171static void smsc_ircc_timeout(struct net_device *dev); 189static void smsc_ircc_timeout(struct net_device *dev);
172#endif 190#endif
173static struct net_device_stats *smsc_ircc_net_get_stats(struct net_device *dev); 191static struct net_device_stats *smsc_ircc_net_get_stats(struct net_device *dev);
174static int smsc_ircc_pmproc(struct pm_dev *dev, pm_request_t rqst, void *data);
175static int smsc_ircc_is_receiving(struct smsc_ircc_cb *self); 192static int smsc_ircc_is_receiving(struct smsc_ircc_cb *self);
176static void smsc_ircc_probe_transceiver(struct smsc_ircc_cb *self); 193static void smsc_ircc_probe_transceiver(struct smsc_ircc_cb *self);
177static void smsc_ircc_set_transceiver_for_speed(struct smsc_ircc_cb *self, u32 speed); 194static void smsc_ircc_set_transceiver_for_speed(struct smsc_ircc_cb *self, u32 speed);
@@ -179,9 +196,9 @@ static void smsc_ircc_sir_wait_hw_transmitter_finish(struct smsc_ircc_cb *self);
179 196
180/* Probing */ 197/* Probing */
181static int __init smsc_ircc_look_for_chips(void); 198static int __init smsc_ircc_look_for_chips(void);
182static const smsc_chip_t * __init smsc_ircc_probe(unsigned short cfg_base,u8 reg,const smsc_chip_t *chip,char *type); 199static const struct smsc_chip * __init smsc_ircc_probe(unsigned short cfg_base, u8 reg, const struct smsc_chip *chip, char *type);
183static int __init smsc_superio_flat(const smsc_chip_t *chips, unsigned short cfg_base, char *type); 200static int __init smsc_superio_flat(const struct smsc_chip *chips, unsigned short cfg_base, char *type);
184static int __init smsc_superio_paged(const smsc_chip_t *chips, unsigned short cfg_base, char *type); 201static int __init smsc_superio_paged(const struct smsc_chip *chips, unsigned short cfg_base, char *type);
185static int __init smsc_superio_fdc(unsigned short cfg_base); 202static int __init smsc_superio_fdc(unsigned short cfg_base);
186static int __init smsc_superio_lpc(unsigned short cfg_base); 203static int __init smsc_superio_lpc(unsigned short cfg_base);
187 204
@@ -196,21 +213,26 @@ static int smsc_ircc_probe_transceiver_smsc_ircc_atc(int fir_base);
196 213
197/* Power Management */ 214/* Power Management */
198 215
199static void smsc_ircc_suspend(struct smsc_ircc_cb *self); 216static int smsc_ircc_suspend(struct device *dev, pm_message_t state, u32 level);
200static void smsc_ircc_wakeup(struct smsc_ircc_cb *self); 217static int smsc_ircc_resume(struct device *dev, u32 level);
201static int smsc_ircc_pmproc(struct pm_dev *dev, pm_request_t rqst, void *data);
202 218
219static struct device_driver smsc_ircc_driver = {
220 .name = SMSC_IRCC2_DRIVER_NAME,
221 .bus = &platform_bus_type,
222 .suspend = smsc_ircc_suspend,
223 .resume = smsc_ircc_resume,
224};
203 225
204/* Transceivers for SMSC-ircc */ 226/* Transceivers for SMSC-ircc */
205 227
206static smsc_transceiver_t smsc_transceivers[]= 228static struct smsc_transceiver smsc_transceivers[] =
207{ 229{
208 { "Toshiba Satellite 1800 (GP data pin select)", smsc_ircc_set_transceiver_toshiba_sat1800, smsc_ircc_probe_transceiver_toshiba_sat1800}, 230 { "Toshiba Satellite 1800 (GP data pin select)", smsc_ircc_set_transceiver_toshiba_sat1800, smsc_ircc_probe_transceiver_toshiba_sat1800 },
209 { "Fast pin select", smsc_ircc_set_transceiver_smsc_ircc_fast_pin_select, smsc_ircc_probe_transceiver_smsc_ircc_fast_pin_select}, 231 { "Fast pin select", smsc_ircc_set_transceiver_smsc_ircc_fast_pin_select, smsc_ircc_probe_transceiver_smsc_ircc_fast_pin_select },
210 { "ATC IRMode", smsc_ircc_set_transceiver_smsc_ircc_atc, smsc_ircc_probe_transceiver_smsc_ircc_atc}, 232 { "ATC IRMode", smsc_ircc_set_transceiver_smsc_ircc_atc, smsc_ircc_probe_transceiver_smsc_ircc_atc },
211 { NULL, NULL} 233 { NULL, NULL }
212}; 234};
213#define SMSC_IRCC2_C_NUMBER_OF_TRANSCEIVERS (DIM(smsc_transceivers)-1) 235#define SMSC_IRCC2_C_NUMBER_OF_TRANSCEIVERS (ARRAY_SIZE(smsc_transceivers) - 1)
214 236
215/* SMC SuperIO chipsets definitions */ 237/* SMC SuperIO chipsets definitions */
216 238
@@ -221,7 +243,7 @@ static smsc_transceiver_t smsc_transceivers[]=
221#define FIR 4 /* SuperIO Chip has fast IRDA */ 243#define FIR 4 /* SuperIO Chip has fast IRDA */
222#define SERx4 8 /* SuperIO Chip supports 115,2 KBaud * 4=460,8 KBaud */ 244#define SERx4 8 /* SuperIO Chip supports 115,2 KBaud * 4=460,8 KBaud */
223 245
224static smsc_chip_t __initdata fdc_chips_flat[]= 246static struct smsc_chip __initdata fdc_chips_flat[] =
225{ 247{
226 /* Base address 0x3f0 or 0x370 */ 248 /* Base address 0x3f0 or 0x370 */
227 { "37C44", KEY55_1|NoIRDA, 0x00, 0x00 }, /* This chip cannot be detected */ 249 { "37C44", KEY55_1|NoIRDA, 0x00, 0x00 }, /* This chip cannot be detected */
@@ -235,7 +257,7 @@ static smsc_chip_t __initdata fdc_chips_flat[]=
235 { NULL } 257 { NULL }
236}; 258};
237 259
238static smsc_chip_t __initdata fdc_chips_paged[]= 260static struct smsc_chip __initdata fdc_chips_paged[] =
239{ 261{
240 /* Base address 0x3f0 or 0x370 */ 262 /* Base address 0x3f0 or 0x370 */
241 { "37B72X", KEY55_1|SIR|SERx4, 0x4c, 0x00 }, 263 { "37B72X", KEY55_1|SIR|SERx4, 0x4c, 0x00 },
@@ -254,7 +276,7 @@ static smsc_chip_t __initdata fdc_chips_paged[]=
254 { NULL } 276 { NULL }
255}; 277};
256 278
257static smsc_chip_t __initdata lpc_chips_flat[]= 279static struct smsc_chip __initdata lpc_chips_flat[] =
258{ 280{
259 /* Base address 0x2E or 0x4E */ 281 /* Base address 0x2E or 0x4E */
260 { "47N227", KEY55_1|FIR|SERx4, 0x5a, 0x00 }, 282 { "47N227", KEY55_1|FIR|SERx4, 0x5a, 0x00 },
@@ -262,7 +284,7 @@ static smsc_chip_t __initdata lpc_chips_flat[]=
262 { NULL } 284 { NULL }
263}; 285};
264 286
265static smsc_chip_t __initdata lpc_chips_paged[]= 287static struct smsc_chip __initdata lpc_chips_paged[] =
266{ 288{
267 /* Base address 0x2E or 0x4E */ 289 /* Base address 0x2E or 0x4E */
268 { "47B27X", KEY55_1|SIR|SERx4, 0x51, 0x00 }, 290 { "47B27X", KEY55_1|SIR|SERx4, 0x51, 0x00 },
@@ -281,33 +303,25 @@ static smsc_chip_t __initdata lpc_chips_paged[]=
281#define SMSCSIO_TYPE_FLAT 4 303#define SMSCSIO_TYPE_FLAT 4
282#define SMSCSIO_TYPE_PAGED 8 304#define SMSCSIO_TYPE_PAGED 8
283 305
284static smsc_chip_address_t __initdata possible_addresses[]= 306static struct smsc_chip_address __initdata possible_addresses[] =
285{ 307{
286 {0x3f0, SMSCSIO_TYPE_FDC|SMSCSIO_TYPE_FLAT|SMSCSIO_TYPE_PAGED}, 308 { 0x3f0, SMSCSIO_TYPE_FDC|SMSCSIO_TYPE_FLAT|SMSCSIO_TYPE_PAGED },
287 {0x370, SMSCSIO_TYPE_FDC|SMSCSIO_TYPE_FLAT|SMSCSIO_TYPE_PAGED}, 309 { 0x370, SMSCSIO_TYPE_FDC|SMSCSIO_TYPE_FLAT|SMSCSIO_TYPE_PAGED },
288 {0xe0, SMSCSIO_TYPE_FDC|SMSCSIO_TYPE_FLAT|SMSCSIO_TYPE_PAGED}, 310 { 0xe0, SMSCSIO_TYPE_FDC|SMSCSIO_TYPE_FLAT|SMSCSIO_TYPE_PAGED },
289 {0x2e, SMSCSIO_TYPE_LPC|SMSCSIO_TYPE_FLAT|SMSCSIO_TYPE_PAGED}, 311 { 0x2e, SMSCSIO_TYPE_LPC|SMSCSIO_TYPE_FLAT|SMSCSIO_TYPE_PAGED },
290 {0x4e, SMSCSIO_TYPE_LPC|SMSCSIO_TYPE_FLAT|SMSCSIO_TYPE_PAGED}, 312 { 0x4e, SMSCSIO_TYPE_LPC|SMSCSIO_TYPE_FLAT|SMSCSIO_TYPE_PAGED },
291 {0,0} 313 { 0, 0 }
292}; 314};
293 315
294/* Globals */ 316/* Globals */
295 317
296static struct smsc_ircc_cb *dev_self[] = { NULL, NULL}; 318static struct smsc_ircc_cb *dev_self[] = { NULL, NULL };
297 319static unsigned short dev_count;
298static int ircc_irq=255;
299static int ircc_dma=255;
300static int ircc_fir=0;
301static int ircc_sir=0;
302static int ircc_cfg=0;
303static int ircc_transceiver=0;
304
305static unsigned short dev_count=0;
306 320
307static inline void register_bank(int iobase, int bank) 321static inline void register_bank(int iobase, int bank)
308{ 322{
309 outb(((inb(iobase+IRCC_MASTER) & 0xf0) | (bank & 0x07)), 323 outb(((inb(iobase + IRCC_MASTER) & 0xf0) | (bank & 0x07)),
310 iobase+IRCC_MASTER); 324 iobase + IRCC_MASTER);
311} 325}
312 326
313 327
@@ -327,34 +341,44 @@ static inline void register_bank(int iobase, int bank)
327 */ 341 */
328static int __init smsc_ircc_init(void) 342static int __init smsc_ircc_init(void)
329{ 343{
330 int ret=-ENODEV; 344 int ret;
331 345
332 IRDA_DEBUG(1, "%s\n", __FUNCTION__); 346 IRDA_DEBUG(1, "%s\n", __FUNCTION__);
333 347
334 dev_count=0; 348 ret = driver_register(&smsc_ircc_driver);
335 349 if (ret) {
336 if ((ircc_fir>0)&&(ircc_sir>0)) { 350 IRDA_ERROR("%s, Can't register driver!\n", driver_name);
351 return ret;
352 }
353
354 dev_count = 0;
355
356 if (ircc_fir > 0 && ircc_sir > 0) {
337 IRDA_MESSAGE(" Overriding FIR address 0x%04x\n", ircc_fir); 357 IRDA_MESSAGE(" Overriding FIR address 0x%04x\n", ircc_fir);
338 IRDA_MESSAGE(" Overriding SIR address 0x%04x\n", ircc_sir); 358 IRDA_MESSAGE(" Overriding SIR address 0x%04x\n", ircc_sir);
339 359
340 if (smsc_ircc_open(ircc_fir, ircc_sir, ircc_dma, ircc_irq) == 0) 360 if (smsc_ircc_open(ircc_fir, ircc_sir, ircc_dma, ircc_irq))
341 return 0; 361 ret = -ENODEV;
342 362 } else {
343 return -ENODEV; 363 ret = -ENODEV;
344 } 364
365 /* try user provided configuration register base address */
366 if (ircc_cfg > 0) {
367 IRDA_MESSAGE(" Overriding configuration address "
368 "0x%04x\n", ircc_cfg);
369 if (!smsc_superio_fdc(ircc_cfg))
370 ret = 0;
371 if (!smsc_superio_lpc(ircc_cfg))
372 ret = 0;
373 }
345 374
346 /* try user provided configuration register base address */ 375 if (smsc_ircc_look_for_chips() > 0)
347 if (ircc_cfg>0) {
348 IRDA_MESSAGE(" Overriding configuration address 0x%04x\n",
349 ircc_cfg);
350 if (!smsc_superio_fdc(ircc_cfg))
351 ret = 0;
352 if (!smsc_superio_lpc(ircc_cfg))
353 ret = 0; 376 ret = 0;
354 } 377 }
355 378
356 if(smsc_ircc_look_for_chips()>0) ret = 0; 379 if (ret)
357 380 driver_unregister(&smsc_ircc_driver);
381
358 return ret; 382 return ret;
359} 383}
360 384
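
The rewritten smsc_ircc_init() registers the device_driver first and unregisters it again if none of the probe paths found a chip, so a failed load leaves no stale driver behind. The shape of that unwind, reusing the example_driver object sketched after the earlier hunk (the probe helper is hypothetical):

#include <linux/init.h>
#include <linux/errno.h>

static int example_found_hardware(void)	/* hypothetical stand-in for the probes */
{
	return 0;				/* pretend nothing was found */
}

static int __init example_init(void)
{
	int ret = driver_register(&example_driver);

	if (ret)
		return ret;

	if (!example_found_hardware())
		ret = -ENODEV;

	if (ret)
		driver_unregister(&example_driver);	/* back out on failure */
	return ret;
}
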
@@ -369,15 +393,15 @@ static int __init smsc_ircc_open(unsigned int fir_base, unsigned int sir_base, u
369 struct smsc_ircc_cb *self; 393 struct smsc_ircc_cb *self;
370 struct net_device *dev; 394 struct net_device *dev;
371 int err; 395 int err;
372 396
373 IRDA_DEBUG(1, "%s\n", __FUNCTION__); 397 IRDA_DEBUG(1, "%s\n", __FUNCTION__);
374 398
375 err = smsc_ircc_present(fir_base, sir_base); 399 err = smsc_ircc_present(fir_base, sir_base);
376 if(err) 400 if (err)
377 goto err_out; 401 goto err_out;
378 402
379 err = -ENOMEM; 403 err = -ENOMEM;
380 if (dev_count > DIM(dev_self)) { 404 if (dev_count >= ARRAY_SIZE(dev_self)) {
381 IRDA_WARNING("%s(), too many devices!\n", __FUNCTION__); 405 IRDA_WARNING("%s(), too many devices!\n", __FUNCTION__);
382 goto err_out1; 406 goto err_out1;
383 } 407 }
@@ -396,14 +420,14 @@ static int __init smsc_ircc_open(unsigned int fir_base, unsigned int sir_base, u
396 dev->hard_start_xmit = smsc_ircc_hard_xmit_sir; 420 dev->hard_start_xmit = smsc_ircc_hard_xmit_sir;
397#if SMSC_IRCC2_C_NET_TIMEOUT 421#if SMSC_IRCC2_C_NET_TIMEOUT
398 dev->tx_timeout = smsc_ircc_timeout; 422 dev->tx_timeout = smsc_ircc_timeout;
399 dev->watchdog_timeo = HZ*2; /* Allow enough time for speed change */ 423 dev->watchdog_timeo = HZ * 2; /* Allow enough time for speed change */
400#endif 424#endif
401 dev->open = smsc_ircc_net_open; 425 dev->open = smsc_ircc_net_open;
402 dev->stop = smsc_ircc_net_close; 426 dev->stop = smsc_ircc_net_close;
403 dev->do_ioctl = smsc_ircc_net_ioctl; 427 dev->do_ioctl = smsc_ircc_net_ioctl;
404 dev->get_stats = smsc_ircc_net_get_stats; 428 dev->get_stats = smsc_ircc_net_get_stats;
405 429
406 self = dev->priv; 430 self = netdev_priv(dev);
407 self->netdev = dev; 431 self->netdev = dev;
408 432
409 /* Make ifconfig display some details */ 433 /* Make ifconfig display some details */
@@ -411,10 +435,10 @@ static int __init smsc_ircc_open(unsigned int fir_base, unsigned int sir_base, u
411 dev->irq = self->io.irq = irq; 435 dev->irq = self->io.irq = irq;
412 436
413 /* Need to store self somewhere */ 437 /* Need to store self somewhere */
414 dev_self[dev_count++] = self; 438 dev_self[dev_count] = self;
415 spin_lock_init(&self->lock); 439 spin_lock_init(&self->lock);
416 440
417 self->rx_buff.truesize = SMSC_IRCC2_RX_BUFF_TRUESIZE; 441 self->rx_buff.truesize = SMSC_IRCC2_RX_BUFF_TRUESIZE;
418 self->tx_buff.truesize = SMSC_IRCC2_TX_BUFF_TRUESIZE; 442 self->tx_buff.truesize = SMSC_IRCC2_TX_BUFF_TRUESIZE;
419 443
420 self->rx_buff.head = 444 self->rx_buff.head =
@@ -442,33 +466,40 @@ static int __init smsc_ircc_open(unsigned int fir_base, unsigned int sir_base, u
442 self->rx_buff.state = OUTSIDE_FRAME; 466 self->rx_buff.state = OUTSIDE_FRAME;
443 self->tx_buff.data = self->tx_buff.head; 467 self->tx_buff.data = self->tx_buff.head;
444 self->rx_buff.data = self->rx_buff.head; 468 self->rx_buff.data = self->rx_buff.head;
445
446 smsc_ircc_setup_io(self, fir_base, sir_base, dma, irq);
447 469
470 smsc_ircc_setup_io(self, fir_base, sir_base, dma, irq);
448 smsc_ircc_setup_qos(self); 471 smsc_ircc_setup_qos(self);
449
450 smsc_ircc_init_chip(self); 472 smsc_ircc_init_chip(self);
451 473
452 if(ircc_transceiver > 0 && 474 if (ircc_transceiver > 0 &&
453 ircc_transceiver < SMSC_IRCC2_C_NUMBER_OF_TRANSCEIVERS) 475 ircc_transceiver < SMSC_IRCC2_C_NUMBER_OF_TRANSCEIVERS)
454 self->transceiver = ircc_transceiver; 476 self->transceiver = ircc_transceiver;
455 else 477 else
456 smsc_ircc_probe_transceiver(self); 478 smsc_ircc_probe_transceiver(self);
457 479
458 err = register_netdev(self->netdev); 480 err = register_netdev(self->netdev);
459 if(err) { 481 if (err) {
460 IRDA_ERROR("%s, Network device registration failed!\n", 482 IRDA_ERROR("%s, Network device registration failed!\n",
461 driver_name); 483 driver_name);
462 goto err_out4; 484 goto err_out4;
463 } 485 }
464 486
465 self->pmdev = pm_register(PM_SYS_DEV, PM_SYS_IRDA, smsc_ircc_pmproc); 487 self->pldev = platform_device_register_simple(SMSC_IRCC2_DRIVER_NAME,
466 if (self->pmdev) 488 dev_count, NULL, 0);
467 self->pmdev->data = self; 489 if (IS_ERR(self->pldev)) {
490 err = PTR_ERR(self->pldev);
491 goto err_out5;
492 }
493 dev_set_drvdata(&self->pldev->dev, self);
468 494
469 IRDA_MESSAGE("IrDA: Registered device %s\n", dev->name); 495 IRDA_MESSAGE("IrDA: Registered device %s\n", dev->name);
496 dev_count++;
470 497
471 return 0; 498 return 0;
499
500 err_out5:
501 unregister_netdev(self->netdev);
502
472 err_out4: 503 err_out4:
473 dma_free_coherent(NULL, self->tx_buff.truesize, 504 dma_free_coherent(NULL, self->tx_buff.truesize,
474 self->tx_buff.head, self->tx_buff_dma); 505 self->tx_buff.head, self->tx_buff_dma);
@@ -477,7 +508,7 @@ static int __init smsc_ircc_open(unsigned int fir_base, unsigned int sir_base, u
477 self->rx_buff.head, self->rx_buff_dma); 508 self->rx_buff.head, self->rx_buff_dma);
478 err_out2: 509 err_out2:
479 free_netdev(self->netdev); 510 free_netdev(self->netdev);
480 dev_self[--dev_count] = NULL; 511 dev_self[dev_count] = NULL;
481 err_out1: 512 err_out1:
482 release_region(fir_base, SMSC_IRCC2_FIR_CHIP_IO_EXTENT); 513 release_region(fir_base, SMSC_IRCC2_FIR_CHIP_IO_EXTENT);
483 release_region(sir_base, SMSC_IRCC2_SIR_CHIP_IO_EXTENT); 514 release_region(sir_base, SMSC_IRCC2_SIR_CHIP_IO_EXTENT);
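
The dev_set_drvdata() call added above is what ties each IrDA instance to its new platform device; the suspend/resume bodies themselves fall outside these hunks. A plausible sketch of how such a callback could recover the state, explicitly not the actual smsc-ircc2 implementation:

static int example_ircc_suspend(struct device *dev, pm_message_t state, u32 level)
{
	struct smsc_ircc_cb *self = dev_get_drvdata(dev);

	if (self)
		netif_device_detach(self->netdev);	/* stop traffic while suspended */
	return 0;
}
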
@@ -511,16 +542,16 @@ static int smsc_ircc_present(unsigned int fir_base, unsigned int sir_base)
511 542
512 register_bank(fir_base, 3); 543 register_bank(fir_base, 3);
513 544
514 high = inb(fir_base+IRCC_ID_HIGH); 545 high = inb(fir_base + IRCC_ID_HIGH);
515 low = inb(fir_base+IRCC_ID_LOW); 546 low = inb(fir_base + IRCC_ID_LOW);
516 chip = inb(fir_base+IRCC_CHIP_ID); 547 chip = inb(fir_base + IRCC_CHIP_ID);
517 version = inb(fir_base+IRCC_VERSION); 548 version = inb(fir_base + IRCC_VERSION);
518 config = inb(fir_base+IRCC_INTERFACE); 549 config = inb(fir_base + IRCC_INTERFACE);
519 dma = config & IRCC_INTERFACE_DMA_MASK; 550 dma = config & IRCC_INTERFACE_DMA_MASK;
520 irq = (config & IRCC_INTERFACE_IRQ_MASK) >> 4; 551 irq = (config & IRCC_INTERFACE_IRQ_MASK) >> 4;
521 552
522 if (high != 0x10 || low != 0xb8 || (chip != 0xf1 && chip != 0xf2)) { 553 if (high != 0x10 || low != 0xb8 || (chip != 0xf1 && chip != 0xf2)) {
523 IRDA_WARNING("%s(), addr 0x%04x - no device found!\n", 554 IRDA_WARNING("%s(), addr 0x%04x - no device found!\n",
524 __FUNCTION__, fir_base); 555 __FUNCTION__, fir_base);
525 goto out3; 556 goto out3;
526 } 557 }
@@ -529,6 +560,7 @@ static int smsc_ircc_present(unsigned int fir_base, unsigned int sir_base)
529 chip & 0x0f, version, fir_base, sir_base, dma, irq); 560 chip & 0x0f, version, fir_base, sir_base, dma, irq);
530 561
531 return 0; 562 return 0;
563
532 out3: 564 out3:
533 release_region(sir_base, SMSC_IRCC2_SIR_CHIP_IO_EXTENT); 565 release_region(sir_base, SMSC_IRCC2_SIR_CHIP_IO_EXTENT);
534 out2: 566 out2:
@@ -543,16 +575,16 @@ static int smsc_ircc_present(unsigned int fir_base, unsigned int sir_base)
543 * Setup I/O 575 * Setup I/O
544 * 576 *
545 */ 577 */
546static void smsc_ircc_setup_io(struct smsc_ircc_cb *self, 578static void smsc_ircc_setup_io(struct smsc_ircc_cb *self,
547 unsigned int fir_base, unsigned int sir_base, 579 unsigned int fir_base, unsigned int sir_base,
548 u8 dma, u8 irq) 580 u8 dma, u8 irq)
549{ 581{
550 unsigned char config, chip_dma, chip_irq; 582 unsigned char config, chip_dma, chip_irq;
551 583
552 register_bank(fir_base, 3); 584 register_bank(fir_base, 3);
553 config = inb(fir_base+IRCC_INTERFACE); 585 config = inb(fir_base + IRCC_INTERFACE);
554 chip_dma = config & IRCC_INTERFACE_DMA_MASK; 586 chip_dma = config & IRCC_INTERFACE_DMA_MASK;
555 chip_irq = (config & IRCC_INTERFACE_IRQ_MASK) >> 4; 587 chip_irq = (config & IRCC_INTERFACE_IRQ_MASK) >> 4;
556 588
557 self->io.fir_base = fir_base; 589 self->io.fir_base = fir_base;
558 self->io.sir_base = sir_base; 590 self->io.sir_base = sir_base;
@@ -566,17 +598,15 @@ static void smsc_ircc_setup_io(struct smsc_ircc_cb *self,
566 IRDA_MESSAGE("%s, Overriding IRQ - chip says %d, using %d\n", 598 IRDA_MESSAGE("%s, Overriding IRQ - chip says %d, using %d\n",
567 driver_name, chip_irq, irq); 599 driver_name, chip_irq, irq);
568 self->io.irq = irq; 600 self->io.irq = irq;
569 } 601 } else
570 else
571 self->io.irq = chip_irq; 602 self->io.irq = chip_irq;
572 603
573 if (dma < 255) { 604 if (dma < 255) {
574 if (dma != chip_dma) 605 if (dma != chip_dma)
575 IRDA_MESSAGE("%s, Overriding DMA - chip says %d, using %d\n", 606 IRDA_MESSAGE("%s, Overriding DMA - chip says %d, using %d\n",
576 driver_name, chip_dma, dma); 607 driver_name, chip_dma, dma);
577 self->io.dma = dma; 608 self->io.dma = dma;
578 } 609 } else
579 else
580 self->io.dma = chip_dma; 610 self->io.dma = chip_dma;
581 611
582} 612}
@@ -591,7 +621,7 @@ static void smsc_ircc_setup_qos(struct smsc_ircc_cb *self)
591{ 621{
592 /* Initialize QoS for this device */ 622 /* Initialize QoS for this device */
593 irda_init_max_qos_capabilies(&self->qos); 623 irda_init_max_qos_capabilies(&self->qos);
594 624
595 self->qos.baud_rate.bits = IR_9600|IR_19200|IR_38400|IR_57600| 625 self->qos.baud_rate.bits = IR_9600|IR_19200|IR_38400|IR_57600|
596 IR_115200|IR_576000|IR_1152000|(IR_4000000 << 8); 626 IR_115200|IR_576000|IR_1152000|(IR_4000000 << 8);
597 627
@@ -608,43 +638,43 @@ static void smsc_ircc_setup_qos(struct smsc_ircc_cb *self)
608 */ 638 */
609static void smsc_ircc_init_chip(struct smsc_ircc_cb *self) 639static void smsc_ircc_init_chip(struct smsc_ircc_cb *self)
610{ 640{
611 int iobase, ir_mode, ctrl, fast; 641 int iobase, ir_mode, ctrl, fast;
612 642
613 IRDA_ASSERT( self != NULL, return; ); 643 IRDA_ASSERT(self != NULL, return;);
614 iobase = self->io.fir_base;
615 644
645 iobase = self->io.fir_base;
616 ir_mode = IRCC_CFGA_IRDA_SIR_A; 646 ir_mode = IRCC_CFGA_IRDA_SIR_A;
617 ctrl = 0; 647 ctrl = 0;
618 fast = 0; 648 fast = 0;
619 649
620 register_bank(iobase, 0); 650 register_bank(iobase, 0);
621 outb(IRCC_MASTER_RESET, iobase+IRCC_MASTER); 651 outb(IRCC_MASTER_RESET, iobase + IRCC_MASTER);
622 outb(0x00, iobase+IRCC_MASTER); 652 outb(0x00, iobase + IRCC_MASTER);
623 653
624 register_bank(iobase, 1); 654 register_bank(iobase, 1);
625 outb(((inb(iobase+IRCC_SCE_CFGA) & 0x87) | ir_mode), 655 outb(((inb(iobase + IRCC_SCE_CFGA) & 0x87) | ir_mode),
626 iobase+IRCC_SCE_CFGA); 656 iobase + IRCC_SCE_CFGA);
627 657
628#ifdef smsc_669 /* Uses pin 88/89 for Rx/Tx */ 658#ifdef smsc_669 /* Uses pin 88/89 for Rx/Tx */
629 outb(((inb(iobase+IRCC_SCE_CFGB) & 0x3f) | IRCC_CFGB_MUX_COM), 659 outb(((inb(iobase + IRCC_SCE_CFGB) & 0x3f) | IRCC_CFGB_MUX_COM),
630 iobase+IRCC_SCE_CFGB); 660 iobase + IRCC_SCE_CFGB);
631#else 661#else
632 outb(((inb(iobase+IRCC_SCE_CFGB) & 0x3f) | IRCC_CFGB_MUX_IR), 662 outb(((inb(iobase + IRCC_SCE_CFGB) & 0x3f) | IRCC_CFGB_MUX_IR),
633 iobase+IRCC_SCE_CFGB); 663 iobase + IRCC_SCE_CFGB);
634#endif 664#endif
635 (void) inb(iobase+IRCC_FIFO_THRESHOLD); 665 (void) inb(iobase + IRCC_FIFO_THRESHOLD);
636 outb(SMSC_IRCC2_FIFO_THRESHOLD, iobase+IRCC_FIFO_THRESHOLD); 666 outb(SMSC_IRCC2_FIFO_THRESHOLD, iobase + IRCC_FIFO_THRESHOLD);
637 667
638 register_bank(iobase, 4); 668 register_bank(iobase, 4);
639 outb((inb(iobase+IRCC_CONTROL) & 0x30) | ctrl, iobase+IRCC_CONTROL); 669 outb((inb(iobase + IRCC_CONTROL) & 0x30) | ctrl, iobase + IRCC_CONTROL);
640 670
641 register_bank(iobase, 0); 671 register_bank(iobase, 0);
642 outb(fast, iobase+IRCC_LCR_A); 672 outb(fast, iobase + IRCC_LCR_A);
643 673
644 smsc_ircc_set_sir_speed(self, SMSC_IRCC2_C_IRDA_FALLBACK_SPEED); 674 smsc_ircc_set_sir_speed(self, SMSC_IRCC2_C_IRDA_FALLBACK_SPEED);
645 675
646 /* Power on device */ 676 /* Power on device */
647 outb(0x00, iobase+IRCC_MASTER); 677 outb(0x00, iobase + IRCC_MASTER);
648} 678}
649 679
650/* 680/*
@@ -662,12 +692,12 @@ static int smsc_ircc_net_ioctl(struct net_device *dev, struct ifreq *rq, int cmd
662 692
663 IRDA_ASSERT(dev != NULL, return -1;); 693 IRDA_ASSERT(dev != NULL, return -1;);
664 694
665 self = dev->priv; 695 self = netdev_priv(dev);
666 696
667 IRDA_ASSERT(self != NULL, return -1;); 697 IRDA_ASSERT(self != NULL, return -1;);
668 698
669 IRDA_DEBUG(2, "%s(), %s, (cmd=0x%X)\n", __FUNCTION__, dev->name, cmd); 699 IRDA_DEBUG(2, "%s(), %s, (cmd=0x%X)\n", __FUNCTION__, dev->name, cmd);
670 700
671 switch (cmd) { 701 switch (cmd) {
672 case SIOCSBANDWIDTH: /* Set bandwidth */ 702 case SIOCSBANDWIDTH: /* Set bandwidth */
673 if (!capable(CAP_NET_ADMIN)) 703 if (!capable(CAP_NET_ADMIN))
@@ -703,14 +733,14 @@ static int smsc_ircc_net_ioctl(struct net_device *dev, struct ifreq *rq, int cmd
703 default: 733 default:
704 ret = -EOPNOTSUPP; 734 ret = -EOPNOTSUPP;
705 } 735 }
706 736
707 return ret; 737 return ret;
708} 738}
709 739
710static struct net_device_stats *smsc_ircc_net_get_stats(struct net_device *dev) 740static struct net_device_stats *smsc_ircc_net_get_stats(struct net_device *dev)
711{ 741{
712 struct smsc_ircc_cb *self = (struct smsc_ircc_cb *) dev->priv; 742 struct smsc_ircc_cb *self = netdev_priv(dev);
713 743
714 return &self->stats; 744 return &self->stats;
715} 745}
716 746
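
Throughout these hunks the dev->priv casts become netdev_priv(dev). When a net_device is allocated with alloc_netdev() or a wrapper such as alloc_irdadev(), the private area sits directly behind the structure and netdev_priv() just returns that offset, with no cast needed; the pattern in isolation:

#include <linux/netdevice.h>

static struct net_device_stats *example_get_stats(struct net_device *dev)
{
	struct smsc_ircc_cb *self = netdev_priv(dev);	/* was: (struct smsc_ircc_cb *) dev->priv */

	return &self->stats;
}
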
@@ -724,11 +754,9 @@ static struct net_device_stats *smsc_ircc_net_get_stats(struct net_device *dev)
724 754
725static void smsc_ircc_timeout(struct net_device *dev) 755static void smsc_ircc_timeout(struct net_device *dev)
726{ 756{
727 struct smsc_ircc_cb *self; 757 struct smsc_ircc_cb *self = netdev_priv(dev);
728 unsigned long flags; 758 unsigned long flags;
729 759
730 self = (struct smsc_ircc_cb *) dev->priv;
731
732 IRDA_WARNING("%s: transmit timed out, changing speed to: %d\n", 760 IRDA_WARNING("%s: transmit timed out, changing speed to: %d\n",
733 dev->name, self->io.speed); 761 dev->name, self->io.speed);
734 spin_lock_irqsave(&self->lock, flags); 762 spin_lock_irqsave(&self->lock, flags);
@@ -751,26 +779,23 @@ int smsc_ircc_hard_xmit_sir(struct sk_buff *skb, struct net_device *dev)
751{ 779{
752 struct smsc_ircc_cb *self; 780 struct smsc_ircc_cb *self;
753 unsigned long flags; 781 unsigned long flags;
754 int iobase;
755 s32 speed; 782 s32 speed;
756 783
757 IRDA_DEBUG(1, "%s\n", __FUNCTION__); 784 IRDA_DEBUG(1, "%s\n", __FUNCTION__);
758 785
759 IRDA_ASSERT(dev != NULL, return 0;); 786 IRDA_ASSERT(dev != NULL, return 0;);
760
761 self = (struct smsc_ircc_cb *) dev->priv;
762 IRDA_ASSERT(self != NULL, return 0;);
763 787
764 iobase = self->io.sir_base; 788 self = netdev_priv(dev);
789 IRDA_ASSERT(self != NULL, return 0;);
765 790
766 netif_stop_queue(dev); 791 netif_stop_queue(dev);
767 792
768 /* Make sure test of self->io.speed & speed change are atomic */ 793 /* Make sure test of self->io.speed & speed change are atomic */
769 spin_lock_irqsave(&self->lock, flags); 794 spin_lock_irqsave(&self->lock, flags);
770 795
771 /* Check if we need to change the speed */ 796 /* Check if we need to change the speed */
772 speed = irda_get_next_speed(skb); 797 speed = irda_get_next_speed(skb);
773 if ((speed != self->io.speed) && (speed != -1)) { 798 if (speed != self->io.speed && speed != -1) {
774 /* Check for empty frame */ 799 /* Check for empty frame */
775 if (!skb->len) { 800 if (!skb->len) {
776 /* 801 /*
@@ -787,27 +812,26 @@ int smsc_ircc_hard_xmit_sir(struct sk_buff *skb, struct net_device *dev)
787 spin_unlock_irqrestore(&self->lock, flags); 812 spin_unlock_irqrestore(&self->lock, flags);
788 dev_kfree_skb(skb); 813 dev_kfree_skb(skb);
789 return 0; 814 return 0;
790 } else {
791 self->new_speed = speed;
792 } 815 }
816 self->new_speed = speed;
793 } 817 }
794 818
795 /* Init tx buffer */ 819 /* Init tx buffer */
796 self->tx_buff.data = self->tx_buff.head; 820 self->tx_buff.data = self->tx_buff.head;
797 821
798 /* Copy skb to tx_buff while wrapping, stuffing and making CRC */ 822 /* Copy skb to tx_buff while wrapping, stuffing and making CRC */
799 self->tx_buff.len = async_wrap_skb(skb, self->tx_buff.data, 823 self->tx_buff.len = async_wrap_skb(skb, self->tx_buff.data,
800 self->tx_buff.truesize); 824 self->tx_buff.truesize);
801 825
802 self->stats.tx_bytes += self->tx_buff.len; 826 self->stats.tx_bytes += self->tx_buff.len;
803 827
804 /* Turn on transmit finished interrupt. Will fire immediately! */ 828 /* Turn on transmit finished interrupt. Will fire immediately! */
805 outb(UART_IER_THRI, iobase+UART_IER); 829 outb(UART_IER_THRI, self->io.sir_base + UART_IER);
806 830
807 spin_unlock_irqrestore(&self->lock, flags); 831 spin_unlock_irqrestore(&self->lock, flags);
808 832
809 dev_kfree_skb(skb); 833 dev_kfree_skb(skb);
810 834
811 return 0; 835 return 0;
812} 836}
813 837
@@ -826,9 +850,9 @@ static void smsc_ircc_set_fir_speed(struct smsc_ircc_cb *self, u32 speed)
826 850
827 self->io.speed = speed; 851 self->io.speed = speed;
828 852
829 switch(speed) { 853 switch (speed) {
830 default: 854 default:
831 case 576000: 855 case 576000:
832 ir_mode = IRCC_CFGA_IRDA_HDLC; 856 ir_mode = IRCC_CFGA_IRDA_HDLC;
833 ctrl = IRCC_CRC; 857 ctrl = IRCC_CRC;
834 fast = 0; 858 fast = 0;
@@ -853,14 +877,14 @@ static void smsc_ircc_set_fir_speed(struct smsc_ircc_cb *self, u32 speed)
853 Now in tranceiver! 877 Now in tranceiver!
854 /* This causes an interrupt */ 878 /* This causes an interrupt */
855 register_bank(fir_base, 0); 879 register_bank(fir_base, 0);
856 outb((inb(fir_base+IRCC_LCR_A) & 0xbf) | fast, fir_base+IRCC_LCR_A); 880 outb((inb(fir_base + IRCC_LCR_A) & 0xbf) | fast, fir_base + IRCC_LCR_A);
857 #endif 881 #endif
858 882
859 register_bank(fir_base, 1); 883 register_bank(fir_base, 1);
860 outb(((inb(fir_base+IRCC_SCE_CFGA) & IRCC_SCE_CFGA_BLOCK_CTRL_BITS_MASK) | ir_mode), fir_base+IRCC_SCE_CFGA); 884 outb(((inb(fir_base + IRCC_SCE_CFGA) & IRCC_SCE_CFGA_BLOCK_CTRL_BITS_MASK) | ir_mode), fir_base + IRCC_SCE_CFGA);
861 885
862 register_bank(fir_base, 4); 886 register_bank(fir_base, 4);
863 outb((inb(fir_base+IRCC_CONTROL) & 0x30) | ctrl, fir_base+IRCC_CONTROL); 887 outb((inb(fir_base + IRCC_CONTROL) & 0x30) | ctrl, fir_base + IRCC_CONTROL);
864} 888}
865 889
866/* 890/*
@@ -885,31 +909,31 @@ static void smsc_ircc_fir_start(struct smsc_ircc_cb *self)
885 /* Reset everything */ 909 /* Reset everything */
886 910
887 /* Install FIR transmit handler */ 911 /* Install FIR transmit handler */
888 dev->hard_start_xmit = smsc_ircc_hard_xmit_fir; 912 dev->hard_start_xmit = smsc_ircc_hard_xmit_fir;
889 913
890 /* Clear FIFO */ 914 /* Clear FIFO */
891 outb(inb(fir_base+IRCC_LCR_A)|IRCC_LCR_A_FIFO_RESET, fir_base+IRCC_LCR_A); 915 outb(inb(fir_base + IRCC_LCR_A) | IRCC_LCR_A_FIFO_RESET, fir_base + IRCC_LCR_A);
892 916
893 /* Enable interrupt */ 917 /* Enable interrupt */
894 /*outb(IRCC_IER_ACTIVE_FRAME|IRCC_IER_EOM, fir_base+IRCC_IER);*/ 918 /*outb(IRCC_IER_ACTIVE_FRAME|IRCC_IER_EOM, fir_base + IRCC_IER);*/
895 919
896 register_bank(fir_base, 1); 920 register_bank(fir_base, 1);
897 921
898 /* Select the TX/RX interface */ 922 /* Select the TX/RX interface */
899#ifdef SMSC_669 /* Uses pin 88/89 for Rx/Tx */ 923#ifdef SMSC_669 /* Uses pin 88/89 for Rx/Tx */
900 outb(((inb(fir_base+IRCC_SCE_CFGB) & 0x3f) | IRCC_CFGB_MUX_COM), 924 outb(((inb(fir_base + IRCC_SCE_CFGB) & 0x3f) | IRCC_CFGB_MUX_COM),
901 fir_base+IRCC_SCE_CFGB); 925 fir_base + IRCC_SCE_CFGB);
902#else 926#else
903 outb(((inb(fir_base+IRCC_SCE_CFGB) & 0x3f) | IRCC_CFGB_MUX_IR), 927 outb(((inb(fir_base + IRCC_SCE_CFGB) & 0x3f) | IRCC_CFGB_MUX_IR),
904 fir_base+IRCC_SCE_CFGB); 928 fir_base + IRCC_SCE_CFGB);
905#endif 929#endif
906 (void) inb(fir_base+IRCC_FIFO_THRESHOLD); 930 (void) inb(fir_base + IRCC_FIFO_THRESHOLD);
907 931
908 /* Enable SCE interrupts */ 932 /* Enable SCE interrupts */
909 outb(0, fir_base+IRCC_MASTER); 933 outb(0, fir_base + IRCC_MASTER);
910 register_bank(fir_base, 0); 934 register_bank(fir_base, 0);
911 outb(IRCC_IER_ACTIVE_FRAME|IRCC_IER_EOM, fir_base+IRCC_IER); 935 outb(IRCC_IER_ACTIVE_FRAME | IRCC_IER_EOM, fir_base + IRCC_IER);
912 outb(IRCC_MASTER_INT_EN, fir_base+IRCC_MASTER); 936 outb(IRCC_MASTER_INT_EN, fir_base + IRCC_MASTER);
913} 937}
914 938
915/* 939/*
@@ -923,13 +947,13 @@ static void smsc_ircc_fir_stop(struct smsc_ircc_cb *self)
923 int fir_base; 947 int fir_base;
924 948
925 IRDA_DEBUG(1, "%s\n", __FUNCTION__); 949 IRDA_DEBUG(1, "%s\n", __FUNCTION__);
926 950
927 IRDA_ASSERT(self != NULL, return;); 951 IRDA_ASSERT(self != NULL, return;);
928 952
929 fir_base = self->io.fir_base; 953 fir_base = self->io.fir_base;
930 register_bank(fir_base, 0); 954 register_bank(fir_base, 0);
931 /*outb(IRCC_MASTER_RESET, fir_base+IRCC_MASTER);*/ 955 /*outb(IRCC_MASTER_RESET, fir_base + IRCC_MASTER);*/
932 outb(inb(fir_base+IRCC_LCR_B) & IRCC_LCR_B_SIP_ENABLE, fir_base+IRCC_LCR_B); 956 outb(inb(fir_base + IRCC_LCR_B) & IRCC_LCR_B_SIP_ENABLE, fir_base + IRCC_LCR_B);
933} 957}
934 958
935 959
@@ -941,18 +965,15 @@ static void smsc_ircc_fir_stop(struct smsc_ircc_cb *self)
941 * This function *must* be called with spinlock held, because it may 965 * This function *must* be called with spinlock held, because it may
942 * be called from the irq handler. - Jean II 966 * be called from the irq handler. - Jean II
943 */ 967 */
944static void smsc_ircc_change_speed(void *priv, u32 speed) 968static void smsc_ircc_change_speed(struct smsc_ircc_cb *self, u32 speed)
945{ 969{
946 struct smsc_ircc_cb *self = (struct smsc_ircc_cb *) priv;
947 struct net_device *dev; 970 struct net_device *dev;
948 int iobase;
949 int last_speed_was_sir; 971 int last_speed_was_sir;
950 972
951 IRDA_DEBUG(0, "%s() changing speed to: %d\n", __FUNCTION__, speed); 973 IRDA_DEBUG(0, "%s() changing speed to: %d\n", __FUNCTION__, speed);
952 974
953 IRDA_ASSERT(self != NULL, return;); 975 IRDA_ASSERT(self != NULL, return;);
954 dev = self->netdev; 976 dev = self->netdev;
955 iobase = self->io.fir_base;
956 977
957 last_speed_was_sir = self->io.speed <= SMSC_IRCC2_MAX_SIR_SPEED; 978 last_speed_was_sir = self->io.speed <= SMSC_IRCC2_MAX_SIR_SPEED;
958 979
@@ -961,30 +982,30 @@ static void smsc_ircc_change_speed(void *priv, u32 speed)
961 speed= 1152000; 982 speed= 1152000;
962 self->io.speed = speed; 983 self->io.speed = speed;
963 last_speed_was_sir = 0; 984 last_speed_was_sir = 0;
964 smsc_ircc_fir_start(self); 985 smsc_ircc_fir_start(self);
965 #endif 986 #endif
966 987
967 if(self->io.speed == 0) 988 if (self->io.speed == 0)
968 smsc_ircc_sir_start(self); 989 smsc_ircc_sir_start(self);
969 990
970 #if 0 991 #if 0
971 if(!last_speed_was_sir) speed = self->io.speed; 992 if (!last_speed_was_sir) speed = self->io.speed;
972 #endif 993 #endif
973 994
974 if(self->io.speed != speed) smsc_ircc_set_transceiver_for_speed(self, speed); 995 if (self->io.speed != speed)
996 smsc_ircc_set_transceiver_for_speed(self, speed);
975 997
976 self->io.speed = speed; 998 self->io.speed = speed;
977 999
978 if(speed <= SMSC_IRCC2_MAX_SIR_SPEED) { 1000 if (speed <= SMSC_IRCC2_MAX_SIR_SPEED) {
979 if(!last_speed_was_sir) { 1001 if (!last_speed_was_sir) {
980 smsc_ircc_fir_stop(self); 1002 smsc_ircc_fir_stop(self);
981 smsc_ircc_sir_start(self); 1003 smsc_ircc_sir_start(self);
982 } 1004 }
983 smsc_ircc_set_sir_speed(self, speed); 1005 smsc_ircc_set_sir_speed(self, speed);
984 } 1006 } else {
985 else { 1007 if (last_speed_was_sir) {
986 if(last_speed_was_sir) { 1008 #if SMSC_IRCC2_C_SIR_STOP
987 #if SMSC_IRCC2_C_SIR_STOP
988 smsc_ircc_sir_stop(self); 1009 smsc_ircc_sir_stop(self);
989 #endif 1010 #endif
990 smsc_ircc_fir_start(self); 1011 smsc_ircc_fir_start(self);
@@ -994,13 +1015,13 @@ static void smsc_ircc_change_speed(void *priv, u32 speed)
994 #if 0 1015 #if 0
995 self->tx_buff.len = 10; 1016 self->tx_buff.len = 10;
996 self->tx_buff.data = self->tx_buff.head; 1017 self->tx_buff.data = self->tx_buff.head;
997 1018
998 smsc_ircc_dma_xmit(self, iobase, 4000); 1019 smsc_ircc_dma_xmit(self, 4000);
999 #endif 1020 #endif
1000 /* Be ready for incoming frames */ 1021 /* Be ready for incoming frames */
1001 smsc_ircc_dma_receive(self, iobase); 1022 smsc_ircc_dma_receive(self);
1002 } 1023 }
1003 1024
1004 netif_wake_queue(dev); 1025 netif_wake_queue(dev);
1005} 1026}
1006 1027
@@ -1010,10 +1031,9 @@ static void smsc_ircc_change_speed(void *priv, u32 speed)
1010 * Set speed of IrDA port to specified baudrate 1031 * Set speed of IrDA port to specified baudrate
1011 * 1032 *
1012 */ 1033 */
1013void smsc_ircc_set_sir_speed(void *priv, __u32 speed) 1034void smsc_ircc_set_sir_speed(struct smsc_ircc_cb *self, __u32 speed)
1014{ 1035{
1015 struct smsc_ircc_cb *self = (struct smsc_ircc_cb *) priv; 1036 int iobase;
1016 int iobase;
1017 int fcr; /* FIFO control reg */ 1037 int fcr; /* FIFO control reg */
1018 int lcr; /* Line control reg */ 1038 int lcr; /* Line control reg */
1019 int divisor; 1039 int divisor;
@@ -1022,38 +1042,36 @@ void smsc_ircc_set_sir_speed(void *priv, __u32 speed)
1022 1042
1023 IRDA_ASSERT(self != NULL, return;); 1043 IRDA_ASSERT(self != NULL, return;);
1024 iobase = self->io.sir_base; 1044 iobase = self->io.sir_base;
1025 1045
1026 /* Update accounting for new speed */ 1046 /* Update accounting for new speed */
1027 self->io.speed = speed; 1047 self->io.speed = speed;
1028 1048
1029 /* Turn off interrupts */ 1049 /* Turn off interrupts */
1030 outb(0, iobase+UART_IER); 1050 outb(0, iobase + UART_IER);
1051
1052 divisor = SMSC_IRCC2_MAX_SIR_SPEED / speed;
1031 1053
1032 divisor = SMSC_IRCC2_MAX_SIR_SPEED/speed;
1033
1034 fcr = UART_FCR_ENABLE_FIFO; 1054 fcr = UART_FCR_ENABLE_FIFO;
1035 1055
1036 /* 1056 /*
1037 * Use trigger level 1 to avoid 3 ms. timeout delay at 9600 bps, and 1057 * Use trigger level 1 to avoid 3 ms. timeout delay at 9600 bps, and
1038 * almost 1,7 ms at 19200 bps. At speeds above that we can just forget 1058 * almost 1,7 ms at 19200 bps. At speeds above that we can just forget
1039 * about this timeout since it will always be fast enough. 1059 * about this timeout since it will always be fast enough.
1040 */ 1060 */
1041 if (self->io.speed < 38400) 1061 fcr |= self->io.speed < 38400 ?
1042 fcr |= UART_FCR_TRIGGER_1; 1062 UART_FCR_TRIGGER_1 : UART_FCR_TRIGGER_14;
1043 else 1063
1044 fcr |= UART_FCR_TRIGGER_14;
1045
1046 /* IrDA ports use 8N1 */ 1064 /* IrDA ports use 8N1 */
1047 lcr = UART_LCR_WLEN8; 1065 lcr = UART_LCR_WLEN8;
1048 1066
1049 outb(UART_LCR_DLAB | lcr, iobase+UART_LCR); /* Set DLAB */ 1067 outb(UART_LCR_DLAB | lcr, iobase + UART_LCR); /* Set DLAB */
1050 outb(divisor & 0xff, iobase+UART_DLL); /* Set speed */ 1068 outb(divisor & 0xff, iobase + UART_DLL); /* Set speed */
1051 outb(divisor >> 8, iobase+UART_DLM); 1069 outb(divisor >> 8, iobase + UART_DLM);
1052 outb(lcr, iobase+UART_LCR); /* Set 8N1 */ 1070 outb(lcr, iobase + UART_LCR); /* Set 8N1 */
1053 outb(fcr, iobase+UART_FCR); /* Enable FIFO's */ 1071 outb(fcr, iobase + UART_FCR); /* Enable FIFO's */
1054 1072
1055 /* Turn on interrups */ 1073 /* Turn on interrups */
1056 outb(UART_IER_RLSI|UART_IER_RDI|UART_IER_THRI, iobase+UART_IER); 1074 outb(UART_IER_RLSI | UART_IER_RDI | UART_IER_THRI, iobase + UART_IER);
1057 1075
1058 IRDA_DEBUG(2, "%s() speed changed to: %d\n", __FUNCTION__, speed); 1076 IRDA_DEBUG(2, "%s() speed changed to: %d\n", __FUNCTION__, speed);
1059} 1077}
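
Worked numbers for the SIR speed setup above, assuming SMSC_IRCC2_MAX_SIR_SPEED is the usual 115200 bit/s ceiling of a 16550-style UART (the constant itself is defined in smsc-ircc2.h, outside this patch): the divisor written via DLL/DLM is simply that ceiling divided by the requested rate, and rates below 38400 additionally get the 1-byte FIFO trigger. Standalone check of the divisor math:

#include <stdio.h>

int main(void)
{
	const unsigned int max_sir = 115200;	/* assumed SMSC_IRCC2_MAX_SIR_SPEED */
	const unsigned int speeds[] = { 9600, 19200, 38400, 57600, 115200 };
	unsigned int i;

	for (i = 0; i < sizeof(speeds) / sizeof(speeds[0]); i++) {
		unsigned int divisor = max_sir / speeds[i];

		printf("%6u bps -> DLL=0x%02x DLM=0x%02x\n",
		       speeds[i], divisor & 0xff, divisor >> 8);
	}
	return 0;
}
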
@@ -1070,15 +1088,12 @@ static int smsc_ircc_hard_xmit_fir(struct sk_buff *skb, struct net_device *dev)
1070 struct smsc_ircc_cb *self; 1088 struct smsc_ircc_cb *self;
1071 unsigned long flags; 1089 unsigned long flags;
1072 s32 speed; 1090 s32 speed;
1073 int iobase;
1074 int mtt; 1091 int mtt;
1075 1092
1076 IRDA_ASSERT(dev != NULL, return 0;); 1093 IRDA_ASSERT(dev != NULL, return 0;);
1077 self = (struct smsc_ircc_cb *) dev->priv; 1094 self = netdev_priv(dev);
1078 IRDA_ASSERT(self != NULL, return 0;); 1095 IRDA_ASSERT(self != NULL, return 0;);
1079 1096
1080 iobase = self->io.fir_base;
1081
1082 netif_stop_queue(dev); 1097 netif_stop_queue(dev);
1083 1098
1084 /* Make sure test of self->io.speed & speed change are atomic */ 1099 /* Make sure test of self->io.speed & speed change are atomic */
@@ -1086,30 +1101,31 @@ static int smsc_ircc_hard_xmit_fir(struct sk_buff *skb, struct net_device *dev)
1086 1101
1087 /* Check if we need to change the speed after this frame */ 1102 /* Check if we need to change the speed after this frame */
1088 speed = irda_get_next_speed(skb); 1103 speed = irda_get_next_speed(skb);
1089 if ((speed != self->io.speed) && (speed != -1)) { 1104 if (speed != self->io.speed && speed != -1) {
1090 /* Check for empty frame */ 1105 /* Check for empty frame */
1091 if (!skb->len) { 1106 if (!skb->len) {
1092 /* Note : you should make sure that speed changes 1107 /* Note : you should make sure that speed changes
1093 * are not going to corrupt any outgoing frame. 1108 * are not going to corrupt any outgoing frame.
1094 * Look at nsc-ircc for the gory details - Jean II */ 1109 * Look at nsc-ircc for the gory details - Jean II */
1095 smsc_ircc_change_speed(self, speed); 1110 smsc_ircc_change_speed(self, speed);
1096 spin_unlock_irqrestore(&self->lock, flags); 1111 spin_unlock_irqrestore(&self->lock, flags);
1097 dev_kfree_skb(skb); 1112 dev_kfree_skb(skb);
1098 return 0; 1113 return 0;
1099 } else 1114 }
1100 self->new_speed = speed; 1115
1116 self->new_speed = speed;
1101 } 1117 }
1102 1118
1103 memcpy(self->tx_buff.head, skb->data, skb->len); 1119 memcpy(self->tx_buff.head, skb->data, skb->len);
1104 1120
1105 self->tx_buff.len = skb->len; 1121 self->tx_buff.len = skb->len;
1106 self->tx_buff.data = self->tx_buff.head; 1122 self->tx_buff.data = self->tx_buff.head;
1107 1123
1108 mtt = irda_get_mtt(skb); 1124 mtt = irda_get_mtt(skb);
1109 if (mtt) { 1125 if (mtt) {
1110 int bofs; 1126 int bofs;
1111 1127
1112 /* 1128 /*
1113 * Compute how many BOFs (STA or PA's) we need to waste the 1129 * Compute how many BOFs (STA or PA's) we need to waste the
1114 * min turn time given the speed of the link. 1130 * min turn time given the speed of the link.
1115 */ 1131 */
@@ -1117,11 +1133,12 @@ static int smsc_ircc_hard_xmit_fir(struct sk_buff *skb, struct net_device *dev)
1117 if (bofs > 4095) 1133 if (bofs > 4095)
1118 bofs = 4095; 1134 bofs = 4095;
1119 1135
1120 smsc_ircc_dma_xmit(self, iobase, bofs); 1136 smsc_ircc_dma_xmit(self, bofs);
1121 } else { 1137 } else {
1122 /* Transmit frame */ 1138 /* Transmit frame */
1123 smsc_ircc_dma_xmit(self, iobase, 0); 1139 smsc_ircc_dma_xmit(self, 0);
1124 } 1140 }
1141
1125 spin_unlock_irqrestore(&self->lock, flags); 1142 spin_unlock_irqrestore(&self->lock, flags);
1126 dev_kfree_skb(skb); 1143 dev_kfree_skb(skb);
1127 1144
@@ -1129,43 +1146,44 @@ static int smsc_ircc_hard_xmit_fir(struct sk_buff *skb, struct net_device *dev)
1129} 1146}
1130 1147
1131/* 1148/*
1132 * Function smsc_ircc_dma_xmit (self, iobase) 1149 * Function smsc_ircc_dma_xmit (self, bofs)
1133 * 1150 *
1134 * Transmit data using DMA 1151 * Transmit data using DMA
1135 * 1152 *
1136 */ 1153 */
1137static void smsc_ircc_dma_xmit(struct smsc_ircc_cb *self, int iobase, int bofs) 1154static void smsc_ircc_dma_xmit(struct smsc_ircc_cb *self, int bofs)
1138{ 1155{
1156 int iobase = self->io.fir_base;
1139 u8 ctrl; 1157 u8 ctrl;
1140 1158
1141 IRDA_DEBUG(3, "%s\n", __FUNCTION__); 1159 IRDA_DEBUG(3, "%s\n", __FUNCTION__);
1142#if 1 1160#if 1
1143 /* Disable Rx */ 1161 /* Disable Rx */
1144 register_bank(iobase, 0); 1162 register_bank(iobase, 0);
1145 outb(0x00, iobase+IRCC_LCR_B); 1163 outb(0x00, iobase + IRCC_LCR_B);
1146#endif 1164#endif
1147 register_bank(iobase, 1); 1165 register_bank(iobase, 1);
1148 outb(inb(iobase+IRCC_SCE_CFGB) & ~IRCC_CFGB_DMA_ENABLE, 1166 outb(inb(iobase + IRCC_SCE_CFGB) & ~IRCC_CFGB_DMA_ENABLE,
1149 iobase+IRCC_SCE_CFGB); 1167 iobase + IRCC_SCE_CFGB);
1150 1168
1151 self->io.direction = IO_XMIT; 1169 self->io.direction = IO_XMIT;
1152 1170
1153 /* Set BOF additional count for generating the min turn time */ 1171 /* Set BOF additional count for generating the min turn time */
1154 register_bank(iobase, 4); 1172 register_bank(iobase, 4);
1155 outb(bofs & 0xff, iobase+IRCC_BOF_COUNT_LO); 1173 outb(bofs & 0xff, iobase + IRCC_BOF_COUNT_LO);
1156 ctrl = inb(iobase+IRCC_CONTROL) & 0xf0; 1174 ctrl = inb(iobase + IRCC_CONTROL) & 0xf0;
1157 outb(ctrl | ((bofs >> 8) & 0x0f), iobase+IRCC_BOF_COUNT_HI); 1175 outb(ctrl | ((bofs >> 8) & 0x0f), iobase + IRCC_BOF_COUNT_HI);
1158 1176
1159 /* Set max Tx frame size */ 1177 /* Set max Tx frame size */
1160 outb(self->tx_buff.len >> 8, iobase+IRCC_TX_SIZE_HI); 1178 outb(self->tx_buff.len >> 8, iobase + IRCC_TX_SIZE_HI);
1161 outb(self->tx_buff.len & 0xff, iobase+IRCC_TX_SIZE_LO); 1179 outb(self->tx_buff.len & 0xff, iobase + IRCC_TX_SIZE_LO);
1162 1180
1163 /*outb(UART_MCR_OUT2, self->io.sir_base + UART_MCR);*/ 1181 /*outb(UART_MCR_OUT2, self->io.sir_base + UART_MCR);*/
1164 1182
1165 /* Enable burst mode chip Tx DMA */ 1183 /* Enable burst mode chip Tx DMA */
1166 register_bank(iobase, 1); 1184 register_bank(iobase, 1);
1167 outb(inb(iobase+IRCC_SCE_CFGB) | IRCC_CFGB_DMA_ENABLE | 1185 outb(inb(iobase + IRCC_SCE_CFGB) | IRCC_CFGB_DMA_ENABLE |
1168 IRCC_CFGB_DMA_BURST, iobase+IRCC_SCE_CFGB); 1186 IRCC_CFGB_DMA_BURST, iobase + IRCC_SCE_CFGB);
1169 1187
1170 /* Setup DMA controller (must be done after enabling chip DMA) */ 1188 /* Setup DMA controller (must be done after enabling chip DMA) */
1171 irda_setup_dma(self->io.dma, self->tx_buff_dma, self->tx_buff.len, 1189 irda_setup_dma(self->io.dma, self->tx_buff_dma, self->tx_buff.len,
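
In smsc_ircc_dma_xmit() above, the BOF count (capped at 4095 by the caller) is a 12-bit value split across two 8-bit registers: the low byte goes to IRCC_BOF_COUNT_LO, and the top four bits are merged into IRCC_BOF_COUNT_HI together with the preserved upper nibble read back from IRCC_CONTROL. A standalone check of that packing:

#include <assert.h>
#include <stdio.h>

int main(void)
{
	unsigned int bofs = 4095;	/* maximum the 12-bit hardware field holds */
	unsigned char ctrl = 0xf0;	/* pretend upper-nibble control bits */
	unsigned char lo = bofs & 0xff;
	unsigned char hi = ctrl | ((bofs >> 8) & 0x0f);
	unsigned int reassembled = ((hi & 0x0f) << 8) | lo;

	assert(reassembled == bofs);
	printf("bofs=%u -> LO=0x%02x HI=0x%02x\n", bofs, lo, hi);
	return 0;
}
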
@@ -1174,50 +1192,52 @@ static void smsc_ircc_dma_xmit(struct smsc_ircc_cb *self, int iobase, int bofs)
1174 /* Enable interrupt */ 1192 /* Enable interrupt */
1175 1193
1176 register_bank(iobase, 0); 1194 register_bank(iobase, 0);
1177 outb(IRCC_IER_ACTIVE_FRAME | IRCC_IER_EOM, iobase+IRCC_IER); 1195 outb(IRCC_IER_ACTIVE_FRAME | IRCC_IER_EOM, iobase + IRCC_IER);
1178 outb(IRCC_MASTER_INT_EN, iobase+IRCC_MASTER); 1196 outb(IRCC_MASTER_INT_EN, iobase + IRCC_MASTER);
1179 1197
1180 /* Enable transmit */ 1198 /* Enable transmit */
1181 outb(IRCC_LCR_B_SCE_TRANSMIT | IRCC_LCR_B_SIP_ENABLE, iobase+IRCC_LCR_B); 1199 outb(IRCC_LCR_B_SCE_TRANSMIT | IRCC_LCR_B_SIP_ENABLE, iobase + IRCC_LCR_B);
1182} 1200}
1183 1201
1184/* 1202/*
1185 * Function smsc_ircc_dma_xmit_complete (self) 1203 * Function smsc_ircc_dma_xmit_complete (self)
1186 * 1204 *
1187 * The transfer of a frame in finished. This function will only be called 1205 * The transfer of a frame in finished. This function will only be called
1188 * by the interrupt handler 1206 * by the interrupt handler
1189 * 1207 *
1190 */ 1208 */
1191static void smsc_ircc_dma_xmit_complete(struct smsc_ircc_cb *self, int iobase) 1209static void smsc_ircc_dma_xmit_complete(struct smsc_ircc_cb *self)
1192{ 1210{
1211 int iobase = self->io.fir_base;
1212
1193 IRDA_DEBUG(3, "%s\n", __FUNCTION__); 1213 IRDA_DEBUG(3, "%s\n", __FUNCTION__);
1194#if 0 1214#if 0
1195 /* Disable Tx */ 1215 /* Disable Tx */
1196 register_bank(iobase, 0); 1216 register_bank(iobase, 0);
1197 outb(0x00, iobase+IRCC_LCR_B); 1217 outb(0x00, iobase + IRCC_LCR_B);
1198#endif 1218#endif
1199 register_bank(self->io.fir_base, 1); 1219 register_bank(iobase, 1);
1200 outb(inb(self->io.fir_base+IRCC_SCE_CFGB) & ~IRCC_CFGB_DMA_ENABLE, 1220 outb(inb(iobase + IRCC_SCE_CFGB) & ~IRCC_CFGB_DMA_ENABLE,
1201 self->io.fir_base+IRCC_SCE_CFGB); 1221 iobase + IRCC_SCE_CFGB);
1202 1222
1203 /* Check for underrun! */ 1223 /* Check for underrun! */
1204 register_bank(iobase, 0); 1224 register_bank(iobase, 0);
1205 if (inb(iobase+IRCC_LSR) & IRCC_LSR_UNDERRUN) { 1225 if (inb(iobase + IRCC_LSR) & IRCC_LSR_UNDERRUN) {
1206 self->stats.tx_errors++; 1226 self->stats.tx_errors++;
1207 self->stats.tx_fifo_errors++; 1227 self->stats.tx_fifo_errors++;
1208 1228
1209 /* Reset error condition */ 1229 /* Reset error condition */
1210 register_bank(iobase, 0); 1230 register_bank(iobase, 0);
1211 outb(IRCC_MASTER_ERROR_RESET, iobase+IRCC_MASTER); 1231 outb(IRCC_MASTER_ERROR_RESET, iobase + IRCC_MASTER);
1212 outb(0x00, iobase+IRCC_MASTER); 1232 outb(0x00, iobase + IRCC_MASTER);
1213 } else { 1233 } else {
1214 self->stats.tx_packets++; 1234 self->stats.tx_packets++;
1215 self->stats.tx_bytes += self->tx_buff.len; 1235 self->stats.tx_bytes += self->tx_buff.len;
1216 } 1236 }
1217 1237
1218 /* Check if it's time to change the speed */ 1238 /* Check if it's time to change the speed */
1219 if (self->new_speed) { 1239 if (self->new_speed) {
1220 smsc_ircc_change_speed(self, self->new_speed); 1240 smsc_ircc_change_speed(self, self->new_speed);
1221 self->new_speed = 0; 1241 self->new_speed = 0;
1222 } 1242 }
1223 1243
@@ -1231,31 +1251,32 @@ static void smsc_ircc_dma_xmit_complete(struct smsc_ircc_cb *self, int iobase)
1231 * if it starts to receive a frame. 1251 * if it starts to receive a frame.
1232 * 1252 *
1233 */ 1253 */
1234static int smsc_ircc_dma_receive(struct smsc_ircc_cb *self, int iobase) 1254static int smsc_ircc_dma_receive(struct smsc_ircc_cb *self)
1235{ 1255{
1256 int iobase = self->io.fir_base;
1236#if 0 1257#if 0
1237 /* Turn off chip DMA */ 1258 /* Turn off chip DMA */
1238 register_bank(iobase, 1); 1259 register_bank(iobase, 1);
1239 outb(inb(iobase+IRCC_SCE_CFGB) & ~IRCC_CFGB_DMA_ENABLE, 1260 outb(inb(iobase + IRCC_SCE_CFGB) & ~IRCC_CFGB_DMA_ENABLE,
1240 iobase+IRCC_SCE_CFGB); 1261 iobase + IRCC_SCE_CFGB);
1241#endif 1262#endif
1242 1263
1243 /* Disable Tx */ 1264 /* Disable Tx */
1244 register_bank(iobase, 0); 1265 register_bank(iobase, 0);
1245 outb(0x00, iobase+IRCC_LCR_B); 1266 outb(0x00, iobase + IRCC_LCR_B);
1246 1267
1247 /* Turn off chip DMA */ 1268 /* Turn off chip DMA */
1248 register_bank(iobase, 1); 1269 register_bank(iobase, 1);
1249 outb(inb(iobase+IRCC_SCE_CFGB) & ~IRCC_CFGB_DMA_ENABLE, 1270 outb(inb(iobase + IRCC_SCE_CFGB) & ~IRCC_CFGB_DMA_ENABLE,
1250 iobase+IRCC_SCE_CFGB); 1271 iobase + IRCC_SCE_CFGB);
1251 1272
1252 self->io.direction = IO_RECV; 1273 self->io.direction = IO_RECV;
1253 self->rx_buff.data = self->rx_buff.head; 1274 self->rx_buff.data = self->rx_buff.head;
1254 1275
1255 /* Set max Rx frame size */ 1276 /* Set max Rx frame size */
1256 register_bank(iobase, 4); 1277 register_bank(iobase, 4);
1257 outb((2050 >> 8) & 0x0f, iobase+IRCC_RX_SIZE_HI); 1278 outb((2050 >> 8) & 0x0f, iobase + IRCC_RX_SIZE_HI);
1258 outb(2050 & 0xff, iobase+IRCC_RX_SIZE_LO); 1279 outb(2050 & 0xff, iobase + IRCC_RX_SIZE_LO);
1259 1280
1260 /* Setup DMA controller */ 1281 /* Setup DMA controller */
1261 irda_setup_dma(self->io.dma, self->rx_buff_dma, self->rx_buff.truesize, 1282 irda_setup_dma(self->io.dma, self->rx_buff_dma, self->rx_buff.truesize,
@@ -1263,83 +1284,83 @@ static int smsc_ircc_dma_receive(struct smsc_ircc_cb *self, int iobase)
1263 1284
1264 /* Enable burst mode chip Rx DMA */ 1285 /* Enable burst mode chip Rx DMA */
1265 register_bank(iobase, 1); 1286 register_bank(iobase, 1);
1266 outb(inb(iobase+IRCC_SCE_CFGB) | IRCC_CFGB_DMA_ENABLE | 1287 outb(inb(iobase + IRCC_SCE_CFGB) | IRCC_CFGB_DMA_ENABLE |
1267 IRCC_CFGB_DMA_BURST, iobase+IRCC_SCE_CFGB); 1288 IRCC_CFGB_DMA_BURST, iobase + IRCC_SCE_CFGB);
1268 1289
1269 /* Enable interrupt */ 1290 /* Enable interrupt */
1270 register_bank(iobase, 0); 1291 register_bank(iobase, 0);
1271 outb(IRCC_IER_ACTIVE_FRAME | IRCC_IER_EOM, iobase+IRCC_IER); 1292 outb(IRCC_IER_ACTIVE_FRAME | IRCC_IER_EOM, iobase + IRCC_IER);
1272 outb(IRCC_MASTER_INT_EN, iobase+IRCC_MASTER); 1293 outb(IRCC_MASTER_INT_EN, iobase + IRCC_MASTER);
1273
1274 1294
1275 /* Enable receiver */ 1295 /* Enable receiver */
1276 register_bank(iobase, 0); 1296 register_bank(iobase, 0);
1277 outb(IRCC_LCR_B_SCE_RECEIVE | IRCC_LCR_B_SIP_ENABLE, 1297 outb(IRCC_LCR_B_SCE_RECEIVE | IRCC_LCR_B_SIP_ENABLE,
1278 iobase+IRCC_LCR_B); 1298 iobase + IRCC_LCR_B);
1279 1299
1280 return 0; 1300 return 0;
1281} 1301}
1282 1302
1283/* 1303/*
1284 * Function smsc_ircc_dma_receive_complete(self, iobase) 1304 * Function smsc_ircc_dma_receive_complete(self)
1285 * 1305 *
1286 * Finished with receiving frames 1306 * Finished with receiving frames
1287 * 1307 *
1288 */ 1308 */
1289static void smsc_ircc_dma_receive_complete(struct smsc_ircc_cb *self, int iobase) 1309static void smsc_ircc_dma_receive_complete(struct smsc_ircc_cb *self)
1290{ 1310{
1291 struct sk_buff *skb; 1311 struct sk_buff *skb;
1292 int len, msgcnt, lsr; 1312 int len, msgcnt, lsr;
1293 1313 int iobase = self->io.fir_base;
1314
1294 register_bank(iobase, 0); 1315 register_bank(iobase, 0);
1295 1316
1296 IRDA_DEBUG(3, "%s\n", __FUNCTION__); 1317 IRDA_DEBUG(3, "%s\n", __FUNCTION__);
1297#if 0 1318#if 0
1298 /* Disable Rx */ 1319 /* Disable Rx */
1299 register_bank(iobase, 0); 1320 register_bank(iobase, 0);
1300 outb(0x00, iobase+IRCC_LCR_B); 1321 outb(0x00, iobase + IRCC_LCR_B);
1301#endif 1322#endif
1302 register_bank(iobase, 0); 1323 register_bank(iobase, 0);
1303 outb(inb(iobase+IRCC_LSAR) & ~IRCC_LSAR_ADDRESS_MASK, iobase+IRCC_LSAR); 1324 outb(inb(iobase + IRCC_LSAR) & ~IRCC_LSAR_ADDRESS_MASK, iobase + IRCC_LSAR);
1304 lsr= inb(iobase+IRCC_LSR); 1325 lsr= inb(iobase + IRCC_LSR);
1305 msgcnt = inb(iobase+IRCC_LCR_B) & 0x08; 1326 msgcnt = inb(iobase + IRCC_LCR_B) & 0x08;
1306 1327
1307 IRDA_DEBUG(2, "%s: dma count = %d\n", __FUNCTION__, 1328 IRDA_DEBUG(2, "%s: dma count = %d\n", __FUNCTION__,
1308 get_dma_residue(self->io.dma)); 1329 get_dma_residue(self->io.dma));
1309 1330
1310 len = self->rx_buff.truesize - get_dma_residue(self->io.dma); 1331 len = self->rx_buff.truesize - get_dma_residue(self->io.dma);
1311 1332
1312 /* Look for errors 1333 /* Look for errors */
1313 */ 1334 if (lsr & (IRCC_LSR_FRAME_ERROR | IRCC_LSR_CRC_ERROR | IRCC_LSR_SIZE_ERROR)) {
1314
1315 if(lsr & (IRCC_LSR_FRAME_ERROR | IRCC_LSR_CRC_ERROR | IRCC_LSR_SIZE_ERROR)) {
1316 self->stats.rx_errors++; 1335 self->stats.rx_errors++;
1317 if(lsr & IRCC_LSR_FRAME_ERROR) self->stats.rx_frame_errors++; 1336 if (lsr & IRCC_LSR_FRAME_ERROR)
1318 if(lsr & IRCC_LSR_CRC_ERROR) self->stats.rx_crc_errors++; 1337 self->stats.rx_frame_errors++;
1319 if(lsr & IRCC_LSR_SIZE_ERROR) self->stats.rx_length_errors++; 1338 if (lsr & IRCC_LSR_CRC_ERROR)
1320 if(lsr & (IRCC_LSR_UNDERRUN | IRCC_LSR_OVERRUN)) self->stats.rx_length_errors++; 1339 self->stats.rx_crc_errors++;
1340 if (lsr & IRCC_LSR_SIZE_ERROR)
1341 self->stats.rx_length_errors++;
1342 if (lsr & (IRCC_LSR_UNDERRUN | IRCC_LSR_OVERRUN))
1343 self->stats.rx_length_errors++;
1321 return; 1344 return;
1322 } 1345 }
1346
1323 /* Remove CRC */ 1347 /* Remove CRC */
1324 if (self->io.speed < 4000000) 1348 len -= self->io.speed < 4000000 ? 2 : 4;
1325 len -= 2;
1326 else
1327 len -= 4;
1328 1349
1329 if ((len < 2) || (len > 2050)) { 1350 if (len < 2 || len > 2050) {
1330 IRDA_WARNING("%s(), bogus len=%d\n", __FUNCTION__, len); 1351 IRDA_WARNING("%s(), bogus len=%d\n", __FUNCTION__, len);
1331 return; 1352 return;
1332 } 1353 }
1333 IRDA_DEBUG(2, "%s: msgcnt = %d, len=%d\n", __FUNCTION__, msgcnt, len); 1354 IRDA_DEBUG(2, "%s: msgcnt = %d, len=%d\n", __FUNCTION__, msgcnt, len);
1334 1355
1335 skb = dev_alloc_skb(len+1); 1356 skb = dev_alloc_skb(len + 1);
1336 if (!skb) { 1357 if (!skb) {
1337 IRDA_WARNING("%s(), memory squeeze, dropping frame.\n", 1358 IRDA_WARNING("%s(), memory squeeze, dropping frame.\n",
1338 __FUNCTION__); 1359 __FUNCTION__);
1339 return; 1360 return;
1340 } 1361 }
1341 /* Make sure IP header gets aligned */ 1362 /* Make sure IP header gets aligned */
1342 skb_reserve(skb, 1); 1363 skb_reserve(skb, 1);
1343 1364
1344 memcpy(skb_put(skb, len), self->rx_buff.data, len); 1365 memcpy(skb_put(skb, len), self->rx_buff.data, len);
1345 self->stats.rx_packets++; 1366 self->stats.rx_packets++;
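
Several of the hunks above change helpers such as smsc_ircc_dma_xmit_complete() and smsc_ircc_dma_receive_complete() to take only the control block and derive the I/O base from self->io.fir_base, and they trim either a 16-bit or a 32-bit CRC depending on the link speed. The following is a minimal, compilable sketch of that pattern in plain userspace C; the struct and function names are invented for illustration and are not the driver's own.

    #include <stdio.h>

    /* Hypothetical stand-in for the driver's control block: the I/O base
     * and the line speed live in one place, and every helper derives what
     * it needs from the control block instead of taking extra arguments. */
    struct ircc_io { int fir_base; int speed; };
    struct ircc_cb { struct ircc_io io; };

    /* Mirrors the CRC trim in smsc_ircc_dma_receive_complete(): speeds
     * below 4 Mb/s carry a 16-bit CRC, 4 Mb/s carries a 32-bit CRC. */
    static int frame_payload_len(const struct ircc_cb *self, int dma_len)
    {
            return dma_len - (self->io.speed < 4000000 ? 2 : 4);
    }

    int main(void)
    {
            struct ircc_cb cb = { .io = { .fir_base = 0x2e8, .speed = 4000000 } };

            printf("iobase=0x%x payload=%d\n",
                   (unsigned int)cb.io.fir_base, frame_payload_len(&cb, 1504));
            return 0;
    }
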
@@ -1357,7 +1378,7 @@ static void smsc_ircc_dma_receive_complete(struct smsc_ircc_cb *self, int iobase
1357 * Receive one frame from the infrared port 1378 * Receive one frame from the infrared port
1358 * 1379 *
1359 */ 1380 */
1360static void smsc_ircc_sir_receive(struct smsc_ircc_cb *self) 1381static void smsc_ircc_sir_receive(struct smsc_ircc_cb *self)
1361{ 1382{
1362 int boguscount = 0; 1383 int boguscount = 0;
1363 int iobase; 1384 int iobase;
@@ -1366,20 +1387,20 @@ static void smsc_ircc_sir_receive(struct smsc_ircc_cb *self)
1366 1387
1367 iobase = self->io.sir_base; 1388 iobase = self->io.sir_base;
1368 1389
1369 /* 1390 /*
1370 * Receive all characters in Rx FIFO, unwrap and unstuff them. 1391 * Receive all characters in Rx FIFO, unwrap and unstuff them.
1371 * async_unwrap_char will deliver all found frames 1392 * async_unwrap_char will deliver all found frames
1372 */ 1393 */
1373 do { 1394 do {
1374 async_unwrap_char(self->netdev, &self->stats, &self->rx_buff, 1395 async_unwrap_char(self->netdev, &self->stats, &self->rx_buff,
1375 inb(iobase+UART_RX)); 1396 inb(iobase + UART_RX));
1376 1397
1377 /* Make sure we don't stay here to long */ 1398 /* Make sure we don't stay here to long */
1378 if (boguscount++ > 32) { 1399 if (boguscount++ > 32) {
1379 IRDA_DEBUG(2, "%s(), breaking!\n", __FUNCTION__); 1400 IRDA_DEBUG(2, "%s(), breaking!\n", __FUNCTION__);
1380 break; 1401 break;
1381 } 1402 }
1382 } while (inb(iobase+UART_LSR) & UART_LSR_DR); 1403 } while (inb(iobase + UART_LSR) & UART_LSR_DR);
1383} 1404}
1384 1405
1385 1406
@@ -1397,18 +1418,19 @@ static irqreturn_t smsc_ircc_interrupt(int irq, void *dev_id, struct pt_regs *re
1397 irqreturn_t ret = IRQ_NONE; 1418 irqreturn_t ret = IRQ_NONE;
1398 1419
1399 if (dev == NULL) { 1420 if (dev == NULL) {
1400 printk(KERN_WARNING "%s: irq %d for unknown device.\n", 1421 printk(KERN_WARNING "%s: irq %d for unknown device.\n",
1401 driver_name, irq); 1422 driver_name, irq);
1402 goto irq_ret; 1423 goto irq_ret;
1403 } 1424 }
1404 self = (struct smsc_ircc_cb *) dev->priv; 1425
1426 self = netdev_priv(dev);
1405 IRDA_ASSERT(self != NULL, return IRQ_NONE;); 1427 IRDA_ASSERT(self != NULL, return IRQ_NONE;);
1406 1428
1407 /* Serialise the interrupt handler in various CPUs, stop Tx path */ 1429 /* Serialise the interrupt handler in various CPUs, stop Tx path */
1408 spin_lock(&self->lock); 1430 spin_lock(&self->lock);
1409 1431
1410 /* Check if we should use the SIR interrupt handler */ 1432 /* Check if we should use the SIR interrupt handler */
1411 if (self->io.speed <= SMSC_IRCC2_MAX_SIR_SPEED) { 1433 if (self->io.speed <= SMSC_IRCC2_MAX_SIR_SPEED) {
1412 ret = smsc_ircc_interrupt_sir(dev); 1434 ret = smsc_ircc_interrupt_sir(dev);
1413 goto irq_ret_unlock; 1435 goto irq_ret_unlock;
1414 } 1436 }
@@ -1416,25 +1438,25 @@ static irqreturn_t smsc_ircc_interrupt(int irq, void *dev_id, struct pt_regs *re
1416 iobase = self->io.fir_base; 1438 iobase = self->io.fir_base;
1417 1439
1418 register_bank(iobase, 0); 1440 register_bank(iobase, 0);
1419 iir = inb(iobase+IRCC_IIR); 1441 iir = inb(iobase + IRCC_IIR);
1420 if (iir == 0) 1442 if (iir == 0)
1421 goto irq_ret_unlock; 1443 goto irq_ret_unlock;
1422 ret = IRQ_HANDLED; 1444 ret = IRQ_HANDLED;
1423 1445
1424 /* Disable interrupts */ 1446 /* Disable interrupts */
1425 outb(0, iobase+IRCC_IER); 1447 outb(0, iobase + IRCC_IER);
1426 lcra = inb(iobase+IRCC_LCR_A); 1448 lcra = inb(iobase + IRCC_LCR_A);
1427 lsr = inb(iobase+IRCC_LSR); 1449 lsr = inb(iobase + IRCC_LSR);
1428 1450
1429 IRDA_DEBUG(2, "%s(), iir = 0x%02x\n", __FUNCTION__, iir); 1451 IRDA_DEBUG(2, "%s(), iir = 0x%02x\n", __FUNCTION__, iir);
1430 1452
1431 if (iir & IRCC_IIR_EOM) { 1453 if (iir & IRCC_IIR_EOM) {
1432 if (self->io.direction == IO_RECV) 1454 if (self->io.direction == IO_RECV)
1433 smsc_ircc_dma_receive_complete(self, iobase); 1455 smsc_ircc_dma_receive_complete(self);
1434 else 1456 else
1435 smsc_ircc_dma_xmit_complete(self, iobase); 1457 smsc_ircc_dma_xmit_complete(self);
1436 1458
1437 smsc_ircc_dma_receive(self, iobase); 1459 smsc_ircc_dma_receive(self);
1438 } 1460 }
1439 1461
1440 if (iir & IRCC_IIR_ACTIVE_FRAME) { 1462 if (iir & IRCC_IIR_ACTIVE_FRAME) {
@@ -1444,7 +1466,7 @@ static irqreturn_t smsc_ircc_interrupt(int irq, void *dev_id, struct pt_regs *re
1444 /* Enable interrupts again */ 1466 /* Enable interrupts again */
1445 1467
1446 register_bank(iobase, 0); 1468 register_bank(iobase, 0);
1447 outb(IRCC_IER_ACTIVE_FRAME|IRCC_IER_EOM, iobase+IRCC_IER); 1469 outb(IRCC_IER_ACTIVE_FRAME | IRCC_IER_EOM, iobase + IRCC_IER);
1448 1470
1449 irq_ret_unlock: 1471 irq_ret_unlock:
1450 spin_unlock(&self->lock); 1472 spin_unlock(&self->lock);
@@ -1459,7 +1481,7 @@ static irqreturn_t smsc_ircc_interrupt(int irq, void *dev_id, struct pt_regs *re
1459 */ 1481 */
1460static irqreturn_t smsc_ircc_interrupt_sir(struct net_device *dev) 1482static irqreturn_t smsc_ircc_interrupt_sir(struct net_device *dev)
1461{ 1483{
1462 struct smsc_ircc_cb *self = dev->priv; 1484 struct smsc_ircc_cb *self = netdev_priv(dev);
1463 int boguscount = 0; 1485 int boguscount = 0;
1464 int iobase; 1486 int iobase;
1465 int iir, lsr; 1487 int iir, lsr;
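
The interrupt-path hunks above also switch from casting dev->priv to calling netdev_priv(dev), which returns the driver-private area allocated together with the net_device. A rough userspace model of that layout, assuming the simplification that the private area simply follows the device header (the real helper also applies alignment), might look like:

    #include <stdio.h>
    #include <stdlib.h>
    #include <string.h>

    /* Toy stand-ins; the real net_device and netdev_priv() live in the
     * kernel and also take alignment into account. */
    struct toy_net_device { char name[16]; };
    struct toy_priv       { int speed; };

    /* The private area is allocated in the same block, directly after the
     * device header, so no pointer field and no cast are needed. */
    static void *toy_netdev_priv(struct toy_net_device *dev)
    {
            return (char *)dev + sizeof(*dev);
    }

    static struct toy_net_device *toy_alloc_netdev(size_t priv_size)
    {
            return calloc(1, sizeof(struct toy_net_device) + priv_size);
    }

    int main(void)
    {
            struct toy_net_device *dev = toy_alloc_netdev(sizeof(struct toy_priv));
            struct toy_priv *self;

            if (!dev)
                    return 1;
            self = toy_netdev_priv(dev);
            strcpy(dev->name, "irda0");
            self->speed = 115200;
            printf("%s: speed %d\n", dev->name, self->speed);
            free(dev);
            return 0;
    }
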
@@ -1469,14 +1491,14 @@ static irqreturn_t smsc_ircc_interrupt_sir(struct net_device *dev)
1469 1491
1470 iobase = self->io.sir_base; 1492 iobase = self->io.sir_base;
1471 1493
1472 iir = inb(iobase+UART_IIR) & UART_IIR_ID; 1494 iir = inb(iobase + UART_IIR) & UART_IIR_ID;
1473 if (iir == 0) 1495 if (iir == 0)
1474 return IRQ_NONE; 1496 return IRQ_NONE;
1475 while (iir) { 1497 while (iir) {
1476 /* Clear interrupt */ 1498 /* Clear interrupt */
1477 lsr = inb(iobase+UART_LSR); 1499 lsr = inb(iobase + UART_LSR);
1478 1500
1479 IRDA_DEBUG(4, "%s(), iir=%02x, lsr=%02x, iobase=%#x\n", 1501 IRDA_DEBUG(4, "%s(), iir=%02x, lsr=%02x, iobase=%#x\n",
1480 __FUNCTION__, iir, lsr, iobase); 1502 __FUNCTION__, iir, lsr, iobase);
1481 1503
1482 switch (iir) { 1504 switch (iir) {
@@ -1496,13 +1518,13 @@ static irqreturn_t smsc_ircc_interrupt_sir(struct net_device *dev)
1496 IRDA_DEBUG(0, "%s(), unhandled IIR=%#x\n", 1518 IRDA_DEBUG(0, "%s(), unhandled IIR=%#x\n",
1497 __FUNCTION__, iir); 1519 __FUNCTION__, iir);
1498 break; 1520 break;
1499 } 1521 }
1500 1522
1501 /* Make sure we don't stay here to long */ 1523 /* Make sure we don't stay here to long */
1502 if (boguscount++ > 100) 1524 if (boguscount++ > 100)
1503 break; 1525 break;
1504 1526
1505 iir = inb(iobase + UART_IIR) & UART_IIR_ID; 1527 iir = inb(iobase + UART_IIR) & UART_IIR_ID;
1506 } 1528 }
1507 /*spin_unlock(&self->lock);*/ 1529 /*spin_unlock(&self->lock);*/
1508 return IRQ_HANDLED; 1530 return IRQ_HANDLED;
@@ -1529,7 +1551,7 @@ static int ircc_is_receiving(struct smsc_ircc_cb *self)
1529 get_dma_residue(self->io.dma)); 1551 get_dma_residue(self->io.dma));
1530 1552
1531 status = (self->rx_buff.state != OUTSIDE_FRAME); 1553 status = (self->rx_buff.state != OUTSIDE_FRAME);
1532 1554
1533 return status; 1555 return status;
1534} 1556}
1535#endif /* unused */ 1557#endif /* unused */
@@ -1544,19 +1566,16 @@ static int ircc_is_receiving(struct smsc_ircc_cb *self)
1544static int smsc_ircc_net_open(struct net_device *dev) 1566static int smsc_ircc_net_open(struct net_device *dev)
1545{ 1567{
1546 struct smsc_ircc_cb *self; 1568 struct smsc_ircc_cb *self;
1547 int iobase;
1548 char hwname[16]; 1569 char hwname[16];
1549 unsigned long flags; 1570 unsigned long flags;
1550 1571
1551 IRDA_DEBUG(1, "%s\n", __FUNCTION__); 1572 IRDA_DEBUG(1, "%s\n", __FUNCTION__);
1552 1573
1553 IRDA_ASSERT(dev != NULL, return -1;); 1574 IRDA_ASSERT(dev != NULL, return -1;);
1554 self = (struct smsc_ircc_cb *) dev->priv; 1575 self = netdev_priv(dev);
1555 IRDA_ASSERT(self != NULL, return 0;); 1576 IRDA_ASSERT(self != NULL, return 0;);
1556
1557 iobase = self->io.fir_base;
1558 1577
1559 if (request_irq(self->io.irq, smsc_ircc_interrupt, 0, dev->name, 1578 if (request_irq(self->io.irq, smsc_ircc_interrupt, 0, dev->name,
1560 (void *) dev)) { 1579 (void *) dev)) {
1561 IRDA_DEBUG(0, "%s(), unable to allocate irq=%d\n", 1580 IRDA_DEBUG(0, "%s(), unable to allocate irq=%d\n",
1562 __FUNCTION__, self->io.irq); 1581 __FUNCTION__, self->io.irq);
@@ -1568,14 +1587,14 @@ static int smsc_ircc_net_open(struct net_device *dev)
1568 self->io.speed = 0; 1587 self->io.speed = 0;
1569 smsc_ircc_change_speed(self, SMSC_IRCC2_C_IRDA_FALLBACK_SPEED); 1588 smsc_ircc_change_speed(self, SMSC_IRCC2_C_IRDA_FALLBACK_SPEED);
1570 spin_unlock_irqrestore(&self->lock, flags); 1589 spin_unlock_irqrestore(&self->lock, flags);
1571 1590
1572 /* Give self a hardware name */ 1591 /* Give self a hardware name */
1573 /* It would be cool to offer the chip revision here - Jean II */ 1592 /* It would be cool to offer the chip revision here - Jean II */
1574 sprintf(hwname, "SMSC @ 0x%03x", self->io.fir_base); 1593 sprintf(hwname, "SMSC @ 0x%03x", self->io.fir_base);
1575 1594
1576 /* 1595 /*
1577 * Open new IrLAP layer instance, now that everything should be 1596 * Open new IrLAP layer instance, now that everything should be
1578 * initialized properly 1597 * initialized properly
1579 */ 1598 */
1580 self->irlap = irlap_open(dev, &self->qos, hwname); 1599 self->irlap = irlap_open(dev, &self->qos, hwname);
1581 1600
@@ -1590,7 +1609,7 @@ static int smsc_ircc_net_open(struct net_device *dev)
1590 __FUNCTION__, self->io.dma); 1609 __FUNCTION__, self->io.dma);
1591 return -EAGAIN; 1610 return -EAGAIN;
1592 } 1611 }
1593 1612
1594 netif_start_queue(dev); 1613 netif_start_queue(dev);
1595 1614
1596 return 0; 1615 return 0;
@@ -1605,73 +1624,53 @@ static int smsc_ircc_net_open(struct net_device *dev)
1605static int smsc_ircc_net_close(struct net_device *dev) 1624static int smsc_ircc_net_close(struct net_device *dev)
1606{ 1625{
1607 struct smsc_ircc_cb *self; 1626 struct smsc_ircc_cb *self;
1608 int iobase;
1609 1627
1610 IRDA_DEBUG(1, "%s\n", __FUNCTION__); 1628 IRDA_DEBUG(1, "%s\n", __FUNCTION__);
1611 1629
1612 IRDA_ASSERT(dev != NULL, return -1;); 1630 IRDA_ASSERT(dev != NULL, return -1;);
1613 self = (struct smsc_ircc_cb *) dev->priv; 1631 self = netdev_priv(dev);
1614 IRDA_ASSERT(self != NULL, return 0;); 1632 IRDA_ASSERT(self != NULL, return 0;);
1615
1616 iobase = self->io.fir_base;
1617 1633
1618 /* Stop device */ 1634 /* Stop device */
1619 netif_stop_queue(dev); 1635 netif_stop_queue(dev);
1620 1636
1621 /* Stop and remove instance of IrLAP */ 1637 /* Stop and remove instance of IrLAP */
1622 if (self->irlap) 1638 if (self->irlap)
1623 irlap_close(self->irlap); 1639 irlap_close(self->irlap);
1624 self->irlap = NULL; 1640 self->irlap = NULL;
1625 1641
1626 free_irq(self->io.irq, dev); 1642 free_irq(self->io.irq, dev);
1627
1628 disable_dma(self->io.dma); 1643 disable_dma(self->io.dma);
1629
1630 free_dma(self->io.dma); 1644 free_dma(self->io.dma);
1631 1645
1632 return 0; 1646 return 0;
1633} 1647}
1634 1648
1635 1649static int smsc_ircc_suspend(struct device *dev, pm_message_t state, u32 level)
1636static void smsc_ircc_suspend(struct smsc_ircc_cb *self)
1637{ 1650{
1638 IRDA_MESSAGE("%s, Suspending\n", driver_name); 1651 struct smsc_ircc_cb *self = dev_get_drvdata(dev);
1639 1652
1640 if (self->io.suspended) 1653 IRDA_MESSAGE("%s, Suspending\n", driver_name);
1641 return;
1642 1654
1643 smsc_ircc_net_close(self->netdev); 1655 if (level == SUSPEND_DISABLE && !self->io.suspended) {
1656 smsc_ircc_net_close(self->netdev);
1657 self->io.suspended = 1;
1658 }
1644 1659
1645 self->io.suspended = 1; 1660 return 0;
1646} 1661}
1647 1662
1648static void smsc_ircc_wakeup(struct smsc_ircc_cb *self) 1663static int smsc_ircc_resume(struct device *dev, u32 level)
1649{ 1664{
1650 if (!self->io.suspended) 1665 struct smsc_ircc_cb *self = dev_get_drvdata(dev);
1651 return;
1652 1666
1653 /* The code was doing a "cli()" here, but this can't be right. 1667 if (level == RESUME_ENABLE && self->io.suspended) {
1654 * If you need protection, do it in net_open with a spinlock
1655 * or give a good reason. - Jean II */
1656 1668
1657 smsc_ircc_net_open(self->netdev); 1669 smsc_ircc_net_open(self->netdev);
1658 1670 self->io.suspended = 0;
1659 IRDA_MESSAGE("%s, Waking up\n", driver_name);
1660}
1661 1671
1662static int smsc_ircc_pmproc(struct pm_dev *dev, pm_request_t rqst, void *data) 1672 IRDA_MESSAGE("%s, Waking up\n", driver_name);
1663{ 1673 }
1664 struct smsc_ircc_cb *self = (struct smsc_ircc_cb*) dev->data;
1665 if (self) {
1666 switch (rqst) {
1667 case PM_SUSPEND:
1668 smsc_ircc_suspend(self);
1669 break;
1670 case PM_RESUME:
1671 smsc_ircc_wakeup(self);
1672 break;
1673 }
1674 }
1675 return 0; 1674 return 0;
1676} 1675}
1677 1676
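
The hunk above replaces the old pm_register()-style callbacks with driver-model suspend and resume methods that receive the struct device, look up the control block with dev_get_drvdata(), and act only on the SUSPEND_DISABLE and RESUME_ENABLE levels. A schematic, compilable stand-in for that flow, with all names and constant values invented for the example, could be:

    #include <stdio.h>

    /* Illustrative constants and types; the real struct device,
     * dev_get_drvdata() and the suspend/resume levels come from the
     * 2.6 driver core. */
    enum { SUSPEND_DISABLE = 1, RESUME_ENABLE = 1 };

    struct toy_device { void *driver_data; };
    struct ircc_state { int suspended; };

    static void *toy_get_drvdata(struct toy_device *dev)
    {
            return dev->driver_data;
    }

    /* Same shape as smsc_ircc_suspend()/smsc_ircc_resume(): recover the
     * control block from the device, act only on the final level, and
     * stay idempotent via the suspended flag. */
    static int toy_suspend(struct toy_device *dev, int level)
    {
            struct ircc_state *self = toy_get_drvdata(dev);

            if (level == SUSPEND_DISABLE && !self->suspended) {
                    puts("closing device");
                    self->suspended = 1;
            }
            return 0;
    }

    static int toy_resume(struct toy_device *dev, int level)
    {
            struct ircc_state *self = toy_get_drvdata(dev);

            if (level == RESUME_ENABLE && self->suspended) {
                    puts("reopening device");
                    self->suspended = 0;
            }
            return 0;
    }

    int main(void)
    {
            struct ircc_state state = { 0 };
            struct toy_device dev = { .driver_data = &state };

            toy_suspend(&dev, SUSPEND_DISABLE);
            toy_resume(&dev, RESUME_ENABLE);
            return 0;
    }
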
@@ -1690,10 +1689,7 @@ static int __exit smsc_ircc_close(struct smsc_ircc_cb *self)
1690 1689
1691 IRDA_ASSERT(self != NULL, return -1;); 1690 IRDA_ASSERT(self != NULL, return -1;);
1692 1691
1693 iobase = self->io.fir_base; 1692 platform_device_unregister(self->pldev);
1694
1695 if (self->pmdev)
1696 pm_unregister(self->pmdev);
1697 1693
1698 /* Remove netdevice */ 1694 /* Remove netdevice */
1699 unregister_netdev(self->netdev); 1695 unregister_netdev(self->netdev);
@@ -1702,15 +1698,16 @@ static int __exit smsc_ircc_close(struct smsc_ircc_cb *self)
1702 spin_lock_irqsave(&self->lock, flags); 1698 spin_lock_irqsave(&self->lock, flags);
1703 1699
1704 /* Stop interrupts */ 1700 /* Stop interrupts */
1701 iobase = self->io.fir_base;
1705 register_bank(iobase, 0); 1702 register_bank(iobase, 0);
1706 outb(0, iobase+IRCC_IER); 1703 outb(0, iobase + IRCC_IER);
1707 outb(IRCC_MASTER_RESET, iobase+IRCC_MASTER); 1704 outb(IRCC_MASTER_RESET, iobase + IRCC_MASTER);
1708 outb(0x00, iobase+IRCC_MASTER); 1705 outb(0x00, iobase + IRCC_MASTER);
1709#if 0 1706#if 0
1710 /* Reset to SIR mode */ 1707 /* Reset to SIR mode */
1711 register_bank(iobase, 1); 1708 register_bank(iobase, 1);
1712 outb(IRCC_CFGA_IRDA_SIR_A|IRCC_CFGA_TX_POLARITY, iobase+IRCC_SCE_CFGA); 1709 outb(IRCC_CFGA_IRDA_SIR_A|IRCC_CFGA_TX_POLARITY, iobase + IRCC_SCE_CFGA);
1713 outb(IRCC_CFGB_IR, iobase+IRCC_SCE_CFGB); 1710 outb(IRCC_CFGB_IR, iobase + IRCC_SCE_CFGB);
1714#endif 1711#endif
1715 spin_unlock_irqrestore(&self->lock, flags); 1712 spin_unlock_irqrestore(&self->lock, flags);
1716 1713
@@ -1720,7 +1717,7 @@ static int __exit smsc_ircc_close(struct smsc_ircc_cb *self)
1720 1717
1721 release_region(self->io.fir_base, self->io.fir_ext); 1718 release_region(self->io.fir_base, self->io.fir_ext);
1722 1719
1723 IRDA_DEBUG(0, "%s(), releasing 0x%03x\n", __FUNCTION__, 1720 IRDA_DEBUG(0, "%s(), releasing 0x%03x\n", __FUNCTION__,
1724 self->io.sir_base); 1721 self->io.sir_base);
1725 1722
1726 release_region(self->io.sir_base, self->io.sir_ext); 1723 release_region(self->io.sir_base, self->io.sir_ext);
@@ -1728,7 +1725,7 @@ static int __exit smsc_ircc_close(struct smsc_ircc_cb *self)
1728 if (self->tx_buff.head) 1725 if (self->tx_buff.head)
1729 dma_free_coherent(NULL, self->tx_buff.truesize, 1726 dma_free_coherent(NULL, self->tx_buff.truesize,
1730 self->tx_buff.head, self->tx_buff_dma); 1727 self->tx_buff.head, self->tx_buff_dma);
1731 1728
1732 if (self->rx_buff.head) 1729 if (self->rx_buff.head)
1733 dma_free_coherent(NULL, self->rx_buff.truesize, 1730 dma_free_coherent(NULL, self->rx_buff.truesize,
1734 self->rx_buff.head, self->rx_buff_dma); 1731 self->rx_buff.head, self->rx_buff_dma);
@@ -1744,10 +1741,12 @@ static void __exit smsc_ircc_cleanup(void)
1744 1741
1745 IRDA_DEBUG(1, "%s\n", __FUNCTION__); 1742 IRDA_DEBUG(1, "%s\n", __FUNCTION__);
1746 1743
1747 for (i=0; i < 2; i++) { 1744 for (i = 0; i < 2; i++) {
1748 if (dev_self[i]) 1745 if (dev_self[i])
1749 smsc_ircc_close(dev_self[i]); 1746 smsc_ircc_close(dev_self[i]);
1750 } 1747 }
1748
1749 driver_unregister(&smsc_ircc_driver);
1751} 1750}
1752 1751
1753/* 1752/*
@@ -1763,34 +1762,34 @@ void smsc_ircc_sir_start(struct smsc_ircc_cb *self)
1763 1762
1764 IRDA_DEBUG(3, "%s\n", __FUNCTION__); 1763 IRDA_DEBUG(3, "%s\n", __FUNCTION__);
1765 1764
1766 IRDA_ASSERT(self != NULL, return;); 1765 IRDA_ASSERT(self != NULL, return;);
1767 dev= self->netdev; 1766 dev = self->netdev;
1768 IRDA_ASSERT(dev != NULL, return;); 1767 IRDA_ASSERT(dev != NULL, return;);
1769 dev->hard_start_xmit = &smsc_ircc_hard_xmit_sir; 1768 dev->hard_start_xmit = &smsc_ircc_hard_xmit_sir;
1770 1769
1771 fir_base = self->io.fir_base; 1770 fir_base = self->io.fir_base;
1772 sir_base = self->io.sir_base; 1771 sir_base = self->io.sir_base;
1773 1772
1774 /* Reset everything */ 1773 /* Reset everything */
1775 outb(IRCC_MASTER_RESET, fir_base+IRCC_MASTER); 1774 outb(IRCC_MASTER_RESET, fir_base + IRCC_MASTER);
1776 1775
1777 #if SMSC_IRCC2_C_SIR_STOP 1776 #if SMSC_IRCC2_C_SIR_STOP
1778 /*smsc_ircc_sir_stop(self);*/ 1777 /*smsc_ircc_sir_stop(self);*/
1779 #endif 1778 #endif
1780 1779
1781 register_bank(fir_base, 1); 1780 register_bank(fir_base, 1);
1782 outb(((inb(fir_base+IRCC_SCE_CFGA) & IRCC_SCE_CFGA_BLOCK_CTRL_BITS_MASK) | IRCC_CFGA_IRDA_SIR_A), fir_base+IRCC_SCE_CFGA); 1781 outb(((inb(fir_base + IRCC_SCE_CFGA) & IRCC_SCE_CFGA_BLOCK_CTRL_BITS_MASK) | IRCC_CFGA_IRDA_SIR_A), fir_base + IRCC_SCE_CFGA);
1783 1782
1784 /* Initialize UART */ 1783 /* Initialize UART */
1785 outb(UART_LCR_WLEN8, sir_base+UART_LCR); /* Reset DLAB */ 1784 outb(UART_LCR_WLEN8, sir_base + UART_LCR); /* Reset DLAB */
1786 outb((UART_MCR_DTR | UART_MCR_RTS | UART_MCR_OUT2), sir_base+UART_MCR); 1785 outb((UART_MCR_DTR | UART_MCR_RTS | UART_MCR_OUT2), sir_base + UART_MCR);
1787 1786
1788 /* Turn on interrups */ 1787 /* Turn on interrups */
1789 outb(UART_IER_RLSI | UART_IER_RDI |UART_IER_THRI, sir_base+UART_IER); 1788 outb(UART_IER_RLSI | UART_IER_RDI |UART_IER_THRI, sir_base + UART_IER);
1790 1789
1791 IRDA_DEBUG(3, "%s() - exit\n", __FUNCTION__); 1790 IRDA_DEBUG(3, "%s() - exit\n", __FUNCTION__);
1792 1791
1793 outb(0x00, fir_base+IRCC_MASTER); 1792 outb(0x00, fir_base + IRCC_MASTER);
1794} 1793}
1795 1794
1796#if SMSC_IRCC2_C_SIR_STOP 1795#if SMSC_IRCC2_C_SIR_STOP
@@ -1802,10 +1801,10 @@ void smsc_ircc_sir_stop(struct smsc_ircc_cb *self)
1802 iobase = self->io.sir_base; 1801 iobase = self->io.sir_base;
1803 1802
1804 /* Reset UART */ 1803 /* Reset UART */
1805 outb(0, iobase+UART_MCR); 1804 outb(0, iobase + UART_MCR);
1806 1805
1807 /* Turn off interrupts */ 1806 /* Turn off interrupts */
1808 outb(0, iobase+UART_IER); 1807 outb(0, iobase + UART_IER);
1809} 1808}
1810#endif 1809#endif
1811 1810
@@ -1831,16 +1830,16 @@ static void smsc_ircc_sir_write_wakeup(struct smsc_ircc_cb *self)
1831 /* Finished with frame? */ 1830 /* Finished with frame? */
1832 if (self->tx_buff.len > 0) { 1831 if (self->tx_buff.len > 0) {
1833 /* Write data left in transmit buffer */ 1832 /* Write data left in transmit buffer */
1834 actual = smsc_ircc_sir_write(iobase, self->io.fifo_size, 1833 actual = smsc_ircc_sir_write(iobase, self->io.fifo_size,
1835 self->tx_buff.data, self->tx_buff.len); 1834 self->tx_buff.data, self->tx_buff.len);
1836 self->tx_buff.data += actual; 1835 self->tx_buff.data += actual;
1837 self->tx_buff.len -= actual; 1836 self->tx_buff.len -= actual;
1838 } else { 1837 } else {
1839 1838
1840 /*if (self->tx_buff.len ==0) {*/ 1839 /*if (self->tx_buff.len ==0) {*/
1841 1840
1842 /* 1841 /*
1843 * Now serial buffer is almost free & we can start 1842 * Now serial buffer is almost free & we can start
1844 * transmission of another packet. But first we must check 1843 * transmission of another packet. But first we must check
1845 * if we need to change the speed of the hardware 1844 * if we need to change the speed of the hardware
1846 */ 1845 */
@@ -1856,21 +1855,19 @@ static void smsc_ircc_sir_write_wakeup(struct smsc_ircc_cb *self)
1856 } 1855 }
1857 self->stats.tx_packets++; 1856 self->stats.tx_packets++;
1858 1857
1859 if(self->io.speed <= 115200) { 1858 if (self->io.speed <= 115200) {
1860 /* 1859 /*
1861 * Reset Rx FIFO to make sure that all reflected transmit data 1860 * Reset Rx FIFO to make sure that all reflected transmit data
1862 * is discarded. This is needed for half duplex operation 1861 * is discarded. This is needed for half duplex operation
1863 */ 1862 */
1864 fcr = UART_FCR_ENABLE_FIFO | UART_FCR_CLEAR_RCVR; 1863 fcr = UART_FCR_ENABLE_FIFO | UART_FCR_CLEAR_RCVR;
1865 if (self->io.speed < 38400) 1864 fcr |= self->io.speed < 38400 ?
1866 fcr |= UART_FCR_TRIGGER_1; 1865 UART_FCR_TRIGGER_1 : UART_FCR_TRIGGER_14;
1867 else
1868 fcr |= UART_FCR_TRIGGER_14;
1869 1866
1870 outb(fcr, iobase+UART_FCR); 1867 outb(fcr, iobase + UART_FCR);
1871 1868
1872 /* Turn on receive interrupts */ 1869 /* Turn on receive interrupts */
1873 outb(UART_IER_RDI, iobase+UART_IER); 1870 outb(UART_IER_RDI, iobase + UART_IER);
1874 } 1871 }
1875 } 1872 }
1876} 1873}
@@ -1884,17 +1881,17 @@ static void smsc_ircc_sir_write_wakeup(struct smsc_ircc_cb *self)
1884static int smsc_ircc_sir_write(int iobase, int fifo_size, __u8 *buf, int len) 1881static int smsc_ircc_sir_write(int iobase, int fifo_size, __u8 *buf, int len)
1885{ 1882{
1886 int actual = 0; 1883 int actual = 0;
1887 1884
1888 /* Tx FIFO should be empty! */ 1885 /* Tx FIFO should be empty! */
1889 if (!(inb(iobase+UART_LSR) & UART_LSR_THRE)) { 1886 if (!(inb(iobase + UART_LSR) & UART_LSR_THRE)) {
1890 IRDA_WARNING("%s(), failed, fifo not empty!\n", __FUNCTION__); 1887 IRDA_WARNING("%s(), failed, fifo not empty!\n", __FUNCTION__);
1891 return 0; 1888 return 0;
1892 } 1889 }
1893 1890
1894 /* Fill FIFO with current frame */ 1891 /* Fill FIFO with current frame */
1895 while ((fifo_size-- > 0) && (actual < len)) { 1892 while (fifo_size-- > 0 && actual < len) {
1896 /* Transmit next byte */ 1893 /* Transmit next byte */
1897 outb(buf[actual], iobase+UART_TX); 1894 outb(buf[actual], iobase + UART_TX);
1898 actual++; 1895 actual++;
1899 } 1896 }
1900 return actual; 1897 return actual;
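
smsc_ircc_sir_write() above fills the UART transmit FIFO only while there is both FIFO room and data left, and reports how many bytes were consumed so smsc_ircc_sir_write_wakeup() can advance tx_buff.data and tx_buff.len. The same bounded-fill shape, reduced to a standalone example with invented names:

    #include <stdio.h>

    /* Bounded FIFO fill: push at most fifo_size bytes, report how many
     * were taken so the caller can advance its buffer pointer and length. */
    static int fifo_write(unsigned char *fifo, int fifo_size,
                          const unsigned char *buf, int len)
    {
            int actual = 0;

            while (fifo_size-- > 0 && actual < len) {
                    fifo[actual] = buf[actual];
                    actual++;
            }
            return actual;
    }

    int main(void)
    {
            unsigned char fifo[16];
            const unsigned char frame[] = "a wrapped SIR frame";
            int sent = fifo_write(fifo, (int)sizeof(fifo), frame,
                                  (int)sizeof(frame) - 1);

            printf("queued %d of %d bytes\n", sent, (int)sizeof(frame) - 1);
            return 0;
    }
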
@@ -1921,20 +1918,21 @@ static int smsc_ircc_is_receiving(struct smsc_ircc_cb *self)
1921static void smsc_ircc_probe_transceiver(struct smsc_ircc_cb *self) 1918static void smsc_ircc_probe_transceiver(struct smsc_ircc_cb *self)
1922{ 1919{
1923 unsigned int i; 1920 unsigned int i;
1924 1921
1925 IRDA_ASSERT(self != NULL, return;); 1922 IRDA_ASSERT(self != NULL, return;);
1926 1923
1927 for(i=0; smsc_transceivers[i].name!=NULL; i++) 1924 for (i = 0; smsc_transceivers[i].name != NULL; i++)
1928 if((*smsc_transceivers[i].probe)(self->io.fir_base)) { 1925 if (smsc_transceivers[i].probe(self->io.fir_base)) {
1929 IRDA_MESSAGE(" %s transceiver found\n", 1926 IRDA_MESSAGE(" %s transceiver found\n",
1930 smsc_transceivers[i].name); 1927 smsc_transceivers[i].name);
1931 self->transceiver= i+1; 1928 self->transceiver= i + 1;
1932 return; 1929 return;
1933 } 1930 }
1931
1934 IRDA_MESSAGE("No transceiver found. Defaulting to %s\n", 1932 IRDA_MESSAGE("No transceiver found. Defaulting to %s\n",
1935 smsc_transceivers[SMSC_IRCC2_C_DEFAULT_TRANSCEIVER].name); 1933 smsc_transceivers[SMSC_IRCC2_C_DEFAULT_TRANSCEIVER].name);
1936 1934
1937 self->transceiver= SMSC_IRCC2_C_DEFAULT_TRANSCEIVER; 1935 self->transceiver = SMSC_IRCC2_C_DEFAULT_TRANSCEIVER;
1938} 1936}
1939 1937
1940 1938
@@ -1947,9 +1945,10 @@ static void smsc_ircc_probe_transceiver(struct smsc_ircc_cb *self)
1947static void smsc_ircc_set_transceiver_for_speed(struct smsc_ircc_cb *self, u32 speed) 1945static void smsc_ircc_set_transceiver_for_speed(struct smsc_ircc_cb *self, u32 speed)
1948{ 1946{
1949 unsigned int trx; 1947 unsigned int trx;
1950 1948
1951 trx = self->transceiver; 1949 trx = self->transceiver;
1952 if(trx>0) (*smsc_transceivers[trx-1].set_for_speed)(self->io.fir_base, speed); 1950 if (trx > 0)
1951 smsc_transceivers[trx - 1].set_for_speed(self->io.fir_base, speed);
1953} 1952}
1954 1953
1955/* 1954/*
@@ -1977,16 +1976,14 @@ static void smsc_ircc_set_transceiver_for_speed(struct smsc_ircc_cb *self, u32 s
1977 1976
1978static void smsc_ircc_sir_wait_hw_transmitter_finish(struct smsc_ircc_cb *self) 1977static void smsc_ircc_sir_wait_hw_transmitter_finish(struct smsc_ircc_cb *self)
1979{ 1978{
1980 int iobase; 1979 int iobase = self->io.sir_base;
1981 int count = SMSC_IRCC2_HW_TRANSMITTER_TIMEOUT_US; 1980 int count = SMSC_IRCC2_HW_TRANSMITTER_TIMEOUT_US;
1982 1981
1983 iobase = self->io.sir_base;
1984
1985 /* Calibrated busy loop */ 1982 /* Calibrated busy loop */
1986 while((count-- > 0) && !(inb(iobase+UART_LSR) & UART_LSR_TEMT)) 1983 while (count-- > 0 && !(inb(iobase + UART_LSR) & UART_LSR_TEMT))
1987 udelay(1); 1984 udelay(1);
1988 1985
1989 if(count == 0) 1986 if (count == 0)
1990 IRDA_DEBUG(0, "%s(): stuck transmitter\n", __FUNCTION__); 1987 IRDA_DEBUG(0, "%s(): stuck transmitter\n", __FUNCTION__);
1991} 1988}
1992 1989
@@ -1998,40 +1995,42 @@ static void smsc_ircc_sir_wait_hw_transmitter_finish(struct smsc_ircc_cb *self)
1998 1995
1999static int __init smsc_ircc_look_for_chips(void) 1996static int __init smsc_ircc_look_for_chips(void)
2000{ 1997{
2001 smsc_chip_address_t *address; 1998 struct smsc_chip_address *address;
2002 char *type; 1999 char *type;
2003 unsigned int cfg_base, found; 2000 unsigned int cfg_base, found;
2004 2001
2005 found = 0; 2002 found = 0;
2006 address = possible_addresses; 2003 address = possible_addresses;
2007 2004
2008 while(address->cfg_base){ 2005 while (address->cfg_base) {
2009 cfg_base = address->cfg_base; 2006 cfg_base = address->cfg_base;
2010 2007
2011 /*printk(KERN_WARNING "%s(): probing: 0x%02x for: 0x%02x\n", __FUNCTION__, cfg_base, address->type);*/ 2008 /*printk(KERN_WARNING "%s(): probing: 0x%02x for: 0x%02x\n", __FUNCTION__, cfg_base, address->type);*/
2012 2009
2013 if( address->type & SMSCSIO_TYPE_FDC){ 2010 if (address->type & SMSCSIO_TYPE_FDC) {
2014 type = "FDC"; 2011 type = "FDC";
2015 if((address->type) & SMSCSIO_TYPE_FLAT) { 2012 if (address->type & SMSCSIO_TYPE_FLAT)
2016 if(!smsc_superio_flat(fdc_chips_flat,cfg_base, type)) found++; 2013 if (!smsc_superio_flat(fdc_chips_flat, cfg_base, type))
2017 } 2014 found++;
2018 if((address->type) & SMSCSIO_TYPE_PAGED) { 2015
2019 if(!smsc_superio_paged(fdc_chips_paged,cfg_base, type)) found++; 2016 if (address->type & SMSCSIO_TYPE_PAGED)
2020 } 2017 if (!smsc_superio_paged(fdc_chips_paged, cfg_base, type))
2018 found++;
2021 } 2019 }
2022 if( address->type & SMSCSIO_TYPE_LPC){ 2020 if (address->type & SMSCSIO_TYPE_LPC) {
2023 type = "LPC"; 2021 type = "LPC";
2024 if((address->type) & SMSCSIO_TYPE_FLAT) { 2022 if (address->type & SMSCSIO_TYPE_FLAT)
2025 if(!smsc_superio_flat(lpc_chips_flat,cfg_base,type)) found++; 2023 if (!smsc_superio_flat(lpc_chips_flat, cfg_base, type))
2026 } 2024 found++;
2027 if((address->type) & SMSCSIO_TYPE_PAGED) { 2025
2028 if(!smsc_superio_paged(lpc_chips_paged,cfg_base,"LPC")) found++; 2026 if (address->type & SMSCSIO_TYPE_PAGED)
2029 } 2027 if (!smsc_superio_paged(lpc_chips_paged, cfg_base, type))
2028 found++;
2030 } 2029 }
2031 address++; 2030 address++;
2032 } 2031 }
2033 return found; 2032 return found;
2034} 2033}
2035 2034
2036/* 2035/*
2037 * Function smsc_superio_flat (chip, base, type) 2036 * Function smsc_superio_flat (chip, base, type)
@@ -2039,7 +2038,7 @@ static int __init smsc_ircc_look_for_chips(void)
2039 * Try to get configuration of a smc SuperIO chip with flat register model 2038 * Try to get configuration of a smc SuperIO chip with flat register model
2040 * 2039 *
2041 */ 2040 */
2042static int __init smsc_superio_flat(const smsc_chip_t *chips, unsigned short cfgbase, char *type) 2041static int __init smsc_superio_flat(const struct smsc_chip *chips, unsigned short cfgbase, char *type)
2043{ 2042{
2044 unsigned short firbase, sirbase; 2043 unsigned short firbase, sirbase;
2045 u8 mode, dma, irq; 2044 u8 mode, dma, irq;
@@ -2047,39 +2046,37 @@ static int __init smsc_superio_flat(const smsc_chip_t *chips, unsigned short cfg
2047 2046
2048 IRDA_DEBUG(1, "%s\n", __FUNCTION__); 2047 IRDA_DEBUG(1, "%s\n", __FUNCTION__);
2049 2048
2050 if (smsc_ircc_probe(cfgbase, SMSCSIOFLAT_DEVICEID_REG, chips, type)==NULL) 2049 if (smsc_ircc_probe(cfgbase, SMSCSIOFLAT_DEVICEID_REG, chips, type) == NULL)
2051 return ret; 2050 return ret;
2052 2051
2053 outb(SMSCSIOFLAT_UARTMODE0C_REG, cfgbase); 2052 outb(SMSCSIOFLAT_UARTMODE0C_REG, cfgbase);
2054 mode = inb(cfgbase+1); 2053 mode = inb(cfgbase + 1);
2055 2054
2056 /*printk(KERN_WARNING "%s(): mode: 0x%02x\n", __FUNCTION__, mode);*/ 2055 /*printk(KERN_WARNING "%s(): mode: 0x%02x\n", __FUNCTION__, mode);*/
2057 2056
2058 if(!(mode & SMSCSIOFLAT_UART2MODE_VAL_IRDA)) 2057 if (!(mode & SMSCSIOFLAT_UART2MODE_VAL_IRDA))
2059 IRDA_WARNING("%s(): IrDA not enabled\n", __FUNCTION__); 2058 IRDA_WARNING("%s(): IrDA not enabled\n", __FUNCTION__);
2060 2059
2061 outb(SMSCSIOFLAT_UART2BASEADDR_REG, cfgbase); 2060 outb(SMSCSIOFLAT_UART2BASEADDR_REG, cfgbase);
2062 sirbase = inb(cfgbase+1) << 2; 2061 sirbase = inb(cfgbase + 1) << 2;
2063 2062
2064 /* FIR iobase */ 2063 /* FIR iobase */
2065 outb(SMSCSIOFLAT_FIRBASEADDR_REG, cfgbase); 2064 outb(SMSCSIOFLAT_FIRBASEADDR_REG, cfgbase);
2066 firbase = inb(cfgbase+1) << 3; 2065 firbase = inb(cfgbase + 1) << 3;
2067 2066
2068 /* DMA */ 2067 /* DMA */
2069 outb(SMSCSIOFLAT_FIRDMASELECT_REG, cfgbase); 2068 outb(SMSCSIOFLAT_FIRDMASELECT_REG, cfgbase);
2070 dma = inb(cfgbase+1) & SMSCSIOFLAT_FIRDMASELECT_MASK; 2069 dma = inb(cfgbase + 1) & SMSCSIOFLAT_FIRDMASELECT_MASK;
2071 2070
2072 /* IRQ */ 2071 /* IRQ */
2073 outb(SMSCSIOFLAT_UARTIRQSELECT_REG, cfgbase); 2072 outb(SMSCSIOFLAT_UARTIRQSELECT_REG, cfgbase);
2074 irq = inb(cfgbase+1) & SMSCSIOFLAT_UART2IRQSELECT_MASK; 2073 irq = inb(cfgbase + 1) & SMSCSIOFLAT_UART2IRQSELECT_MASK;
2075 2074
2076 IRDA_MESSAGE("%s(): fir: 0x%02x, sir: 0x%02x, dma: %02d, irq: %d, mode: 0x%02x\n", __FUNCTION__, firbase, sirbase, dma, irq, mode); 2075 IRDA_MESSAGE("%s(): fir: 0x%02x, sir: 0x%02x, dma: %02d, irq: %d, mode: 0x%02x\n", __FUNCTION__, firbase, sirbase, dma, irq, mode);
2077 2076
2078 if (firbase) { 2077 if (firbase && smsc_ircc_open(firbase, sirbase, dma, irq) == 0)
2079 if (smsc_ircc_open(firbase, sirbase, dma, irq) == 0) 2078 ret = 0;
2080 ret=0; 2079
2081 }
2082
2083 /* Exit configuration */ 2080 /* Exit configuration */
2084 outb(SMSCSIO_CFGEXITKEY, cfgbase); 2081 outb(SMSCSIO_CFGEXITKEY, cfgbase);
2085 2082
@@ -2092,26 +2089,26 @@ static int __init smsc_superio_flat(const smsc_chip_t *chips, unsigned short cfg
2092 * Try to get configuration of a smc SuperIO chip with paged register model 2089 * Try to get configuration of a smc SuperIO chip with paged register model
2093 * 2090 *
2094 */ 2091 */
2095static int __init smsc_superio_paged(const smsc_chip_t *chips, unsigned short cfg_base, char *type) 2092static int __init smsc_superio_paged(const struct smsc_chip *chips, unsigned short cfg_base, char *type)
2096{ 2093{
2097 unsigned short fir_io, sir_io; 2094 unsigned short fir_io, sir_io;
2098 int ret = -ENODEV; 2095 int ret = -ENODEV;
2099 2096
2100 IRDA_DEBUG(1, "%s\n", __FUNCTION__); 2097 IRDA_DEBUG(1, "%s\n", __FUNCTION__);
2101 2098
2102 if (smsc_ircc_probe(cfg_base,0x20,chips,type)==NULL) 2099 if (smsc_ircc_probe(cfg_base, 0x20, chips, type) == NULL)
2103 return ret; 2100 return ret;
2104 2101
2105 /* Select logical device (UART2) */ 2102 /* Select logical device (UART2) */
2106 outb(0x07, cfg_base); 2103 outb(0x07, cfg_base);
2107 outb(0x05, cfg_base + 1); 2104 outb(0x05, cfg_base + 1);
2108 2105
2109 /* SIR iobase */ 2106 /* SIR iobase */
2110 outb(0x60, cfg_base); 2107 outb(0x60, cfg_base);
2111 sir_io = inb(cfg_base + 1) << 8; 2108 sir_io = inb(cfg_base + 1) << 8;
2112 outb(0x61, cfg_base); 2109 outb(0x61, cfg_base);
2113 sir_io |= inb(cfg_base + 1); 2110 sir_io |= inb(cfg_base + 1);
2114 2111
2115 /* Read FIR base */ 2112 /* Read FIR base */
2116 outb(0x62, cfg_base); 2113 outb(0x62, cfg_base);
2117 fir_io = inb(cfg_base + 1) << 8; 2114 fir_io = inb(cfg_base + 1) << 8;
@@ -2119,11 +2116,9 @@ static int __init smsc_superio_paged(const smsc_chip_t *chips, unsigned short cf
2119 fir_io |= inb(cfg_base + 1); 2116 fir_io |= inb(cfg_base + 1);
2120 outb(0x2b, cfg_base); /* ??? */ 2117 outb(0x2b, cfg_base); /* ??? */
2121 2118
2122 if (fir_io) { 2119 if (fir_io && smsc_ircc_open(fir_io, sir_io, ircc_dma, ircc_irq) == 0)
2123 if (smsc_ircc_open(fir_io, sir_io, ircc_dma, ircc_irq) == 0) 2120 ret = 0;
2124 ret=0; 2121
2125 }
2126
2127 /* Exit configuration */ 2122 /* Exit configuration */
2128 outb(SMSCSIO_CFGEXITKEY, cfg_base); 2123 outb(SMSCSIO_CFGEXITKEY, cfg_base);
2129 2124
@@ -2131,21 +2126,17 @@ static int __init smsc_superio_paged(const smsc_chip_t *chips, unsigned short cf
2131} 2126}
2132 2127
2133 2128
2134static int __init smsc_access(unsigned short cfg_base,unsigned char reg) 2129static int __init smsc_access(unsigned short cfg_base, unsigned char reg)
2135{ 2130{
2136 IRDA_DEBUG(1, "%s\n", __FUNCTION__); 2131 IRDA_DEBUG(1, "%s\n", __FUNCTION__);
2137 2132
2138 outb(reg, cfg_base); 2133 outb(reg, cfg_base);
2139 2134 return inb(cfg_base) != reg ? -1 : 0;
2140 if (inb(cfg_base)!=reg)
2141 return -1;
2142
2143 return 0;
2144} 2135}
2145 2136
2146static const smsc_chip_t * __init smsc_ircc_probe(unsigned short cfg_base,u8 reg,const smsc_chip_t *chip,char *type) 2137static const struct smsc_chip * __init smsc_ircc_probe(unsigned short cfg_base, u8 reg, const struct smsc_chip *chip, char *type)
2147{ 2138{
2148 u8 devid,xdevid,rev; 2139 u8 devid, xdevid, rev;
2149 2140
2150 IRDA_DEBUG(1, "%s\n", __FUNCTION__); 2141 IRDA_DEBUG(1, "%s\n", __FUNCTION__);
2151 2142
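
The SuperIO probing code above follows the usual index/data convention: write a register index to the configuration base port, read it back to confirm configuration mode is active (the rewritten smsc_access()), then read or write the value through base + 1. A small simulation of that convention, with a fabricated register file, is sketched below:

    #include <stdio.h>

    /* Simulated index/data configuration port; the register contents are
     * fabricated for the example. */
    static unsigned char cfg_index;
    static unsigned char cfg_regs[256] = { [0x0d] = 0x5a };

    static void cfg_write_index(unsigned char reg) { cfg_index = reg; }
    static unsigned char cfg_read_index(void)      { return cfg_index; }
    static unsigned char cfg_read_data(void)       { return cfg_regs[cfg_index]; }

    /* Same shape as the rewritten smsc_access(): the index must read back
     * unchanged before the data port can be trusted. */
    static int access_model(unsigned char reg)
    {
            cfg_write_index(reg);
            return cfg_read_index() != reg ? -1 : 0;
    }

    int main(void)
    {
            if (access_model(0x0d) == 0)
                    printf("devid = 0x%02x\n", cfg_read_data());
            return 0;
    }
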
@@ -2158,7 +2149,7 @@ static const smsc_chip_t * __init smsc_ircc_probe(unsigned short cfg_base,u8 reg
2158 2149
2159 outb(reg, cfg_base); 2150 outb(reg, cfg_base);
2160 2151
2161 xdevid=inb(cfg_base+1); 2152 xdevid = inb(cfg_base + 1);
2162 2153
2163 /* Enter configuration */ 2154 /* Enter configuration */
2164 2155
@@ -2168,51 +2159,49 @@ static const smsc_chip_t * __init smsc_ircc_probe(unsigned short cfg_base,u8 reg
2168 if (smsc_access(cfg_base,0x55)) /* send second key and check */ 2159 if (smsc_access(cfg_base,0x55)) /* send second key and check */
2169 return NULL; 2160 return NULL;
2170 #endif 2161 #endif
2171 2162
2172 /* probe device ID */ 2163 /* probe device ID */
2173 2164
2174 if (smsc_access(cfg_base,reg)) 2165 if (smsc_access(cfg_base, reg))
2175 return NULL; 2166 return NULL;
2176 2167
2177 devid=inb(cfg_base+1); 2168 devid = inb(cfg_base + 1);
2178
2179 if (devid==0) /* typical value for unused port */
2180 return NULL;
2181 2169
2182 if (devid==0xff) /* typical value for unused port */ 2170 if (devid == 0 || devid == 0xff) /* typical values for unused port */
2183 return NULL; 2171 return NULL;
2184 2172
2185 /* probe revision ID */ 2173 /* probe revision ID */
2186 2174
2187 if (smsc_access(cfg_base,reg+1)) 2175 if (smsc_access(cfg_base, reg + 1))
2188 return NULL; 2176 return NULL;
2189 2177
2190 rev=inb(cfg_base+1); 2178 rev = inb(cfg_base + 1);
2191 2179
2192 if (rev>=128) /* i think this will make no sense */ 2180 if (rev >= 128) /* i think this will make no sense */
2193 return NULL; 2181 return NULL;
2194 2182
2195 if (devid==xdevid) /* protection against false positives */ 2183 if (devid == xdevid) /* protection against false positives */
2196 return NULL; 2184 return NULL;
2197 2185
2198 /* Check for expected device ID; are there others? */ 2186 /* Check for expected device ID; are there others? */
2199 2187
2200 while(chip->devid!=devid) { 2188 while (chip->devid != devid) {
2201 2189
2202 chip++; 2190 chip++;
2203 2191
2204 if (chip->name==NULL) 2192 if (chip->name == NULL)
2205 return NULL; 2193 return NULL;
2206 } 2194 }
2207 2195
2208 IRDA_MESSAGE("found SMC SuperIO Chip (devid=0x%02x rev=%02X base=0x%04x): %s%s\n",devid,rev,cfg_base,type,chip->name); 2196 IRDA_MESSAGE("found SMC SuperIO Chip (devid=0x%02x rev=%02X base=0x%04x): %s%s\n",
2197 devid, rev, cfg_base, type, chip->name);
2209 2198
2210 if (chip->rev>rev){ 2199 if (chip->rev > rev) {
2211 IRDA_MESSAGE("Revision higher than expected\n"); 2200 IRDA_MESSAGE("Revision higher than expected\n");
2212 return NULL; 2201 return NULL;
2213 } 2202 }
2214 2203
2215 if (chip->flags&NoIRDA) 2204 if (chip->flags & NoIRDA)
2216 IRDA_MESSAGE("chipset does not support IRDA\n"); 2205 IRDA_MESSAGE("chipset does not support IRDA\n");
2217 2206
2218 return chip; 2207 return chip;
@@ -2226,8 +2215,8 @@ static int __init smsc_superio_fdc(unsigned short cfg_base)
2226 IRDA_WARNING("%s: can't get cfg_base of 0x%03x\n", 2215 IRDA_WARNING("%s: can't get cfg_base of 0x%03x\n",
2227 __FUNCTION__, cfg_base); 2216 __FUNCTION__, cfg_base);
2228 } else { 2217 } else {
2229 if (!smsc_superio_flat(fdc_chips_flat,cfg_base,"FDC") 2218 if (!smsc_superio_flat(fdc_chips_flat, cfg_base, "FDC") ||
2230 ||!smsc_superio_paged(fdc_chips_paged,cfg_base,"FDC")) 2219 !smsc_superio_paged(fdc_chips_paged, cfg_base, "FDC"))
2231 ret = 0; 2220 ret = 0;
2232 2221
2233 release_region(cfg_base, 2); 2222 release_region(cfg_base, 2);
@@ -2244,9 +2233,10 @@ static int __init smsc_superio_lpc(unsigned short cfg_base)
2244 IRDA_WARNING("%s: can't get cfg_base of 0x%03x\n", 2233 IRDA_WARNING("%s: can't get cfg_base of 0x%03x\n",
2245 __FUNCTION__, cfg_base); 2234 __FUNCTION__, cfg_base);
2246 } else { 2235 } else {
2247 if (!smsc_superio_flat(lpc_chips_flat,cfg_base,"LPC") 2236 if (!smsc_superio_flat(lpc_chips_flat, cfg_base, "LPC") ||
2248 ||!smsc_superio_paged(lpc_chips_paged,cfg_base,"LPC")) 2237 !smsc_superio_paged(lpc_chips_paged, cfg_base, "LPC"))
2249 ret = 0; 2238 ret = 0;
2239
2250 release_region(cfg_base, 2); 2240 release_region(cfg_base, 2);
2251 } 2241 }
2252 return ret; 2242 return ret;
@@ -2269,18 +2259,23 @@ static int __init smsc_superio_lpc(unsigned short cfg_base)
2269static void smsc_ircc_set_transceiver_smsc_ircc_atc(int fir_base, u32 speed) 2259static void smsc_ircc_set_transceiver_smsc_ircc_atc(int fir_base, u32 speed)
2270{ 2260{
2271 unsigned long jiffies_now, jiffies_timeout; 2261 unsigned long jiffies_now, jiffies_timeout;
2272 u8 val; 2262 u8 val;
2273 2263
2274 jiffies_now= jiffies; 2264 jiffies_now = jiffies;
2275 jiffies_timeout= jiffies+SMSC_IRCC2_ATC_PROGRAMMING_TIMEOUT_JIFFIES; 2265 jiffies_timeout = jiffies + SMSC_IRCC2_ATC_PROGRAMMING_TIMEOUT_JIFFIES;
2276 2266
2277 /* ATC */ 2267 /* ATC */
2278 register_bank(fir_base, 4); 2268 register_bank(fir_base, 4);
2279 outb((inb(fir_base+IRCC_ATC) & IRCC_ATC_MASK) |IRCC_ATC_nPROGREADY|IRCC_ATC_ENABLE, fir_base+IRCC_ATC); 2269 outb((inb(fir_base + IRCC_ATC) & IRCC_ATC_MASK) | IRCC_ATC_nPROGREADY|IRCC_ATC_ENABLE,
2280 while((val=(inb(fir_base+IRCC_ATC) & IRCC_ATC_nPROGREADY)) && !time_after(jiffies, jiffies_timeout)); 2270 fir_base + IRCC_ATC);
2281 if(val) 2271
2272 while ((val = (inb(fir_base + IRCC_ATC) & IRCC_ATC_nPROGREADY)) &&
2273 !time_after(jiffies, jiffies_timeout))
2274 /* empty */;
2275
2276 if (val)
2282 IRDA_WARNING("%s(): ATC: 0x%02x\n", __FUNCTION__, 2277 IRDA_WARNING("%s(): ATC: 0x%02x\n", __FUNCTION__,
2283 inb(fir_base+IRCC_ATC)); 2278 inb(fir_base + IRCC_ATC));
2284} 2279}
2285 2280
2286/* 2281/*
@@ -2298,34 +2293,32 @@ static int smsc_ircc_probe_transceiver_smsc_ircc_atc(int fir_base)
2298/* 2293/*
2299 * Function smsc_ircc_set_transceiver_smsc_ircc_fast_pin_select(self, speed) 2294 * Function smsc_ircc_set_transceiver_smsc_ircc_fast_pin_select(self, speed)
2300 * 2295 *
2301 * Set transceiver 2296 * Set transceiver
2302 * 2297 *
2303 */ 2298 */
2304 2299
2305static void smsc_ircc_set_transceiver_smsc_ircc_fast_pin_select(int fir_base, u32 speed) 2300static void smsc_ircc_set_transceiver_smsc_ircc_fast_pin_select(int fir_base, u32 speed)
2306{ 2301{
2307 u8 fast_mode; 2302 u8 fast_mode;
2308 2303
2309 switch(speed) 2304 switch (speed) {
2310 { 2305 default:
2311 default: 2306 case 576000 :
2312 case 576000 : 2307 fast_mode = 0;
2313 fast_mode = 0;
2314 break; 2308 break;
2315 case 1152000 : 2309 case 1152000 :
2316 case 4000000 : 2310 case 4000000 :
2317 fast_mode = IRCC_LCR_A_FAST; 2311 fast_mode = IRCC_LCR_A_FAST;
2318 break; 2312 break;
2319
2320 } 2313 }
2321 register_bank(fir_base, 0); 2314 register_bank(fir_base, 0);
2322 outb((inb(fir_base+IRCC_LCR_A) & 0xbf) | fast_mode, fir_base+IRCC_LCR_A); 2315 outb((inb(fir_base + IRCC_LCR_A) & 0xbf) | fast_mode, fir_base + IRCC_LCR_A);
2323} 2316}
2324 2317
2325/* 2318/*
2326 * Function smsc_ircc_probe_transceiver_smsc_ircc_fast_pin_select(fir_base) 2319 * Function smsc_ircc_probe_transceiver_smsc_ircc_fast_pin_select(fir_base)
2327 * 2320 *
2328 * Probe transceiver 2321 * Probe transceiver
2329 * 2322 *
2330 */ 2323 */
2331 2324
@@ -2337,35 +2330,34 @@ static int smsc_ircc_probe_transceiver_smsc_ircc_fast_pin_select(int fir_base)
2337/* 2330/*
2338 * Function smsc_ircc_set_transceiver_toshiba_sat1800(fir_base, speed) 2331 * Function smsc_ircc_set_transceiver_toshiba_sat1800(fir_base, speed)
2339 * 2332 *
2340 * Set transceiver 2333 * Set transceiver
2341 * 2334 *
2342 */ 2335 */
2343 2336
2344static void smsc_ircc_set_transceiver_toshiba_sat1800(int fir_base, u32 speed) 2337static void smsc_ircc_set_transceiver_toshiba_sat1800(int fir_base, u32 speed)
2345{ 2338{
2346 u8 fast_mode; 2339 u8 fast_mode;
2347 2340
2348 switch(speed) 2341 switch (speed) {
2349 { 2342 default:
2350 default: 2343 case 576000 :
2351 case 576000 : 2344 fast_mode = 0;
2352 fast_mode = 0;
2353 break; 2345 break;
2354 case 1152000 : 2346 case 1152000 :
2355 case 4000000 : 2347 case 4000000 :
2356 fast_mode = /*IRCC_LCR_A_FAST |*/ IRCC_LCR_A_GP_DATA; 2348 fast_mode = /*IRCC_LCR_A_FAST |*/ IRCC_LCR_A_GP_DATA;
2357 break; 2349 break;
2358 2350
2359 } 2351 }
2360 /* This causes an interrupt */ 2352 /* This causes an interrupt */
2361 register_bank(fir_base, 0); 2353 register_bank(fir_base, 0);
2362 outb((inb(fir_base+IRCC_LCR_A) & 0xbf) | fast_mode, fir_base+IRCC_LCR_A); 2354 outb((inb(fir_base + IRCC_LCR_A) & 0xbf) | fast_mode, fir_base + IRCC_LCR_A);
2363} 2355}
2364 2356
2365/* 2357/*
2366 * Function smsc_ircc_probe_transceiver_toshiba_sat1800(fir_base) 2358 * Function smsc_ircc_probe_transceiver_toshiba_sat1800(fir_base)
2367 * 2359 *
2368 * Probe transceiver 2360 * Probe transceiver
2369 * 2361 *
2370 */ 2362 */
2371 2363
@@ -2377,20 +2369,3 @@ static int smsc_ircc_probe_transceiver_toshiba_sat1800(int fir_base)
2377 2369
2378module_init(smsc_ircc_init); 2370module_init(smsc_ircc_init);
2379module_exit(smsc_ircc_cleanup); 2371module_exit(smsc_ircc_cleanup);
2380
2381MODULE_AUTHOR("Daniele Peri <peri@csai.unipa.it>");
2382MODULE_DESCRIPTION("SMC IrCC SIR/FIR controller driver");
2383MODULE_LICENSE("GPL");
2384
2385module_param(ircc_dma, int, 0);
2386MODULE_PARM_DESC(ircc_dma, "DMA channel");
2387module_param(ircc_irq, int, 0);
2388MODULE_PARM_DESC(ircc_irq, "IRQ line");
2389module_param(ircc_fir, int, 0);
2390MODULE_PARM_DESC(ircc_fir, "FIR Base Address");
2391module_param(ircc_sir, int, 0);
2392MODULE_PARM_DESC(ircc_sir, "SIR Base Address");
2393module_param(ircc_cfg, int, 0);
2394MODULE_PARM_DESC(ircc_cfg, "Configuration register base address");
2395module_param(ircc_transceiver, int, 0);
2396MODULE_PARM_DESC(ircc_transceiver, "Transceiver type");
diff --git a/drivers/net/irda/smsc-ircc2.h b/drivers/net/irda/smsc-ircc2.h
index 458611cc0d40..0c36286d87f7 100644
--- a/drivers/net/irda/smsc-ircc2.h
+++ b/drivers/net/irda/smsc-ircc2.h
@@ -1,5 +1,5 @@
1/********************************************************************* 1/*********************************************************************
2 * $Id: smsc-ircc2.h,v 1.12.2.1 2002/10/27 10:52:37 dip Exp $ 2 * $Id: smsc-ircc2.h,v 1.12.2.1 2002/10/27 10:52:37 dip Exp $
3 * 3 *
4 * Description: Definitions for the SMC IrCC chipset 4 * Description: Definitions for the SMC IrCC chipset
5 * Status: Experimental. 5 * Status: Experimental.
@@ -9,25 +9,25 @@
9 * All Rights Reserved. 9 * All Rights Reserved.
10 * 10 *
11 * Based on smc-ircc.h: 11 * Based on smc-ircc.h:
12 * 12 *
13 * Copyright (c) 1999-2000, Dag Brattli <dagb@cs.uit.no> 13 * Copyright (c) 1999-2000, Dag Brattli <dagb@cs.uit.no>
14 * Copyright (c) 1998-1999, Thomas Davis (tadavis@jps.net> 14 * Copyright (c) 1998-1999, Thomas Davis (tadavis@jps.net>
15 * All Rights Reserved 15 * All Rights Reserved
16 * 16 *
17 * 17 *
18 * This program is free software; you can redistribute it and/or 18 * This program is free software; you can redistribute it and/or
19 * modify it under the terms of the GNU General Public License as 19 * modify it under the terms of the GNU General Public License as
20 * published by the Free Software Foundation; either version 2 of 20 * published by the Free Software Foundation; either version 2 of
21 * the License, or (at your option) any later version. 21 * the License, or (at your option) any later version.
22 * 22 *
23 * This program is distributed in the hope that it will be useful, 23 * This program is distributed in the hope that it will be useful,
24 * but WITHOUT ANY WARRANTY; without even the implied warranty of 24 * but WITHOUT ANY WARRANTY; without even the implied warranty of
25 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the 25 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
26 * GNU General Public License for more details. 26 * GNU General Public License for more details.
27 * 27 *
28 * You should have received a copy of the GNU General Public License 28 * You should have received a copy of the GNU General Public License
29 * along with this program; if not, write to the Free Software 29 * along with this program; if not, write to the Free Software
30 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, 30 * Foundation, Inc., 59 Temple Place, Suite 330, Boston,
31 * MA 02111-1307 USA 31 * MA 02111-1307 USA
32 * 32 *
33 ********************************************************************/ 33 ********************************************************************/
@@ -112,10 +112,10 @@
112 112
113#define IRCC_CFGA_COM 0x00 113#define IRCC_CFGA_COM 0x00
114#define IRCC_SCE_CFGA_BLOCK_CTRL_BITS_MASK 0x87 114#define IRCC_SCE_CFGA_BLOCK_CTRL_BITS_MASK 0x87
115#define IRCC_CFGA_IRDA_SIR_A 0x08 115#define IRCC_CFGA_IRDA_SIR_A 0x08
116#define IRCC_CFGA_ASK_SIR 0x10 116#define IRCC_CFGA_ASK_SIR 0x10
117#define IRCC_CFGA_IRDA_SIR_B 0x18 117#define IRCC_CFGA_IRDA_SIR_B 0x18
118#define IRCC_CFGA_IRDA_HDLC 0x20 118#define IRCC_CFGA_IRDA_HDLC 0x20
119#define IRCC_CFGA_IRDA_4PPM 0x28 119#define IRCC_CFGA_IRDA_4PPM 0x28
120#define IRCC_CFGA_CONSUMER 0x30 120#define IRCC_CFGA_CONSUMER 0x30
121#define IRCC_CFGA_RAW_IR 0x38 121#define IRCC_CFGA_RAW_IR 0x38
@@ -130,7 +130,7 @@
130#define IRCC_CFGB_LPBCK_TX_CRC 0x10 130#define IRCC_CFGB_LPBCK_TX_CRC 0x10
131#define IRCC_CFGB_NOWAIT 0x08 131#define IRCC_CFGB_NOWAIT 0x08
132#define IRCC_CFGB_STRING_MOVE 0x04 132#define IRCC_CFGB_STRING_MOVE 0x04
133#define IRCC_CFGB_DMA_BURST 0x02 133#define IRCC_CFGB_DMA_BURST 0x02
134#define IRCC_CFGB_DMA_ENABLE 0x01 134#define IRCC_CFGB_DMA_ENABLE 0x01
135 135
136#define IRCC_CFGB_MUX_COM 0x00 136#define IRCC_CFGB_MUX_COM 0x00
@@ -141,11 +141,11 @@
141/* Register block 3 - Identification Registers! */ 141/* Register block 3 - Identification Registers! */
142#define IRCC_ID_HIGH 0x00 /* 0x10 */ 142#define IRCC_ID_HIGH 0x00 /* 0x10 */
143#define IRCC_ID_LOW 0x01 /* 0xB8 */ 143#define IRCC_ID_LOW 0x01 /* 0xB8 */
144#define IRCC_CHIP_ID 0x02 /* 0xF1 */ 144#define IRCC_CHIP_ID 0x02 /* 0xF1 */
145#define IRCC_VERSION 0x03 /* 0x01 */ 145#define IRCC_VERSION 0x03 /* 0x01 */
146#define IRCC_INTERFACE 0x04 /* low 4 = DMA, high 4 = IRQ */ 146#define IRCC_INTERFACE 0x04 /* low 4 = DMA, high 4 = IRQ */
147#define IRCC_INTERFACE_DMA_MASK 0x0F /* low 4 = DMA, high 4 = IRQ */ 147#define IRCC_INTERFACE_DMA_MASK 0x0F /* low 4 = DMA, high 4 = IRQ */
148#define IRCC_INTERFACE_IRQ_MASK 0xF0 /* low 4 = DMA, high 4 = IRQ */ 148#define IRCC_INTERFACE_IRQ_MASK 0xF0 /* low 4 = DMA, high 4 = IRQ */
149 149
150/* Register block 4 - IrDA */ 150/* Register block 4 - IrDA */
151#define IRCC_CONTROL 0x00 151#define IRCC_CONTROL 0x00
@@ -163,10 +163,10 @@
163 163
164/* Register block 5 - IrDA */ 164/* Register block 5 - IrDA */
165#define IRCC_ATC 0x00 165#define IRCC_ATC 0x00
166#define IRCC_ATC_nPROGREADY 0x80 166#define IRCC_ATC_nPROGREADY 0x80
167#define IRCC_ATC_SPEED 0x40 167#define IRCC_ATC_SPEED 0x40
168#define IRCC_ATC_ENABLE 0x20 168#define IRCC_ATC_ENABLE 0x20
169#define IRCC_ATC_MASK 0xE0 169#define IRCC_ATC_MASK 0xE0
170 170
171 171
172#define IRCC_IRHALFDUPLEX_TIMEOUT 0x01 172#define IRCC_IRHALFDUPLEX_TIMEOUT 0x01
@@ -178,8 +178,8 @@
178 */ 178 */
179 179
180#define SMSC_IRCC2_MAX_SIR_SPEED 115200 180#define SMSC_IRCC2_MAX_SIR_SPEED 115200
181#define SMSC_IRCC2_FIR_CHIP_IO_EXTENT 8 181#define SMSC_IRCC2_FIR_CHIP_IO_EXTENT 8
182#define SMSC_IRCC2_SIR_CHIP_IO_EXTENT 8 182#define SMSC_IRCC2_SIR_CHIP_IO_EXTENT 8
183#define SMSC_IRCC2_FIFO_SIZE 16 183#define SMSC_IRCC2_FIFO_SIZE 16
184#define SMSC_IRCC2_FIFO_THRESHOLD 64 184#define SMSC_IRCC2_FIFO_THRESHOLD 64
185/* Max DMA buffer size needed = (data_size + 6) * (window_size) + 6; */ 185/* Max DMA buffer size needed = (data_size + 6) * (window_size) + 6; */
diff --git a/drivers/net/iseries_veth.c b/drivers/net/iseries_veth.c
index dc5d089bf184..3d56cf5a4e23 100644
--- a/drivers/net/iseries_veth.c
+++ b/drivers/net/iseries_veth.c
@@ -4,6 +4,7 @@
4 * Copyright (C) 2001 Kyle A. Lucke (klucke@us.ibm.com), IBM Corp. 4 * Copyright (C) 2001 Kyle A. Lucke (klucke@us.ibm.com), IBM Corp.
5 * Substantially cleaned up by: 5 * Substantially cleaned up by:
6 * Copyright (C) 2003 David Gibson <dwg@au1.ibm.com>, IBM Corporation. 6 * Copyright (C) 2003 David Gibson <dwg@au1.ibm.com>, IBM Corporation.
7 * Copyright (C) 2004-2005 Michael Ellerman, IBM Corporation.
7 * 8 *
8 * This program is free software; you can redistribute it and/or 9 * This program is free software; you can redistribute it and/or
9 * modify it under the terms of the GNU General Public License as 10 * modify it under the terms of the GNU General Public License as
diff --git a/drivers/net/s2io-regs.h b/drivers/net/s2io-regs.h
index 2234a8f05eb2..7cefe5507b9e 100644
--- a/drivers/net/s2io-regs.h
+++ b/drivers/net/s2io-regs.h
@@ -1,5 +1,5 @@
1/************************************************************************ 1/************************************************************************
2 * regs.h: A Linux PCI-X Ethernet driver for S2IO 10GbE Server NIC 2 * regs.h: A Linux PCI-X Ethernet driver for Neterion 10GbE Server NIC
3 * Copyright(c) 2002-2005 Neterion Inc. 3 * Copyright(c) 2002-2005 Neterion Inc.
4 4
5 * This software may be used and distributed according to the terms of 5 * This software may be used and distributed according to the terms of
@@ -713,13 +713,16 @@ typedef struct _XENA_dev_config {
713 u64 mc_err_reg; 713 u64 mc_err_reg;
714#define MC_ERR_REG_ECC_DB_ERR_L BIT(14) 714#define MC_ERR_REG_ECC_DB_ERR_L BIT(14)
715#define MC_ERR_REG_ECC_DB_ERR_U BIT(15) 715#define MC_ERR_REG_ECC_DB_ERR_U BIT(15)
716#define MC_ERR_REG_MIRI_ECC_DB_ERR_0 BIT(18)
717#define MC_ERR_REG_MIRI_ECC_DB_ERR_1 BIT(20)
716#define MC_ERR_REG_MIRI_CRI_ERR_0 BIT(22) 718#define MC_ERR_REG_MIRI_CRI_ERR_0 BIT(22)
717#define MC_ERR_REG_MIRI_CRI_ERR_1 BIT(23) 719#define MC_ERR_REG_MIRI_CRI_ERR_1 BIT(23)
718#define MC_ERR_REG_SM_ERR BIT(31) 720#define MC_ERR_REG_SM_ERR BIT(31)
719#define MC_ERR_REG_ECC_ALL_SNG (BIT(6) | \ 721#define MC_ERR_REG_ECC_ALL_SNG (BIT(2) | BIT(3) | BIT(4) | BIT(5) |\
720 BIT(7) | BIT(17) | BIT(19)) 722 BIT(6) | BIT(7) | BIT(17) | BIT(19))
721#define MC_ERR_REG_ECC_ALL_DBL (BIT(14) | \ 723#define MC_ERR_REG_ECC_ALL_DBL (BIT(10) | BIT(11) | BIT(12) |\
722 BIT(15) | BIT(18) | BIT(20)) 724 BIT(13) | BIT(14) | BIT(15) |\
725 BIT(18) | BIT(20))
723 u64 mc_err_mask; 726 u64 mc_err_mask;
724 u64 mc_err_alarm; 727 u64 mc_err_alarm;
725 728
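For reference, the composite masks above are plain OR-combinations of individual BIT() positions, and the alarm handler later tests the 64-bit mc_err_reg value against them. A hedged userspace sketch of that classification, assuming for illustration a simple BIT(n) = 1ULL << n numbering (the driver's own BIT() macro in s2io.h may count bit 0 from the most-significant end instead):

#include <stdint.h>
#include <stdio.h>

/* illustrative only -- the real definitions live in s2io.h / s2io-regs.h */
#define BIT(n)                  (1ULL << (n))
#define MC_ERR_REG_ECC_ALL_SNG  (BIT(2) | BIT(3) | BIT(4) | BIT(5) | \
                                 BIT(6) | BIT(7) | BIT(17) | BIT(19))
#define MC_ERR_REG_ECC_ALL_DBL  (BIT(10) | BIT(11) | BIT(12) | BIT(13) | \
                                 BIT(14) | BIT(15) | BIT(18) | BIT(20))

static const char *classify_mc_err(uint64_t val)
{
	if (val & MC_ERR_REG_ECC_ALL_DBL)
		return "double-bit ECC error";
	if (val & MC_ERR_REG_ECC_ALL_SNG)
		return "single-bit ECC error";
	return "no ECC error";
}

int main(void)
{
	printf("%s\n", classify_mc_err(BIT(18)));	/* -> double-bit ECC error */
	return 0;
}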
diff --git a/drivers/net/s2io.c b/drivers/net/s2io.c
index 5dda043bd9d7..c829e6a2e8a6 100644
--- a/drivers/net/s2io.c
+++ b/drivers/net/s2io.c
@@ -1,5 +1,5 @@
1/************************************************************************ 1/************************************************************************
2 * s2io.c: A Linux PCI-X Ethernet driver for S2IO 10GbE Server NIC 2 * s2io.c: A Linux PCI-X Ethernet driver for Neterion 10GbE Server NIC
3 * Copyright(c) 2002-2005 Neterion Inc. 3 * Copyright(c) 2002-2005 Neterion Inc.
4 4
5 * This software may be used and distributed according to the terms of 5 * This software may be used and distributed according to the terms of
@@ -28,7 +28,7 @@
28 * explanation of all the variables. 28 * explanation of all the variables.
29 * rx_ring_num : This can be used to program the number of receive rings used 29 * rx_ring_num : This can be used to program the number of receive rings used
30 * in the driver. 30 * in the driver.
31 * rx_ring_len: This defines the number of descriptors each ring can have. This 31 * rx_ring_sz: This defines the number of descriptors each ring can have. This
32 * is also an array of size 8. 32 * is also an array of size 8.
33 * tx_fifo_num: This defines the number of Tx FIFOs that are used in the driver. 33 * tx_fifo_num: This defines the number of Tx FIFOs that are used in the driver.
34 * tx_fifo_len: This too is an array of 8. Each element defines the number of 34 * tx_fifo_len: This too is an array of 8. Each element defines the number of
@@ -67,7 +67,7 @@
67 67
68/* S2io Driver name & version. */ 68/* S2io Driver name & version. */
69static char s2io_driver_name[] = "Neterion"; 69static char s2io_driver_name[] = "Neterion";
70static char s2io_driver_version[] = "Version 2.0.3.1"; 70static char s2io_driver_version[] = "Version 2.0.8.1";
71 71
72static inline int RXD_IS_UP2DT(RxD_t *rxdp) 72static inline int RXD_IS_UP2DT(RxD_t *rxdp)
73{ 73{
@@ -354,7 +354,7 @@ static int init_shared_mem(struct s2io_nic *nic)
354 int lst_size, lst_per_page; 354 int lst_size, lst_per_page;
355 struct net_device *dev = nic->dev; 355 struct net_device *dev = nic->dev;
356#ifdef CONFIG_2BUFF_MODE 356#ifdef CONFIG_2BUFF_MODE
357 u64 tmp; 357 unsigned long tmp;
358 buffAdd_t *ba; 358 buffAdd_t *ba;
359#endif 359#endif
360 360
@@ -404,7 +404,7 @@ static int init_shared_mem(struct s2io_nic *nic)
404 config->tx_cfg[i].fifo_len - 1; 404 config->tx_cfg[i].fifo_len - 1;
405 mac_control->fifos[i].fifo_no = i; 405 mac_control->fifos[i].fifo_no = i;
406 mac_control->fifos[i].nic = nic; 406 mac_control->fifos[i].nic = nic;
407 mac_control->fifos[i].max_txds = MAX_SKB_FRAGS; 407 mac_control->fifos[i].max_txds = MAX_SKB_FRAGS + 1;
408 408
409 for (j = 0; j < page_num; j++) { 409 for (j = 0; j < page_num; j++) {
410 int k = 0; 410 int k = 0;
@@ -418,6 +418,26 @@ static int init_shared_mem(struct s2io_nic *nic)
418 DBG_PRINT(ERR_DBG, "failed for TxDL\n"); 418 DBG_PRINT(ERR_DBG, "failed for TxDL\n");
419 return -ENOMEM; 419 return -ENOMEM;
420 } 420 }
421 /* If we got a zero DMA address(can happen on
422 * certain platforms like PPC), reallocate.
423 * Store virtual address of page we don't want,
424 * to be freed later.
425 */
426 if (!tmp_p) {
427 mac_control->zerodma_virt_addr = tmp_v;
428 DBG_PRINT(INIT_DBG,
429 "%s: Zero DMA address for TxDL. ", dev->name);
430 DBG_PRINT(INIT_DBG,
431 "Virtual address %llx\n", (u64)tmp_v);
432 tmp_v = pci_alloc_consistent(nic->pdev,
433 PAGE_SIZE, &tmp_p);
434 if (!tmp_v) {
435 DBG_PRINT(ERR_DBG,
436 "pci_alloc_consistent ");
437 DBG_PRINT(ERR_DBG, "failed for TxDL\n");
438 return -ENOMEM;
439 }
440 }
421 while (k < lst_per_page) { 441 while (k < lst_per_page) {
422 int l = (j * lst_per_page) + k; 442 int l = (j * lst_per_page) + k;
423 if (l == config->tx_cfg[i].fifo_len) 443 if (l == config->tx_cfg[i].fifo_len)
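The new block retries the allocation when the bus address comes back as 0 -- a valid DMA address on some PPC platforms, but one the driver elsewhere treats as "empty" -- and parks the unwanted page in zerodma_virt_addr so free_shared_mem() can release it later. A condensed sketch of that pattern, using the legacy pci_alloc_consistent() API (the helper name is illustrative, not part of the patch):

/* sketch only: allocate one TxDL page, retrying once on a zero bus address */
static void *alloc_txdl_page(struct pci_dev *pdev, void **zerodma_virt_addr,
			     dma_addr_t *dma)
{
	void *virt = pci_alloc_consistent(pdev, PAGE_SIZE, dma);

	if (!virt)
		return NULL;
	if (!*dma) {
		/* keep the zero-address page; it is freed at teardown */
		*zerodma_virt_addr = virt;
		virt = pci_alloc_consistent(pdev, PAGE_SIZE, dma);
	}
	return virt;
}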
@@ -542,18 +562,18 @@ static int init_shared_mem(struct s2io_nic *nic)
542 (BUF0_LEN + ALIGN_SIZE, GFP_KERNEL); 562 (BUF0_LEN + ALIGN_SIZE, GFP_KERNEL);
543 if (!ba->ba_0_org) 563 if (!ba->ba_0_org)
544 return -ENOMEM; 564 return -ENOMEM;
545 tmp = (u64) ba->ba_0_org; 565 tmp = (unsigned long) ba->ba_0_org;
546 tmp += ALIGN_SIZE; 566 tmp += ALIGN_SIZE;
547 tmp &= ~((u64) ALIGN_SIZE); 567 tmp &= ~((unsigned long) ALIGN_SIZE);
548 ba->ba_0 = (void *) tmp; 568 ba->ba_0 = (void *) tmp;
549 569
550 ba->ba_1_org = (void *) kmalloc 570 ba->ba_1_org = (void *) kmalloc
551 (BUF1_LEN + ALIGN_SIZE, GFP_KERNEL); 571 (BUF1_LEN + ALIGN_SIZE, GFP_KERNEL);
552 if (!ba->ba_1_org) 572 if (!ba->ba_1_org)
553 return -ENOMEM; 573 return -ENOMEM;
554 tmp = (u64) ba->ba_1_org; 574 tmp = (unsigned long) ba->ba_1_org;
555 tmp += ALIGN_SIZE; 575 tmp += ALIGN_SIZE;
556 tmp &= ~((u64) ALIGN_SIZE); 576 tmp &= ~((unsigned long) ALIGN_SIZE);
557 ba->ba_1 = (void *) tmp; 577 ba->ba_1 = (void *) tmp;
558 k++; 578 k++;
559 } 579 }
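The type change from u64 to unsigned long matters because a pointer is being laundered through an integer: on 32-bit platforms only unsigned long is guaranteed to round-trip a pointer. The alignment trick itself -- over-allocate by ALIGN_SIZE bytes, then round the address up -- only works because ALIGN_SIZE is one less than a power of two (e.g. 127 for 128-byte alignment). A minimal sketch:

#define ALIGN_SIZE 127	/* assumption: one less than the required alignment */

static void *align_buf(void *org)
{
	unsigned long tmp = (unsigned long)org;	/* not u64: breaks 32-bit */

	tmp += ALIGN_SIZE;
	tmp &= ~((unsigned long)ALIGN_SIZE);
	return (void *)tmp;			/* aligned address */
}

Usage mirrors the driver: kmalloc(BUF0_LEN + ALIGN_SIZE, GFP_KERNEL), keep the original pointer for kfree(), and hand align_buf() of it to the hardware.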
@@ -600,7 +620,7 @@ static void free_shared_mem(struct s2io_nic *nic)
600 mac_info_t *mac_control; 620 mac_info_t *mac_control;
601 struct config_param *config; 621 struct config_param *config;
602 int lst_size, lst_per_page; 622 int lst_size, lst_per_page;
603 623 struct net_device *dev = nic->dev;
604 624
605 if (!nic) 625 if (!nic)
606 return; 626 return;
@@ -616,9 +636,10 @@ static void free_shared_mem(struct s2io_nic *nic)
616 lst_per_page); 636 lst_per_page);
617 for (j = 0; j < page_num; j++) { 637 for (j = 0; j < page_num; j++) {
618 int mem_blks = (j * lst_per_page); 638 int mem_blks = (j * lst_per_page);
619 if ((!mac_control->fifos[i].list_info) || 639 if (!mac_control->fifos[i].list_info)
620 (!mac_control->fifos[i].list_info[mem_blks]. 640 return;
621 list_virt_addr)) 641 if (!mac_control->fifos[i].list_info[mem_blks].
642 list_virt_addr)
622 break; 643 break;
623 pci_free_consistent(nic->pdev, PAGE_SIZE, 644 pci_free_consistent(nic->pdev, PAGE_SIZE,
624 mac_control->fifos[i]. 645 mac_control->fifos[i].
@@ -628,6 +649,18 @@ static void free_shared_mem(struct s2io_nic *nic)
628 list_info[mem_blks]. 649 list_info[mem_blks].
629 list_phy_addr); 650 list_phy_addr);
630 } 651 }
652 /* If we got a zero DMA address during allocation,
653 * free the page now
654 */
655 if (mac_control->zerodma_virt_addr) {
656 pci_free_consistent(nic->pdev, PAGE_SIZE,
657 mac_control->zerodma_virt_addr,
658 (dma_addr_t)0);
659 DBG_PRINT(INIT_DBG,
660 "%s: Freeing TxDL with zero DMA addr. ", dev->name);
661 DBG_PRINT(INIT_DBG, "Virtual address %llx\n",
662 (u64)(mac_control->zerodma_virt_addr));
663 }
631 kfree(mac_control->fifos[i].list_info); 664 kfree(mac_control->fifos[i].list_info);
632 } 665 }
633 666
@@ -2479,9 +2512,10 @@ static void rx_intr_handler(ring_info_t *ring_data)
2479#endif 2512#endif
2480 spin_lock(&nic->rx_lock); 2513 spin_lock(&nic->rx_lock);
2481 if (atomic_read(&nic->card_state) == CARD_DOWN) { 2514 if (atomic_read(&nic->card_state) == CARD_DOWN) {
2482 DBG_PRINT(ERR_DBG, "%s: %s going down for reset\n", 2515 DBG_PRINT(INTR_DBG, "%s: %s going down for reset\n",
2483 __FUNCTION__, dev->name); 2516 __FUNCTION__, dev->name);
2484 spin_unlock(&nic->rx_lock); 2517 spin_unlock(&nic->rx_lock);
2518 return;
2485 } 2519 }
2486 2520
2487 get_info = ring_data->rx_curr_get_info; 2521 get_info = ring_data->rx_curr_get_info;
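The added return is the real fix here: previously the handler dropped rx_lock on a CARD_DOWN device and then continued to walk the ring anyway. Reduced to its skeleton, the corrected check-under-lock pattern is:

	spin_lock(&nic->rx_lock);
	if (atomic_read(&nic->card_state) == CARD_DOWN) {
		spin_unlock(&nic->rx_lock);
		return;		/* never touch the ring while a reset is pending */
	}
	/* ... process rx descriptors under rx_lock ... */
	spin_unlock(&nic->rx_lock);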
@@ -2596,8 +2630,14 @@ static void tx_intr_handler(fifo_info_t *fifo_data)
2596 if (txdlp->Control_1 & TXD_T_CODE) { 2630 if (txdlp->Control_1 & TXD_T_CODE) {
2597 unsigned long long err; 2631 unsigned long long err;
2598 err = txdlp->Control_1 & TXD_T_CODE; 2632 err = txdlp->Control_1 & TXD_T_CODE;
2599 DBG_PRINT(ERR_DBG, "***TxD error %llx\n", 2633 if ((err >> 48) == 0xA) {
2600 err); 2634 DBG_PRINT(TX_DBG, "TxD returned due \
2635 to loss of link\n");
2636 }
2637 else {
2638 DBG_PRINT(ERR_DBG, "***TxD error \
2639 %llx\n", err);
2640 }
2601 } 2641 }
2602 2642
2603 skb = (struct sk_buff *) ((unsigned long) 2643 skb = (struct sk_buff *) ((unsigned long)
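The new branch decodes the transfer code out of Control_1: after masking with TXD_T_CODE, a value of 0xA in the field starting at bit 48 means the frame was returned because the link dropped, which is expected during link flaps and therefore logged at TX_DBG instead of ERR_DBG. A hedged sketch of that decode (the field position is inferred from the err >> 48 shift in this patch):

/* sketch: true if the TxD was returned due to loss of link */
static int txd_returned_on_link_loss(u64 control_1)
{
	u64 err = control_1 & TXD_T_CODE;	/* transfer code field */

	return (err >> 48) == 0xA;
}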
@@ -2689,12 +2729,16 @@ static void alarm_intr_handler(struct s2io_nic *nic)
2689 if (val64 & MC_ERR_REG_ECC_ALL_DBL) { 2729 if (val64 & MC_ERR_REG_ECC_ALL_DBL) {
2690 nic->mac_control.stats_info->sw_stat. 2730 nic->mac_control.stats_info->sw_stat.
2691 double_ecc_errs++; 2731 double_ecc_errs++;
2692 DBG_PRINT(ERR_DBG, "%s: Device indicates ", 2732 DBG_PRINT(INIT_DBG, "%s: Device indicates ",
2693 dev->name); 2733 dev->name);
2694 DBG_PRINT(ERR_DBG, "double ECC error!!\n"); 2734 DBG_PRINT(INIT_DBG, "double ECC error!!\n");
2695 if (nic->device_type != XFRAME_II_DEVICE) { 2735 if (nic->device_type != XFRAME_II_DEVICE) {
2696 netif_stop_queue(dev); 2736 /* Reset XframeI only if critical error */
2697 schedule_work(&nic->rst_timer_task); 2737 if (val64 & (MC_ERR_REG_MIRI_ECC_DB_ERR_0 |
2738 MC_ERR_REG_MIRI_ECC_DB_ERR_1)) {
2739 netif_stop_queue(dev);
2740 schedule_work(&nic->rst_timer_task);
2741 }
2698 } 2742 }
2699 } else { 2743 } else {
2700 nic->mac_control.stats_info->sw_stat. 2744 nic->mac_control.stats_info->sw_stat.
@@ -2706,7 +2750,8 @@ static void alarm_intr_handler(struct s2io_nic *nic)
2706 val64 = readq(&bar0->serr_source); 2750 val64 = readq(&bar0->serr_source);
2707 if (val64 & SERR_SOURCE_ANY) { 2751 if (val64 & SERR_SOURCE_ANY) {
2708 DBG_PRINT(ERR_DBG, "%s: Device indicates ", dev->name); 2752 DBG_PRINT(ERR_DBG, "%s: Device indicates ", dev->name);
2709 DBG_PRINT(ERR_DBG, "serious error!!\n"); 2753 DBG_PRINT(ERR_DBG, "serious error %llx!!\n",
2754 (unsigned long long)val64);
2710 netif_stop_queue(dev); 2755 netif_stop_queue(dev);
2711 schedule_work(&nic->rst_timer_task); 2756 schedule_work(&nic->rst_timer_task);
2712 } 2757 }
@@ -3130,7 +3175,7 @@ int s2io_xmit(struct sk_buff *skb, struct net_device *dev)
3130 queue_len = mac_control->fifos[queue].tx_curr_put_info.fifo_len + 1; 3175 queue_len = mac_control->fifos[queue].tx_curr_put_info.fifo_len + 1;
3131 /* Avoid "put" pointer going beyond "get" pointer */ 3176 /* Avoid "put" pointer going beyond "get" pointer */
3132 if (txdp->Host_Control || (((put_off + 1) % queue_len) == get_off)) { 3177 if (txdp->Host_Control || (((put_off + 1) % queue_len) == get_off)) {
3133 DBG_PRINT(ERR_DBG, "Error in xmit, No free TXDs.\n"); 3178 DBG_PRINT(TX_DBG, "Error in xmit, No free TXDs.\n");
3134 netif_stop_queue(dev); 3179 netif_stop_queue(dev);
3135 dev_kfree_skb(skb); 3180 dev_kfree_skb(skb);
3136 spin_unlock_irqrestore(&sp->tx_lock, flags); 3181 spin_unlock_irqrestore(&sp->tx_lock, flags);
@@ -3528,7 +3573,7 @@ static void s2io_set_multicast(struct net_device *dev)
3528 3573
3529 val64 = readq(&bar0->mac_cfg); 3574 val64 = readq(&bar0->mac_cfg);
3530 sp->promisc_flg = 1; 3575 sp->promisc_flg = 1;
3531 DBG_PRINT(ERR_DBG, "%s: entered promiscuous mode\n", 3576 DBG_PRINT(INFO_DBG, "%s: entered promiscuous mode\n",
3532 dev->name); 3577 dev->name);
3533 } else if (!(dev->flags & IFF_PROMISC) && (sp->promisc_flg)) { 3578 } else if (!(dev->flags & IFF_PROMISC) && (sp->promisc_flg)) {
3534 /* Remove the NIC from promiscuous mode */ 3579 /* Remove the NIC from promiscuous mode */
@@ -3543,7 +3588,7 @@ static void s2io_set_multicast(struct net_device *dev)
3543 3588
3544 val64 = readq(&bar0->mac_cfg); 3589 val64 = readq(&bar0->mac_cfg);
3545 sp->promisc_flg = 0; 3590 sp->promisc_flg = 0;
3546 DBG_PRINT(ERR_DBG, "%s: left promiscuous mode\n", 3591 DBG_PRINT(INFO_DBG, "%s: left promiscuous mode\n",
3547 dev->name); 3592 dev->name);
3548 } 3593 }
3549 3594
@@ -5325,7 +5370,7 @@ s2io_init_nic(struct pci_dev *pdev, const struct pci_device_id *pre)
5325 break; 5370 break;
5326 } 5371 }
5327 } 5372 }
5328 config->max_txds = MAX_SKB_FRAGS; 5373 config->max_txds = MAX_SKB_FRAGS + 1;
5329 5374
5330 /* Rx side parameters. */ 5375 /* Rx side parameters. */
5331 if (rx_ring_sz[0] == 0) 5376 if (rx_ring_sz[0] == 0)
@@ -5525,9 +5570,14 @@ s2io_init_nic(struct pci_dev *pdev, const struct pci_device_id *pre)
5525 if (sp->device_type & XFRAME_II_DEVICE) { 5570 if (sp->device_type & XFRAME_II_DEVICE) {
5526 DBG_PRINT(ERR_DBG, "%s: Neterion Xframe II 10GbE adapter ", 5571 DBG_PRINT(ERR_DBG, "%s: Neterion Xframe II 10GbE adapter ",
5527 dev->name); 5572 dev->name);
5528 DBG_PRINT(ERR_DBG, "(rev %d), Driver %s\n", 5573 DBG_PRINT(ERR_DBG, "(rev %d), %s",
5529 get_xena_rev_id(sp->pdev), 5574 get_xena_rev_id(sp->pdev),
5530 s2io_driver_version); 5575 s2io_driver_version);
5576#ifdef CONFIG_2BUFF_MODE
5577 DBG_PRINT(ERR_DBG, ", Buffer mode %d",2);
5578#endif
5579
5580 DBG_PRINT(ERR_DBG, "\nCopyright(c) 2002-2005 Neterion Inc.\n");
5531 DBG_PRINT(ERR_DBG, "MAC ADDR: %02x:%02x:%02x:%02x:%02x:%02x\n", 5581 DBG_PRINT(ERR_DBG, "MAC ADDR: %02x:%02x:%02x:%02x:%02x:%02x\n",
5532 sp->def_mac_addr[0].mac_addr[0], 5582 sp->def_mac_addr[0].mac_addr[0],
5533 sp->def_mac_addr[0].mac_addr[1], 5583 sp->def_mac_addr[0].mac_addr[1],
@@ -5544,9 +5594,13 @@ s2io_init_nic(struct pci_dev *pdev, const struct pci_device_id *pre)
5544 } else { 5594 } else {
5545 DBG_PRINT(ERR_DBG, "%s: Neterion Xframe I 10GbE adapter ", 5595 DBG_PRINT(ERR_DBG, "%s: Neterion Xframe I 10GbE adapter ",
5546 dev->name); 5596 dev->name);
5547 DBG_PRINT(ERR_DBG, "(rev %d), Driver %s\n", 5597 DBG_PRINT(ERR_DBG, "(rev %d), %s",
5548 get_xena_rev_id(sp->pdev), 5598 get_xena_rev_id(sp->pdev),
5549 s2io_driver_version); 5599 s2io_driver_version);
5600#ifdef CONFIG_2BUFF_MODE
5601 DBG_PRINT(ERR_DBG, ", Buffer mode %d",2);
5602#endif
5603 DBG_PRINT(ERR_DBG, "\nCopyright(c) 2002-2005 Neterion Inc.\n");
5550 DBG_PRINT(ERR_DBG, "MAC ADDR: %02x:%02x:%02x:%02x:%02x:%02x\n", 5604 DBG_PRINT(ERR_DBG, "MAC ADDR: %02x:%02x:%02x:%02x:%02x:%02x\n",
5551 sp->def_mac_addr[0].mac_addr[0], 5605 sp->def_mac_addr[0].mac_addr[0],
5552 sp->def_mac_addr[0].mac_addr[1], 5606 sp->def_mac_addr[0].mac_addr[1],
diff --git a/drivers/net/s2io.h b/drivers/net/s2io.h
index bc64d967f080..89151cb52181 100644
--- a/drivers/net/s2io.h
+++ b/drivers/net/s2io.h
@@ -1,5 +1,5 @@
1/************************************************************************ 1/************************************************************************
2 * s2io.h: A Linux PCI-X Ethernet driver for S2IO 10GbE Server NIC 2 * s2io.h: A Linux PCI-X Ethernet driver for Neterion 10GbE Server NIC
3 * Copyright(c) 2002-2005 Neterion Inc. 3 * Copyright(c) 2002-2005 Neterion Inc.
4 4
5 * This software may be used and distributed according to the terms of 5 * This software may be used and distributed according to the terms of
@@ -622,6 +622,9 @@ typedef struct mac_info {
622 /* Fifo specific structure */ 622 /* Fifo specific structure */
623 fifo_info_t fifos[MAX_TX_FIFOS]; 623 fifo_info_t fifos[MAX_TX_FIFOS];
624 624
625 /* Save virtual address of TxD page with zero DMA addr(if any) */
626 void *zerodma_virt_addr;
627
625/* rx side stuff */ 628/* rx side stuff */
626 /* Ring specific structure */ 629 /* Ring specific structure */
627 ring_info_t rings[MAX_RX_RINGS]; 630 ring_info_t rings[MAX_RX_RINGS];
diff --git a/drivers/net/smc91x.h b/drivers/net/smc91x.h
index a9b06b8d8e3f..ac9ce6509eee 100644
--- a/drivers/net/smc91x.h
+++ b/drivers/net/smc91x.h
@@ -986,7 +986,7 @@ static const char * chip_ids[ 16 ] = {
986 }) 986 })
987#endif 987#endif
988 988
989#if SMC_CAN_USE_DATACS 989#ifdef SMC_CAN_USE_DATACS
990#define SMC_PUSH_DATA(p, l) \ 990#define SMC_PUSH_DATA(p, l) \
991 if ( lp->datacs ) { \ 991 if ( lp->datacs ) { \
992 unsigned char *__ptr = (p); \ 992 unsigned char *__ptr = (p); \
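The one-character change is semantic, not cosmetic: `#if SMC_CAN_USE_DATACS` evaluates the macro's value (an undefined identifier evaluates to 0 under the C preprocessor), whereas `#ifdef` only asks whether the macro is defined at all. On a platform that defines the macro as 0 the two forms disagree:

#define SMC_CAN_USE_DATACS 0

#if SMC_CAN_USE_DATACS
/* compiled only when the macro is defined *and* non-zero */
#endif

#ifdef SMC_CAN_USE_DATACS
/* compiled whenever the macro is defined, even as 0 */
#endif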
diff --git a/drivers/net/spider_net.c b/drivers/net/spider_net.c
new file mode 100644
index 000000000000..4e19220473d0
--- /dev/null
+++ b/drivers/net/spider_net.c
@@ -0,0 +1,2334 @@
1/*
2 * Network device driver for Cell Processor-Based Blade
3 *
4 * (C) Copyright IBM Corp. 2005
5 *
6 * Authors : Utz Bacher <utz.bacher@de.ibm.com>
7 * Jens Osterkamp <Jens.Osterkamp@de.ibm.com>
8 *
9 * This program is free software; you can redistribute it and/or modify
10 * it under the terms of the GNU General Public License as published by
11 * the Free Software Foundation; either version 2, or (at your option)
12 * any later version.
13 *
14 * This program is distributed in the hope that it will be useful,
15 * but WITHOUT ANY WARRANTY; without even the implied warranty of
16 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
17 * GNU General Public License for more details.
18 *
19 * You should have received a copy of the GNU General Public License
20 * along with this program; if not, write to the Free Software
21 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
22 */
23
24#include <linux/config.h>
25
26#include <linux/compiler.h>
27#include <linux/crc32.h>
28#include <linux/delay.h>
29#include <linux/etherdevice.h>
30#include <linux/ethtool.h>
31#include <linux/firmware.h>
32#include <linux/if_vlan.h>
33#include <linux/init.h>
34#include <linux/ioport.h>
35#include <linux/ip.h>
36#include <linux/kernel.h>
37#include <linux/mii.h>
38#include <linux/module.h>
39#include <linux/netdevice.h>
40#include <linux/device.h>
41#include <linux/pci.h>
42#include <linux/skbuff.h>
43#include <linux/slab.h>
44#include <linux/tcp.h>
45#include <linux/types.h>
46#include <linux/wait.h>
47#include <linux/workqueue.h>
48#include <asm/bitops.h>
49#include <asm/pci-bridge.h>
50#include <net/checksum.h>
51
52#include "spider_net.h"
53
54MODULE_AUTHOR("Utz Bacher <utz.bacher@de.ibm.com> and Jens Osterkamp " \
55 "<Jens.Osterkamp@de.ibm.com>");
56MODULE_DESCRIPTION("Spider Southbridge Gigabit Ethernet driver");
57MODULE_LICENSE("GPL");
58
59static int rx_descriptors = SPIDER_NET_RX_DESCRIPTORS_DEFAULT;
60static int tx_descriptors = SPIDER_NET_TX_DESCRIPTORS_DEFAULT;
61
62module_param(rx_descriptors, int, 0644);
63module_param(tx_descriptors, int, 0644);
64
65MODULE_PARM_DESC(rx_descriptors, "number of descriptors used " \
66 "in rx chains");
67MODULE_PARM_DESC(tx_descriptors, "number of descriptors used " \
68 "in tx chain");
69
70char spider_net_driver_name[] = "spidernet";
71
72static struct pci_device_id spider_net_pci_tbl[] = {
73 { PCI_VENDOR_ID_TOSHIBA_2, PCI_DEVICE_ID_TOSHIBA_SPIDER_NET,
74 PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
75 { 0, }
76};
77
78MODULE_DEVICE_TABLE(pci, spider_net_pci_tbl);
79
80/**
81 * spider_net_read_reg - reads an SMMIO register of a card
82 * @card: device structure
83 * @reg: register to read from
84 *
85 * returns the content of the specified SMMIO register.
86 */
87static u32
88spider_net_read_reg(struct spider_net_card *card, u32 reg)
89{
90 u32 value;
91
92 value = readl(card->regs + reg);
93 value = le32_to_cpu(value);
94
95 return value;
96}
97
98/**
99 * spider_net_write_reg - writes to an SMMIO register of a card
100 * @card: device structure
101 * @reg: register to write to
102 * @value: value to write into the specified SMMIO register
103 */
104static void
105spider_net_write_reg(struct spider_net_card *card, u32 reg, u32 value)
106{
107 value = cpu_to_le32(value);
108 writel(value, card->regs + reg);
109}
110
111/**
112 * spider_net_write_reg_sync - writes to an SMMIO register of a card
113 * @card: device structure
114 * @reg: register to write to
115 * @value: value to write into the specified SMMIO register
116 *
117 * Unlike spider_net_write_reg, this will also make sure the
118 * data arrives on the card by reading the reg again.
119 */
120static void
121spider_net_write_reg_sync(struct spider_net_card *card, u32 reg, u32 value)
122{
123 value = cpu_to_le32(value);
124 writel(value, card->regs + reg);
125 (void)readl(card->regs + reg);
126}
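spider_net_write_reg_sync relies on a standard MMIO idiom: PCI writes are posted, so reading a register back from the same device forces the preceding write to reach the card before the function returns. The pattern in isolation:

/* write an MMIO register and flush the posted write with a read-back */
static void mmio_write_flushed(void __iomem *addr, u32 value)
{
	writel(value, addr);
	(void)readl(addr);	/* the read cannot complete before the write */
}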
127
128/**
129 * spider_net_rx_irq_off - switch off rx irq on this spider card
130 * @card: device structure
131 *
132 * switches off rx irq by masking them out in the GHIINTnMSK register
133 */
134static void
135spider_net_rx_irq_off(struct spider_net_card *card)
136{
137 u32 regvalue;
138 unsigned long flags;
139
140 spin_lock_irqsave(&card->intmask_lock, flags);
141 regvalue = spider_net_read_reg(card, SPIDER_NET_GHIINT0MSK);
142 regvalue &= ~SPIDER_NET_RXINT;
143 spider_net_write_reg_sync(card, SPIDER_NET_GHIINT0MSK, regvalue);
144 spin_unlock_irqrestore(&card->intmask_lock, flags);
145}
146
147/** spider_net_write_phy - write to phy register
148 * @netdev: adapter to be written to
149 * @mii_id: id of MII
150 * @reg: PHY register
151 * @val: value to be written to phy register
152 *
153 * spider_net_write_phy_register writes to an arbitrary PHY
154 * register via the spider GPCWOPCMD register. We assume the queue does
155 * not run full (not more than 15 commands outstanding).
156 **/
157static void
158spider_net_write_phy(struct net_device *netdev, int mii_id,
159 int reg, int val)
160{
161 struct spider_net_card *card = netdev_priv(netdev);
162 u32 writevalue;
163
164 writevalue = ((u32)mii_id << 21) |
165 ((u32)reg << 16) | ((u32)val);
166
167 spider_net_write_reg(card, SPIDER_NET_GPCWOPCMD, writevalue);
168}
169
170/** spider_net_read_phy - read from phy register
171 * @netdev: network device to be read from
172 * @mii_id: id of MII
173 * @reg: PHY register
174 *
175 * Returns value read from PHY register
176 *
 177 * spider_net_read_phy reads from an arbitrary PHY
178 * register via the spider GPCROPCMD register
179 **/
180static int
181spider_net_read_phy(struct net_device *netdev, int mii_id, int reg)
182{
183 struct spider_net_card *card = netdev_priv(netdev);
184 u32 readvalue;
185
186 readvalue = ((u32)mii_id << 21) | ((u32)reg << 16);
187 spider_net_write_reg(card, SPIDER_NET_GPCROPCMD, readvalue);
188
189 /* we don't use semaphores to wait for an SPIDER_NET_GPROPCMPINT
190 * interrupt, as we poll for the completion of the read operation
191 * in spider_net_read_phy. Should take about 50 us */
192 do {
193 readvalue = spider_net_read_reg(card, SPIDER_NET_GPCROPCMD);
194 } while (readvalue & SPIDER_NET_GPREXEC);
195
196 readvalue &= SPIDER_NET_GPRDAT_MASK;
197
198 return readvalue;
199}
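spider_net_read_phy busy-waits on SPIDER_NET_GPREXEC with no upper bound; per the comment the operation normally completes in about 50 us. A bounded variant of the same poll loop could look like the sketch below (the loop count, cpu_relax() and -ETIMEDOUT return are assumptions, not part of this driver):

/* hedged sketch: poll GPCROPCMD until GPREXEC clears, with a bail-out */
static int spider_net_poll_phy_read(struct spider_net_card *card, u32 *result)
{
	unsigned int loops = 1000;	/* ~50 us expected; generous margin */
	u32 readvalue;

	do {
		readvalue = spider_net_read_reg(card, SPIDER_NET_GPCROPCMD);
		if (!(readvalue & SPIDER_NET_GPREXEC)) {
			*result = readvalue & SPIDER_NET_GPRDAT_MASK;
			return 0;
		}
		cpu_relax();
	} while (--loops);

	return -ETIMEDOUT;
}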
200
201/**
202 * spider_net_rx_irq_on - switch on rx irq on this spider card
203 * @card: device structure
204 *
205 * switches on rx irq by enabling them in the GHIINTnMSK register
206 */
207static void
208spider_net_rx_irq_on(struct spider_net_card *card)
209{
210 u32 regvalue;
211 unsigned long flags;
212
213 spin_lock_irqsave(&card->intmask_lock, flags);
214 regvalue = spider_net_read_reg(card, SPIDER_NET_GHIINT0MSK);
215 regvalue |= SPIDER_NET_RXINT;
216 spider_net_write_reg_sync(card, SPIDER_NET_GHIINT0MSK, regvalue);
217 spin_unlock_irqrestore(&card->intmask_lock, flags);
218}
219
220/**
221 * spider_net_tx_irq_off - switch off tx irq on this spider card
222 * @card: device structure
223 *
224 * switches off tx irq by masking them out in the GHIINTnMSK register
225 */
226static void
227spider_net_tx_irq_off(struct spider_net_card *card)
228{
229 u32 regvalue;
230 unsigned long flags;
231
232 spin_lock_irqsave(&card->intmask_lock, flags);
233 regvalue = spider_net_read_reg(card, SPIDER_NET_GHIINT0MSK);
234 regvalue &= ~SPIDER_NET_TXINT;
235 spider_net_write_reg_sync(card, SPIDER_NET_GHIINT0MSK, regvalue);
236 spin_unlock_irqrestore(&card->intmask_lock, flags);
237}
238
239/**
240 * spider_net_tx_irq_on - switch on tx irq on this spider card
241 * @card: device structure
242 *
243 * switches on tx irq by enabling them in the GHIINTnMSK register
244 */
245static void
246spider_net_tx_irq_on(struct spider_net_card *card)
247{
248 u32 regvalue;
249 unsigned long flags;
250
251 spin_lock_irqsave(&card->intmask_lock, flags);
252 regvalue = spider_net_read_reg(card, SPIDER_NET_GHIINT0MSK);
253 regvalue |= SPIDER_NET_TXINT;
254 spider_net_write_reg_sync(card, SPIDER_NET_GHIINT0MSK, regvalue);
255 spin_unlock_irqrestore(&card->intmask_lock, flags);
256}
257
258/**
259 * spider_net_set_promisc - sets the unicast address or the promiscuous mode
260 * @card: card structure
261 *
262 * spider_net_set_promisc sets the unicast destination address filter and
263 * thus either allows for non-promisc mode or promisc mode
264 */
265static void
266spider_net_set_promisc(struct spider_net_card *card)
267{
268 u32 macu, macl;
269 struct net_device *netdev = card->netdev;
270
271 if (netdev->flags & IFF_PROMISC) {
272 /* clear destination entry 0 */
273 spider_net_write_reg(card, SPIDER_NET_GMRUAFILnR, 0);
274 spider_net_write_reg(card, SPIDER_NET_GMRUAFILnR + 0x04, 0);
275 spider_net_write_reg(card, SPIDER_NET_GMRUA0FIL15R,
276 SPIDER_NET_PROMISC_VALUE);
277 } else {
278 macu = netdev->dev_addr[0];
279 macu <<= 8;
280 macu |= netdev->dev_addr[1];
281 memcpy(&macl, &netdev->dev_addr[2], sizeof(macl));
282
283 macu |= SPIDER_NET_UA_DESCR_VALUE;
284 spider_net_write_reg(card, SPIDER_NET_GMRUAFILnR, macu);
285 spider_net_write_reg(card, SPIDER_NET_GMRUAFILnR + 0x04, macl);
286 spider_net_write_reg(card, SPIDER_NET_GMRUA0FIL15R,
287 SPIDER_NET_NONPROMISC_VALUE);
288 }
289}
290
291/**
292 * spider_net_get_mac_address - read mac address from spider card
293 * @card: device structure
294 *
295 * reads MAC address from GMACUNIMACU and GMACUNIMACL registers
296 */
297static int
298spider_net_get_mac_address(struct net_device *netdev)
299{
300 struct spider_net_card *card = netdev_priv(netdev);
301 u32 macl, macu;
302
303 macl = spider_net_read_reg(card, SPIDER_NET_GMACUNIMACL);
304 macu = spider_net_read_reg(card, SPIDER_NET_GMACUNIMACU);
305
306 netdev->dev_addr[0] = (macu >> 24) & 0xff;
307 netdev->dev_addr[1] = (macu >> 16) & 0xff;
308 netdev->dev_addr[2] = (macu >> 8) & 0xff;
309 netdev->dev_addr[3] = macu & 0xff;
310 netdev->dev_addr[4] = (macl >> 8) & 0xff;
311 netdev->dev_addr[5] = macl & 0xff;
312
313 if (!is_valid_ether_addr(&netdev->dev_addr[0]))
314 return -EINVAL;
315
316 return 0;
317}
318
319/**
320 * spider_net_get_descr_status -- returns the status of a descriptor
321 * @descr: descriptor to look at
322 *
323 * returns the status as in the dmac_cmd_status field of the descriptor
324 */
325static enum spider_net_descr_status
326spider_net_get_descr_status(struct spider_net_descr *descr)
327{
328 u32 cmd_status;
329 rmb();
330 cmd_status = descr->dmac_cmd_status;
331 rmb();
332 cmd_status >>= SPIDER_NET_DESCR_IND_PROC_SHIFT;
333 /* no need to mask out any bits, as cmd_status is 32 bits wide only
334 * (and unsigned) */
335 return cmd_status;
336}
337
338/**
339 * spider_net_set_descr_status -- sets the status of a descriptor
340 * @descr: descriptor to change
341 * @status: status to set in the descriptor
342 *
343 * changes the status to the specified value. Doesn't change other bits
344 * in the status
345 */
346static void
347spider_net_set_descr_status(struct spider_net_descr *descr,
348 enum spider_net_descr_status status)
349{
350 u32 cmd_status;
351 /* read the status */
352 mb();
353 cmd_status = descr->dmac_cmd_status;
354 /* clean the upper 4 bits */
355 cmd_status &= SPIDER_NET_DESCR_IND_PROC_MASKO;
356 /* add the status to it */
357 cmd_status |= ((u32)status)<<SPIDER_NET_DESCR_IND_PROC_SHIFT;
358 /* and write it back */
359 descr->dmac_cmd_status = cmd_status;
360 wmb();
361}
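The descriptor status is kept in the top four bits of dmac_cmd_status: the getter shifts them down, the setter clears them via SPIDER_NET_DESCR_IND_PROC_MASKO and ORs the new value in. Stripped of the memory barriers, and assuming a shift of 28 with a 0x0fffffff mask (consistent with the "upper 4 bits" comments above), the bit manipulation is just:

static u32 pack_status(u32 cmd_status, u32 status)
{
	cmd_status &= 0x0fffffff;	/* clear the old status bits */
	return cmd_status | (status << 28);
}

static u32 unpack_status(u32 cmd_status)
{
	return cmd_status >> 28;	/* upper 4 bits; no further mask needed */
}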
362
363/**
364 * spider_net_free_chain - free descriptor chain
365 * @card: card structure
366 * @chain: address of chain
367 *
368 */
369static void
370spider_net_free_chain(struct spider_net_card *card,
371 struct spider_net_descr_chain *chain)
372{
373 struct spider_net_descr *descr;
374
375 for (descr = chain->tail; !descr->bus_addr; descr = descr->next) {
376 pci_unmap_single(card->pdev, descr->bus_addr,
377 SPIDER_NET_DESCR_SIZE, PCI_DMA_BIDIRECTIONAL);
378 descr->bus_addr = 0;
379 }
380}
381
382/**
383 * spider_net_init_chain - links descriptor chain
384 * @card: card structure
385 * @chain: address of chain
386 * @start_descr: address of descriptor array
387 * @no: number of descriptors
388 *
389 * we manage a circular list that mirrors the hardware structure,
390 * except that the hardware uses bus addresses.
391 *
392 * returns 0 on success, <0 on failure
393 */
394static int
395spider_net_init_chain(struct spider_net_card *card,
396 struct spider_net_descr_chain *chain,
397 struct spider_net_descr *start_descr, int no)
398{
399 int i;
400 struct spider_net_descr *descr;
401
402 spin_lock_init(&card->chain_lock);
403
404 descr = start_descr;
405 memset(descr, 0, sizeof(*descr) * no);
406
407 /* set up the hardware pointers in each descriptor */
408 for (i=0; i<no; i++, descr++) {
409 spider_net_set_descr_status(descr, SPIDER_NET_DESCR_NOT_IN_USE);
410
411 descr->bus_addr =
412 pci_map_single(card->pdev, descr,
413 SPIDER_NET_DESCR_SIZE,
414 PCI_DMA_BIDIRECTIONAL);
415
416 if (descr->bus_addr == DMA_ERROR_CODE)
417 goto iommu_error;
418
419 descr->next = descr + 1;
420 descr->prev = descr - 1;
421
422 }
423 /* do actual circular list */
424 (descr-1)->next = start_descr;
425 start_descr->prev = descr-1;
426
427 descr = start_descr;
428 for (i=0; i < no; i++, descr++) {
429 descr->next_descr_addr = descr->next->bus_addr;
430 }
431
432 chain->head = start_descr;
433 chain->tail = start_descr;
434
435 return 0;
436
437iommu_error:
438 descr = start_descr;
439 for (i=0; i < no; i++, descr++)
440 if (descr->bus_addr)
441 pci_unmap_single(card->pdev, descr->bus_addr,
442 SPIDER_NET_DESCR_SIZE, PCI_DMA_BIDIRECTIONAL);
443 return -ENOMEM;
444}
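spider_net_init_chain turns a flat descriptor array into a circular, doubly linked list and then copies each neighbour's bus address into next_descr_addr so the hardware can follow the same ring. The pointer-linking step on its own, without the pci_map_single() calls, amounts to:

/* sketch: link 'no' descriptors of an array into a ring */
static void link_descr_ring(struct spider_net_descr *descr, int no)
{
	int i;

	for (i = 0; i < no; i++) {
		descr[i].next = &descr[(i + 1) % no];		/* wraps to 0 */
		descr[i].prev = &descr[(i + no - 1) % no];
	}
}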
445
446/**
447 * spider_net_free_rx_chain_contents - frees descr contents in rx chain
448 * @card: card structure
449 *
450 * returns 0 on success, <0 on failure
451 */
452static void
453spider_net_free_rx_chain_contents(struct spider_net_card *card)
454{
455 struct spider_net_descr *descr;
456
457 descr = card->rx_chain.head;
458 while (descr->next != card->rx_chain.head) {
459 if (descr->skb) {
460 dev_kfree_skb(descr->skb);
461 pci_unmap_single(card->pdev, descr->buf_addr,
462 SPIDER_NET_MAX_MTU,
463 PCI_DMA_BIDIRECTIONAL);
464 }
465 descr = descr->next;
466 }
467}
468
469/**
470 * spider_net_prepare_rx_descr - reinitializes a rx descriptor
471 * @card: card structure
472 * @descr: descriptor to re-init
473 *
 474 * return 0 on success, <0 on failure
475 *
476 * allocates a new rx skb, iommu-maps it and attaches it to the descriptor.
477 * Activate the descriptor state-wise
478 */
479static int
480spider_net_prepare_rx_descr(struct spider_net_card *card,
481 struct spider_net_descr *descr)
482{
483 int error = 0;
484 int offset;
485 int bufsize;
486
487 /* we need to round up the buffer size to a multiple of 128 */
488 bufsize = (SPIDER_NET_MAX_MTU + SPIDER_NET_RXBUF_ALIGN - 1) &
489 (~(SPIDER_NET_RXBUF_ALIGN - 1));
490
491 /* and we need to have it 128 byte aligned, therefore we allocate a
492 * bit more */
493 /* allocate an skb */
494 descr->skb = dev_alloc_skb(bufsize + SPIDER_NET_RXBUF_ALIGN - 1);
495 if (!descr->skb) {
496 if (net_ratelimit())
497 if (netif_msg_rx_err(card))
498 pr_err("Not enough memory to allocate "
499 "rx buffer\n");
500 return -ENOMEM;
501 }
502 descr->buf_size = bufsize;
503 descr->result_size = 0;
504 descr->valid_size = 0;
505 descr->data_status = 0;
506 descr->data_error = 0;
507
508 offset = ((unsigned long)descr->skb->data) &
509 (SPIDER_NET_RXBUF_ALIGN - 1);
510 if (offset)
511 skb_reserve(descr->skb, SPIDER_NET_RXBUF_ALIGN - offset);
512 /* io-mmu-map the skb */
513 descr->buf_addr = pci_map_single(card->pdev, descr->skb->data,
514 SPIDER_NET_MAX_MTU,
515 PCI_DMA_BIDIRECTIONAL);
516 if (descr->buf_addr == DMA_ERROR_CODE) {
517 dev_kfree_skb_any(descr->skb);
518 if (netif_msg_rx_err(card))
519 pr_err("Could not iommu-map rx buffer\n");
520 spider_net_set_descr_status(descr, SPIDER_NET_DESCR_NOT_IN_USE);
521 } else {
522 descr->dmac_cmd_status = SPIDER_NET_DMAC_RX_CARDOWNED;
523 }
524
525 return error;
526}
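spider_net_prepare_rx_descr over-allocates the skb by SPIDER_NET_RXBUF_ALIGN - 1 bytes and then uses skb_reserve() to push skb->data up to the next 128-byte boundary before DMA-mapping it. The offset computation in isolation (assuming the alignment is a power of two, as 128 is):

static void align_rx_skb(struct sk_buff *skb, unsigned int align)
{
	unsigned int offset = (unsigned long)skb->data & (align - 1);

	if (offset)
		skb_reserve(skb, align - offset);	/* now 'align'-byte aligned */
}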
527
528/**
 529 * spider_net_enable_rxchtails - sets RX dmac chain tail addresses
530 * @card: card structure
531 *
 532 * spider_net_enable_rxchtails sets the RX DMAC chain tail addresses in the
533 * chip by writing to the appropriate register. DMA is enabled in
534 * spider_net_enable_rxdmac.
535 */
536static void
537spider_net_enable_rxchtails(struct spider_net_card *card)
538{
539 /* assume chain is aligned correctly */
540 spider_net_write_reg(card, SPIDER_NET_GDADCHA ,
541 card->rx_chain.tail->bus_addr);
542}
543
544/**
545 * spider_net_enable_rxdmac - enables a receive DMA controller
546 * @card: card structure
547 *
548 * spider_net_enable_rxdmac enables the DMA controller by setting RX_DMA_EN
549 * in the GDADMACCNTR register
550 */
551static void
552spider_net_enable_rxdmac(struct spider_net_card *card)
553{
554 spider_net_write_reg(card, SPIDER_NET_GDADMACCNTR,
555 SPIDER_NET_DMA_RX_VALUE);
556}
557
558/**
559 * spider_net_refill_rx_chain - refills descriptors/skbs in the rx chains
560 * @card: card structure
561 *
562 * refills descriptors in all chains (last used chain first): allocates skbs
563 * and iommu-maps them.
564 */
565static void
566spider_net_refill_rx_chain(struct spider_net_card *card)
567{
568 struct spider_net_descr_chain *chain;
569 int count = 0;
570 unsigned long flags;
571
572 chain = &card->rx_chain;
573
574 spin_lock_irqsave(&card->chain_lock, flags);
575 while (spider_net_get_descr_status(chain->head) ==
576 SPIDER_NET_DESCR_NOT_IN_USE) {
577 if (spider_net_prepare_rx_descr(card, chain->head))
578 break;
579 count++;
580 chain->head = chain->head->next;
581 }
582 spin_unlock_irqrestore(&card->chain_lock, flags);
583
584 /* could be optimized, only do that, if we know the DMA processing
585 * has terminated */
586 if (count)
587 spider_net_enable_rxdmac(card);
588}
589
590/**
591 * spider_net_alloc_rx_skbs - allocates rx skbs in rx descriptor chains
592 * @card: card structure
593 *
594 * returns 0 on success, <0 on failure
595 */
596static int
597spider_net_alloc_rx_skbs(struct spider_net_card *card)
598{
599 int result;
600 struct spider_net_descr_chain *chain;
601
602 result = -ENOMEM;
603
604 chain = &card->rx_chain;
605 /* put at least one buffer into the chain. if this fails,
606 * we've got a problem. if not, spider_net_refill_rx_chain
607 * will do the rest at the end of this function */
608 if (spider_net_prepare_rx_descr(card, chain->head))
609 goto error;
610 else
611 chain->head = chain->head->next;
612
613 /* this will allocate the rest of the rx buffers; if not, it's
614 * business as usual later on */
615 spider_net_refill_rx_chain(card);
616 return 0;
617
618error:
619 spider_net_free_rx_chain_contents(card);
620 return result;
621}
622
623/**
624 * spider_net_release_tx_descr - processes a used tx descriptor
625 * @card: card structure
626 * @descr: descriptor to release
627 *
628 * releases a used tx descriptor (unmapping, freeing of skb)
629 */
630static void
631spider_net_release_tx_descr(struct spider_net_card *card,
632 struct spider_net_descr *descr)
633{
634 struct sk_buff *skb;
635
636 /* unmap the skb */
637 skb = descr->skb;
638 pci_unmap_single(card->pdev, descr->buf_addr, skb->len,
639 PCI_DMA_BIDIRECTIONAL);
640
641 dev_kfree_skb_any(skb);
642
643 /* set status to not used */
644 spider_net_set_descr_status(descr, SPIDER_NET_DESCR_NOT_IN_USE);
645}
646
647/**
648 * spider_net_release_tx_chain - processes sent tx descriptors
649 * @card: adapter structure
650 * @brutal: if set, don't care about whether descriptor seems to be in use
651 *
652 * releases the tx descriptors that spider has finished with (if non-brutal)
653 * or simply release tx descriptors (if brutal)
654 */
655static void
656spider_net_release_tx_chain(struct spider_net_card *card, int brutal)
657{
658 struct spider_net_descr_chain *tx_chain = &card->tx_chain;
659 enum spider_net_descr_status status;
660
661 spider_net_tx_irq_off(card);
662
663 /* no lock for chain needed, if this is only executed once at a time */
664again:
665 for (;;) {
666 status = spider_net_get_descr_status(tx_chain->tail);
667 switch (status) {
668 case SPIDER_NET_DESCR_CARDOWNED:
669 if (!brutal) goto out;
670 /* fallthrough, if we release the descriptors
671 * brutally (then we don't care about
672 * SPIDER_NET_DESCR_CARDOWNED) */
673 case SPIDER_NET_DESCR_RESPONSE_ERROR:
674 case SPIDER_NET_DESCR_PROTECTION_ERROR:
675 case SPIDER_NET_DESCR_FORCE_END:
676 if (netif_msg_tx_err(card))
677 pr_err("%s: forcing end of tx descriptor "
678 "with status x%02x\n",
679 card->netdev->name, status);
680 card->netdev_stats.tx_dropped++;
681 break;
682
683 case SPIDER_NET_DESCR_COMPLETE:
684 card->netdev_stats.tx_packets++;
685 card->netdev_stats.tx_bytes +=
686 tx_chain->tail->skb->len;
687 break;
688
689 default: /* any other value (== SPIDER_NET_DESCR_NOT_IN_USE) */
690 goto out;
691 }
692 spider_net_release_tx_descr(card, tx_chain->tail);
693 tx_chain->tail = tx_chain->tail->next;
694 }
695out:
696 netif_wake_queue(card->netdev);
697
698 if (!brutal) {
699 /* switch on tx irqs (while we are still in the interrupt
700 * handler, so we don't get an interrupt), check again
701 * for done descriptors. This results in fewer interrupts */
702 spider_net_tx_irq_on(card);
703 status = spider_net_get_descr_status(tx_chain->tail);
704 switch (status) {
705 case SPIDER_NET_DESCR_RESPONSE_ERROR:
706 case SPIDER_NET_DESCR_PROTECTION_ERROR:
707 case SPIDER_NET_DESCR_FORCE_END:
708 case SPIDER_NET_DESCR_COMPLETE:
709 goto again;
710 default:
711 break;
712 }
713 }
714
715}
716
717/**
718 * spider_net_get_multicast_hash - generates hash for multicast filter table
719 * @addr: multicast address
720 *
721 * returns the hash value.
722 *
723 * spider_net_get_multicast_hash calculates a hash value for a given multicast
724 * address, that is used to set the multicast filter tables
725 */
726static u8
727spider_net_get_multicast_hash(struct net_device *netdev, __u8 *addr)
728{
729 /* FIXME: an addr of 01:00:5e:00:00:01 must result in 0xa9,
730 * ff:ff:ff:ff:ff:ff must result in 0xfd */
731 u32 crc;
732 u8 hash;
733
734 crc = crc32_be(~0, addr, netdev->addr_len);
735
736 hash = (crc >> 27);
737 hash <<= 3;
738 hash |= crc & 7;
739
740 return hash;
741}
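The hash takes the top five bits of a big-endian CRC-32 over the MAC address and appends the CRC's three lowest bits, yielding an 8-bit index into the multicast filter table. As a standalone expression:

/* crc32_be() from <linux/crc32.h>; addr_len is 6 for Ethernet */
static u8 spider_net_hash_sketch(const u8 *addr, int addr_len)
{
	u32 crc = crc32_be(~0, addr, addr_len);

	return ((crc >> 27) << 3) | (crc & 7);	/* 5 high bits + 3 low bits */
}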
742
743/**
744 * spider_net_set_multi - sets multicast addresses and promisc flags
745 * @netdev: interface device structure
746 *
747 * spider_net_set_multi configures multicast addresses as needed for the
748 * netdev interface. It also sets up multicast, allmulti and promisc
749 * flags appropriately
750 */
751static void
752spider_net_set_multi(struct net_device *netdev)
753{
754 struct dev_mc_list *mc;
755 u8 hash;
756 int i;
757 u32 reg;
758 struct spider_net_card *card = netdev_priv(netdev);
759 unsigned long bitmask[SPIDER_NET_MULTICAST_HASHES / BITS_PER_LONG] =
760 {0, };
761
762 spider_net_set_promisc(card);
763
764 if (netdev->flags & IFF_ALLMULTI) {
765 for (i = 0; i < SPIDER_NET_MULTICAST_HASHES; i++) {
766 set_bit(i, bitmask);
767 }
768 goto write_hash;
769 }
770
771 /* well, we know, what the broadcast hash value is: it's xfd
772 hash = spider_net_get_multicast_hash(netdev, netdev->broadcast); */
773 set_bit(0xfd, bitmask);
774
775 for (mc = netdev->mc_list; mc; mc = mc->next) {
776 hash = spider_net_get_multicast_hash(netdev, mc->dmi_addr);
777 set_bit(hash, bitmask);
778 }
779
780write_hash:
781 for (i = 0; i < SPIDER_NET_MULTICAST_HASHES / 4; i++) {
782 reg = 0;
783 if (test_bit(i * 4, bitmask))
784 reg += 0x08;
785 reg <<= 8;
786 if (test_bit(i * 4 + 1, bitmask))
787 reg += 0x08;
788 reg <<= 8;
789 if (test_bit(i * 4 + 2, bitmask))
790 reg += 0x08;
791 reg <<= 8;
792 if (test_bit(i * 4 + 3, bitmask))
793 reg += 0x08;
794
795 spider_net_write_reg(card, SPIDER_NET_GMRMHFILnR + i * 4, reg);
796 }
797}
798
799/**
800 * spider_net_disable_rxdmac - disables the receive DMA controller
801 * @card: card structure
802 *
803 * spider_net_disable_rxdmac terminates processing on the DMA controller by
 804 * turning off DMA and issuing a force end
805 */
806static void
807spider_net_disable_rxdmac(struct spider_net_card *card)
808{
809 spider_net_write_reg(card, SPIDER_NET_GDADMACCNTR,
810 SPIDER_NET_DMA_RX_FEND_VALUE);
811}
812
813/**
814 * spider_net_stop - called upon ifconfig down
815 * @netdev: interface device structure
816 *
817 * always returns 0
818 */
819int
820spider_net_stop(struct net_device *netdev)
821{
822 struct spider_net_card *card = netdev_priv(netdev);
823
824 netif_poll_disable(netdev);
825 netif_carrier_off(netdev);
826 netif_stop_queue(netdev);
827
828 /* disable/mask all interrupts */
829 spider_net_write_reg(card, SPIDER_NET_GHIINT0MSK, 0);
830 spider_net_write_reg(card, SPIDER_NET_GHIINT1MSK, 0);
831 spider_net_write_reg(card, SPIDER_NET_GHIINT2MSK, 0);
832
833 /* free_irq(netdev->irq, netdev);*/
834 free_irq(to_pci_dev(netdev->class_dev.dev)->irq, netdev);
835
836 spider_net_write_reg(card, SPIDER_NET_GDTDMACCNTR,
837 SPIDER_NET_DMA_TX_FEND_VALUE);
838
839 /* turn off DMA, force end */
840 spider_net_disable_rxdmac(card);
841
842 /* release chains */
843 spider_net_release_tx_chain(card, 1);
844
845 spider_net_free_chain(card, &card->tx_chain);
846 spider_net_free_chain(card, &card->rx_chain);
847
848 return 0;
849}
850
851/**
852 * spider_net_get_next_tx_descr - returns the next available tx descriptor
853 * @card: device structure to get descriptor from
854 *
855 * returns the address of the next descriptor, or NULL if not available.
856 */
857static struct spider_net_descr *
858spider_net_get_next_tx_descr(struct spider_net_card *card)
859{
860 /* check, if head points to not-in-use descr */
861 if ( spider_net_get_descr_status(card->tx_chain.head) ==
862 SPIDER_NET_DESCR_NOT_IN_USE ) {
863 return card->tx_chain.head;
864 } else {
865 return NULL;
866 }
867}
868
869/**
870 * spider_net_set_txdescr_cmdstat - sets the tx descriptor command field
871 * @descr: descriptor structure to fill out
872 * @skb: packet to consider
873 *
874 * fills out the command and status field of the descriptor structure,
875 * depending on hardware checksum settings. This function assumes a wmb()
876 * has executed before.
877 */
878static void
879spider_net_set_txdescr_cmdstat(struct spider_net_descr *descr,
880 struct sk_buff *skb)
881{
882 if (skb->ip_summed != CHECKSUM_HW) {
883 descr->dmac_cmd_status = SPIDER_NET_DMAC_CMDSTAT_NOCS;
884 return;
885 }
886
887 /* is packet ip?
888 * if yes: tcp? udp? */
889 if (skb->protocol == htons(ETH_P_IP)) {
890 if (skb->nh.iph->protocol == IPPROTO_TCP) {
891 descr->dmac_cmd_status = SPIDER_NET_DMAC_CMDSTAT_TCPCS;
892 } else if (skb->nh.iph->protocol == IPPROTO_UDP) {
893 descr->dmac_cmd_status = SPIDER_NET_DMAC_CMDSTAT_UDPCS;
894 } else { /* the stack should checksum non-tcp and non-udp
 895 packets on its own: NETIF_F_IP_CSUM */
896 descr->dmac_cmd_status = SPIDER_NET_DMAC_CMDSTAT_NOCS;
897 }
898 }
899}
900
901/**
902 * spider_net_prepare_tx_descr - fill tx descriptor with skb data
903 * @card: card structure
904 * @descr: descriptor structure to fill out
905 * @skb: packet to use
906 *
907 * returns 0 on success, <0 on failure.
908 *
909 * fills out the descriptor structure with skb data and len. Copies data,
910 * if needed (32bit DMA!)
911 */
912static int
913spider_net_prepare_tx_descr(struct spider_net_card *card,
914 struct spider_net_descr *descr,
915 struct sk_buff *skb)
916{
917 descr->buf_addr = pci_map_single(card->pdev, skb->data,
918 skb->len, PCI_DMA_BIDIRECTIONAL);
919 if (descr->buf_addr == DMA_ERROR_CODE) {
920 if (netif_msg_tx_err(card))
921 pr_err("could not iommu-map packet (%p, %i). "
922 "Dropping packet\n", skb->data, skb->len);
923 return -ENOMEM;
924 }
925
926 descr->buf_size = skb->len;
927 descr->skb = skb;
928 descr->data_status = 0;
929
930 /* make sure the above values are in memory before we change the
931 * status */
932 wmb();
933
934 spider_net_set_txdescr_cmdstat(descr,skb);
935
936 return 0;
937}
938
939/**
940 * spider_net_kick_tx_dma - enables TX DMA processing
941 * @card: card structure
942 * @descr: descriptor address to enable TX processing at
943 *
944 * spider_net_kick_tx_dma writes the current tx chain head as start address
945 * of the tx descriptor chain and enables the transmission DMA engine
946 */
947static void
948spider_net_kick_tx_dma(struct spider_net_card *card,
949 struct spider_net_descr *descr)
950{
951 /* this is the only descriptor in the output chain.
952 * Enable TX DMA */
953
954 spider_net_write_reg(card, SPIDER_NET_GDTDCHA,
955 descr->bus_addr);
956
957 spider_net_write_reg(card, SPIDER_NET_GDTDMACCNTR,
958 SPIDER_NET_DMA_TX_VALUE);
959}
960
961/**
962 * spider_net_xmit - transmits a frame over the device
963 * @skb: packet to send out
964 * @netdev: interface device structure
965 *
966 * returns 0 on success, <0 on failure
967 */
968static int
969spider_net_xmit(struct sk_buff *skb, struct net_device *netdev)
970{
971 struct spider_net_card *card = netdev_priv(netdev);
972 struct spider_net_descr *descr;
973 int result;
974
975 descr = spider_net_get_next_tx_descr(card);
976
977 if (!descr) {
978 netif_stop_queue(netdev);
979
980 descr = spider_net_get_next_tx_descr(card);
981 if (!descr)
982 goto error;
983 else
984 netif_start_queue(netdev);
985 }
986
987 result = spider_net_prepare_tx_descr(card, descr, skb);
988 if (result)
989 goto error;
990
991 card->tx_chain.head = card->tx_chain.head->next;
992
993 /* make sure the status from spider_net_prepare_tx_descr is in
994 * memory before we check out the previous descriptor */
995 wmb();
996
997 if (spider_net_get_descr_status(descr->prev) !=
998 SPIDER_NET_DESCR_CARDOWNED)
999 spider_net_kick_tx_dma(card, descr);
1000
1001 return NETDEV_TX_OK;
1002
1003error:
1004 card->netdev_stats.tx_dropped++;
1005 return NETDEV_TX_LOCKED;
1006}
1007
1008/**
1009 * spider_net_do_ioctl - called for device ioctls
1010 * @netdev: interface device structure
1011 * @ifr: request parameter structure for ioctl
1012 * @cmd: command code for ioctl
1013 *
1014 * returns 0 on success, <0 on failure. Currently, we have no special ioctls.
1015 * -EOPNOTSUPP is returned if an unknown ioctl was requested
1016 */
1017static int
1018spider_net_do_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd)
1019{
1020 switch (cmd) {
1021 default:
1022 return -EOPNOTSUPP;
1023 }
1024}
1025
1026/**
1027 * spider_net_pass_skb_up - takes an skb from a descriptor and passes it on
1028 * @descr: descriptor to process
1029 * @card: card structure
1030 *
1031 * returns 1 on success, 0 if no packet was passed to the stack
1032 *
1033 * iommu-unmaps the skb, fills out skb structure and passes the data to the
1034 * stack. The descriptor state is not changed.
1035 */
1036static int
1037spider_net_pass_skb_up(struct spider_net_descr *descr,
1038 struct spider_net_card *card)
1039{
1040 struct sk_buff *skb;
1041 struct net_device *netdev;
1042 u32 data_status, data_error;
1043
1044 data_status = descr->data_status;
1045 data_error = descr->data_error;
1046
1047 netdev = card->netdev;
1048
1049 /* check for errors in the data_error flag */
1050 if ((data_error & SPIDER_NET_DATA_ERROR_MASK) &&
1051 netif_msg_rx_err(card))
1052 pr_err("error in received descriptor found, "
1053 "data_status=x%08x, data_error=x%08x\n",
1054 data_status, data_error);
1055
1056 /* prepare skb, unmap descriptor */
1057 skb = descr->skb;
1058 pci_unmap_single(card->pdev, descr->buf_addr, SPIDER_NET_MAX_MTU,
1059 PCI_DMA_BIDIRECTIONAL);
1060
1061 /* the cases we'll throw away the packet immediately */
1062 if (data_error & SPIDER_NET_DESTROY_RX_FLAGS)
1063 return 0;
1064
1065 skb->dev = netdev;
1066 skb_put(skb, descr->valid_size);
1067
1068 /* the card seems to add 2 bytes of junk in front
1069 * of the ethernet frame */
1070#define SPIDER_MISALIGN 2
1071 skb_pull(skb, SPIDER_MISALIGN);
1072 skb->protocol = eth_type_trans(skb, netdev);
1073
1074 /* checksum offload */
1075 if (card->options.rx_csum) {
1076 if ( (data_status & SPIDER_NET_DATA_STATUS_CHK_MASK) &&
1077 (!(data_error & SPIDER_NET_DATA_ERROR_CHK_MASK)) )
1078 skb->ip_summed = CHECKSUM_UNNECESSARY;
1079 else
1080 skb->ip_summed = CHECKSUM_NONE;
1081 } else {
1082 skb->ip_summed = CHECKSUM_NONE;
1083 }
1084
1085 if (data_status & SPIDER_NET_VLAN_PACKET) {
1086 /* further enhancements: HW-accel VLAN
1087 * vlan_hwaccel_receive_skb
1088 */
1089 }
1090
1091 /* pass skb up to stack */
1092 netif_receive_skb(skb);
1093
1094 /* update netdevice statistics */
1095 card->netdev_stats.rx_packets++;
1096 card->netdev_stats.rx_bytes += skb->len;
1097
1098 return 1;
1099}
1100
1101/**
1102 * spider_net_decode_descr - processes an rx descriptor
1103 * @card: card structure
1104 *
1105 * returns 1 if a packet has been sent to the stack, otherwise 0
1106 *
1107 * processes an rx descriptor by iommu-unmapping the data buffer and passing
1108 * the packet up to the stack
1109 */
1110static int
1111spider_net_decode_one_descr(struct spider_net_card *card)
1112{
1113 enum spider_net_descr_status status;
1114 struct spider_net_descr *descr;
1115 struct spider_net_descr_chain *chain;
1116 int result;
1117
1118 chain = &card->rx_chain;
1119 descr = chain->tail;
1120
1121 status = spider_net_get_descr_status(descr);
1122
1123 if (status == SPIDER_NET_DESCR_CARDOWNED) {
1124 /* nothing in the descriptor yet */
1125 return 0;
1126 }
1127
1128 if (status == SPIDER_NET_DESCR_NOT_IN_USE) {
1129 /* not initialized yet, I bet chain->tail == chain->head
1130 * and the ring is empty */
1131 spider_net_refill_rx_chain(card);
1132 return 0;
1133 }
1134
1135 /* descriptor definitively used -- move on head */
1136 chain->tail = descr->next;
1137
1138 result = 0;
1139 if ( (status == SPIDER_NET_DESCR_RESPONSE_ERROR) ||
1140 (status == SPIDER_NET_DESCR_PROTECTION_ERROR) ||
1141 (status == SPIDER_NET_DESCR_FORCE_END) ) {
1142 if (netif_msg_rx_err(card))
1143 pr_err("%s: dropping RX descriptor with state %d\n",
1144 card->netdev->name, status);
1145 card->netdev_stats.rx_dropped++;
1146 goto refill;
1147 }
1148
1149 if ( (status != SPIDER_NET_DESCR_COMPLETE) &&
1150 (status != SPIDER_NET_DESCR_FRAME_END) ) {
1151 if (netif_msg_rx_err(card))
1152 pr_err("%s: RX descriptor with state %d\n",
1153 card->netdev->name, status);
1154 goto refill;
1155 }
1156
1157 /* ok, we've got a packet in descr */
1158 result = spider_net_pass_skb_up(descr, card);
1159refill:
1160 spider_net_set_descr_status(descr, SPIDER_NET_DESCR_NOT_IN_USE);
1161 /* change the descriptor state: */
1162 spider_net_refill_rx_chain(card);
1163
1164 return result;
1165}
1166
1167/**
1168 * spider_net_poll - NAPI poll function called by the stack to return packets
1169 * @netdev: interface device structure
1170 * @budget: number of packets we can pass to the stack at most
1171 *
1172 * returns 0 if no more packets available to the driver/stack. Returns 1,
1173 * if the quota is exceeded, but the driver has still packets.
1174 *
1175 * spider_net_poll returns all packets from the rx descriptors to the stack
1176 * (using netif_receive_skb). If all/enough packets are up, the driver
1177 * reenables interrupts and returns 0. If not, 1 is returned.
1178 */
1179static int
1180spider_net_poll(struct net_device *netdev, int *budget)
1181{
1182 struct spider_net_card *card = netdev_priv(netdev);
1183 int packets_to_do, packets_done = 0;
1184 int no_more_packets = 0;
1185
1186 packets_to_do = min(*budget, netdev->quota);
1187
1188 while (packets_to_do) {
1189 if (spider_net_decode_one_descr(card)) {
1190 packets_done++;
1191 packets_to_do--;
1192 } else {
1193 /* no more packets for the stack */
1194 no_more_packets = 1;
1195 break;
1196 }
1197 }
1198
1199 netdev->quota -= packets_done;
1200 *budget -= packets_done;
1201
1202 /* if all packets are in the stack, enable interrupts and return 0 */
1203 /* if not, return 1 */
1204 if (no_more_packets) {
1205 netif_rx_complete(netdev);
1206 spider_net_rx_irq_on(card);
1207 return 0;
1208 }
1209
1210 return 1;
1211}
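spider_net_poll follows the pre-2.6.24 NAPI contract: consume at most min(*budget, dev->quota) packets, decrement both counters by the number actually processed, and only call netif_rx_complete() plus re-enable the rx interrupt once the ring ran dry. The control flow reduced to a skeleton (one_more_packet() and enable_rx_irq() are hypothetical stand-ins for the driver's own helpers):

static int old_napi_poll_sketch(struct net_device *netdev, int *budget)
{
	int todo = min(*budget, netdev->quota);
	int done = 0, empty = 0;

	while (todo) {
		if (!one_more_packet(netdev)) {		/* hypothetical helper */
			empty = 1;
			break;
		}
		done++;
		todo--;
	}

	netdev->quota -= done;
	*budget -= done;

	if (empty) {
		netif_rx_complete(netdev);
		enable_rx_irq(netdev);			/* hypothetical helper */
		return 0;				/* all packets delivered */
	}
	return 1;					/* quota exhausted, more pending */
}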
1212
1213/**
1214 * spider_net_vlan_rx_reg - initializes VLAN structures in the driver and card
1215 * @netdev: interface device structure
1216 * @grp: vlan_group structure that is registered (NULL on destroying interface)
1217 */
1218static void
1219spider_net_vlan_rx_reg(struct net_device *netdev, struct vlan_group *grp)
1220{
1221 /* further enhancement... yet to do */
1222 return;
1223}
1224
1225/**
1226 * spider_net_vlan_rx_add - adds VLAN id to the card filter
1227 * @netdev: interface device structure
1228 * @vid: VLAN id to add
1229 */
1230static void
1231spider_net_vlan_rx_add(struct net_device *netdev, uint16_t vid)
1232{
1233 /* further enhancement... yet to do */
1234 /* add vid to card's VLAN filter table */
1235 return;
1236}
1237
1238/**
1239 * spider_net_vlan_rx_kill - removes VLAN id from the card filter
1240 * @netdev: interface device structure
1241 * @vid: VLAN id to remove
1242 */
1243static void
1244spider_net_vlan_rx_kill(struct net_device *netdev, uint16_t vid)
1245{
1246 /* further enhancement... yet to do */
1247 /* remove vid from card's VLAN filter table */
1248}
1249
1250/**
1251 * spider_net_get_stats - get interface statistics
1252 * @netdev: interface device structure
1253 *
1254 * returns the interface statistics residing in the spider_net_card struct
1255 */
1256static struct net_device_stats *
1257spider_net_get_stats(struct net_device *netdev)
1258{
1259 struct spider_net_card *card = netdev_priv(netdev);
1260 struct net_device_stats *stats = &card->netdev_stats;
1261 return stats;
1262}
1263
1264/**
1265 * spider_net_change_mtu - changes the MTU of an interface
1266 * @netdev: interface device structure
1267 * @new_mtu: new MTU value
1268 *
1269 * returns 0 on success, <0 on failure
1270 */
1271static int
1272spider_net_change_mtu(struct net_device *netdev, int new_mtu)
1273{
1274 /* no need to re-alloc skbs or so -- the max mtu is about 2.3k
1275 * and mtu is outbound only anyway */
1276 if ( (new_mtu < SPIDER_NET_MIN_MTU ) ||
1277 (new_mtu > SPIDER_NET_MAX_MTU) )
1278 return -EINVAL;
1279 netdev->mtu = new_mtu;
1280 return 0;
1281}
1282
1283/**
1284 * spider_net_set_mac - sets the MAC of an interface
1285 * @netdev: interface device structure
1286 * @ptr: pointer to new MAC address
1287 *
1288 * Returns 0 on success, <0 on failure. The new address is written to the
1289 * card's unicast MAC registers and verified by reading it back.
1290 */
1291static int
1292spider_net_set_mac(struct net_device *netdev, void *p)
1293{
1294 struct spider_net_card *card = netdev_priv(netdev);
1295 u32 macl, macu, regvalue;
1296 struct sockaddr *addr = p;
1297
1298 if (!is_valid_ether_addr(addr->sa_data))
1299 return -EADDRNOTAVAIL;
1300
1301 /* switch off GMACTPE and GMACRPE */
1302 regvalue = spider_net_read_reg(card, SPIDER_NET_GMACOPEMD);
1303 regvalue &= ~((1 << 5) | (1 << 6));
1304 spider_net_write_reg(card, SPIDER_NET_GMACOPEMD, regvalue);
1305
1306 /* write mac */
1307 macu = (addr->sa_data[0]<<24) + (addr->sa_data[1]<<16) +
1308 (addr->sa_data[2]<<8) + (addr->sa_data[3]);
1309 macl = (addr->sa_data[4]<<8) + (addr->sa_data[5]);
1310 spider_net_write_reg(card, SPIDER_NET_GMACUNIMACU, macu);
1311 spider_net_write_reg(card, SPIDER_NET_GMACUNIMACL, macl);
1312
1313 /* switch GMACTPE and GMACRPE back on */
1314 regvalue = spider_net_read_reg(card, SPIDER_NET_GMACOPEMD);
1315 regvalue |= ((1 << 5) | (1 << 6));
1316 spider_net_write_reg(card, SPIDER_NET_GMACOPEMD, regvalue);
1317
1318 spider_net_set_promisc(card);
1319
1320 /* check whether setting the MAC address was successful */
1321 if (spider_net_get_mac_address(netdev))
1322 return -EADDRNOTAVAIL;
1323 if (memcmp(netdev->dev_addr,addr->sa_data,netdev->addr_len))
1324 return -EADDRNOTAVAIL;
1325
1326 return 0;
1327}
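/*
 * Worked example (illustration only, not from the original source): for the
 * address 00:11:22:33:44:55 the two register writes above use
 *	macu = 0x00112233	(sa_data[0..3])
 *	macl = 0x00004455	(sa_data[4..5])
 */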
1328
1329/**
1330 * spider_net_enable_txdmac - enables a TX DMA controller
1331 * @card: card structure
1332 *
1333 * spider_net_enable_txdmac enables the TX DMA controller by setting the
1334 * descriptor chain tail address
1335 */
1336static void
1337spider_net_enable_txdmac(struct spider_net_card *card)
1338{
1339 /* assume chain is aligned correctly */
1340 spider_net_write_reg(card, SPIDER_NET_GDTDCHA,
1341 card->tx_chain.tail->bus_addr);
1342}
1343
1344/**
1345 * spider_net_handle_error_irq - handles errors raised by an interrupt
1346 * @card: card structure
1347 * @status_reg: interrupt status register 0 (GHIINT0STS)
1348 *
1349 * spider_net_handle_error_irq treats or ignores all error conditions
1350 * found when an interrupt is presented
1351 */
1352static void
1353spider_net_handle_error_irq(struct spider_net_card *card, u32 status_reg)
1354{
1355 u32 error_reg1, error_reg2;
1356 u32 i;
1357 int show_error = 1;
1358
1359 error_reg1 = spider_net_read_reg(card, SPIDER_NET_GHIINT1STS);
1360 error_reg2 = spider_net_read_reg(card, SPIDER_NET_GHIINT2STS);
1361
1362 /* check GHIINT0STS ************************************/
1363 if (status_reg)
1364 for (i = 0; i < 32; i++)
1365 if (status_reg & (1<<i))
1366 switch (i)
1367 {
1368 /* let the error_reg1 and error_reg2 evaluation decide what to do
1369 case SPIDER_NET_PHYINT:
1370 case SPIDER_NET_GMAC2INT:
1371 case SPIDER_NET_GMAC1INT:
1372 case SPIDER_NET_GIPSINT:
1373 case SPIDER_NET_GFIFOINT:
1374 case SPIDER_NET_DMACINT:
1375 case SPIDER_NET_GSYSINT:
1376 break; */
1377
1378 case SPIDER_NET_GPWOPCMPINT:
1379 /* PHY write operation completed */
1380 show_error = 0;
1381 break;
1382 case SPIDER_NET_GPROPCMPINT:
1383 /* PHY read operation completed */
1384 /* we don't use semaphores, as we poll for the completion
1385 * of the read operation in spider_net_read_phy. Should take
1386 * about 50 us */
1387 show_error = 0;
1388 break;
1389 case SPIDER_NET_GPWFFINT:
1390 /* PHY command queue full */
1391 if (netif_msg_intr(card))
1392 pr_err("PHY write queue full\n");
1393 show_error = 0;
1394 break;
1395
1396 /* case SPIDER_NET_GRMDADRINT: not used. print a message */
1397 /* case SPIDER_NET_GRMARPINT: not used. print a message */
1398 /* case SPIDER_NET_GRMMPINT: not used. print a message */
1399
1400 case SPIDER_NET_GDTDEN0INT:
1401 /* someone has set TX_DMA_EN to 0 */
1402 show_error = 0;
1403 break;
1404
1405 case SPIDER_NET_GDDDEN0INT: /* fallthrough */
1406 case SPIDER_NET_GDCDEN0INT: /* fallthrough */
1407 case SPIDER_NET_GDBDEN0INT: /* fallthrough */
1408 case SPIDER_NET_GDADEN0INT:
1409 /* someone has set RX_DMA_EN to 0 */
1410 show_error = 0;
1411 break;
1412
1413 /* RX interrupts */
1414 case SPIDER_NET_GDDFDCINT:
1415 case SPIDER_NET_GDCFDCINT:
1416 case SPIDER_NET_GDBFDCINT:
1417 case SPIDER_NET_GDAFDCINT:
1418 /* case SPIDER_NET_GDNMINT: not used. print a message */
1419 /* case SPIDER_NET_GCNMINT: not used. print a message */
1420 /* case SPIDER_NET_GBNMINT: not used. print a message */
1421 /* case SPIDER_NET_GANMINT: not used. print a message */
1422 /* case SPIDER_NET_GRFNMINT: not used. print a message */
1423 show_error = 0;
1424 break;
1425
1426 /* TX interrupts */
1427 case SPIDER_NET_GDTFDCINT:
1428 show_error = 0;
1429 break;
1430 case SPIDER_NET_GTTEDINT:
1431 show_error = 0;
1432 break;
1433 case SPIDER_NET_GDTDCEINT:
1434 /* chain end. If a descriptor should be sent, kick off
1435 * tx dma
1436 if (card->tx_chain.tail == card->tx_chain.head)
1437 spider_net_kick_tx_dma(card);
1438 show_error = 0; */
1439 break;
1440
1441 /* case SPIDER_NET_G1TMCNTINT: not used. print a message */
1442 /* case SPIDER_NET_GFREECNTINT: not used. print a message */
1443 }
1444
1445 /* check GHIINT1STS ************************************/
1446 if (error_reg1)
1447 for (i = 0; i < 32; i++)
1448 if (error_reg1 & (1<<i))
1449 switch (i)
1450 {
1451 case SPIDER_NET_GTMFLLINT:
1452 if (netif_msg_intr(card))
1453 pr_err("Spider TX RAM full\n");
1454 show_error = 0;
1455 break;
1456 case SPIDER_NET_GRMFLLINT:
1457 if (netif_msg_intr(card))
1458 pr_err("Spider RX RAM full, incoming packets "
1459 "might be discarded !\n");
1460 netif_rx_schedule(card->netdev);
1461 spider_net_enable_rxchtails(card);
1462 spider_net_enable_rxdmac(card);
1463 break;
1464
1465 /* case SPIDER_NET_GTMSHTINT: problem, print a message */
1466 case SPIDER_NET_GDTINVDINT:
1467 /* allrighty. tx from previous descr ok */
1468 show_error = 0;
1469 break;
1470 /* case SPIDER_NET_GRFDFLLINT: print a message down there */
1471 /* case SPIDER_NET_GRFCFLLINT: print a message down there */
1472 /* case SPIDER_NET_GRFBFLLINT: print a message down there */
1473 /* case SPIDER_NET_GRFAFLLINT: print a message down there */
1474
1475 /* chain end */
1476 case SPIDER_NET_GDDDCEINT: /* fallthrough */
1477 case SPIDER_NET_GDCDCEINT: /* fallthrough */
1478 case SPIDER_NET_GDBDCEINT: /* fallthrough */
1479 case SPIDER_NET_GDADCEINT:
1480 if (netif_msg_intr(card))
1481 pr_err("got descriptor chain end interrupt, "
1482 "restarting DMAC %c.\n",
1483 'D'+i-SPIDER_NET_GDDDCEINT);
1484 spider_net_refill_rx_chain(card);
1485 show_error = 0;
1486 break;
1487
1488 /* invalid descriptor */
1489 case SPIDER_NET_GDDINVDINT: /* fallthrough */
1490 case SPIDER_NET_GDCINVDINT: /* fallthrough */
1491 case SPIDER_NET_GDBINVDINT: /* fallthrough */
1492 case SPIDER_NET_GDAINVDINT:
1493 /* could happen when rx chain is full */
1494 spider_net_refill_rx_chain(card);
1495 show_error = 0;
1496 break;
1497
1498 /* case SPIDER_NET_GDTRSERINT: problem, print a message */
1499 /* case SPIDER_NET_GDDRSERINT: problem, print a message */
1500 /* case SPIDER_NET_GDCRSERINT: problem, print a message */
1501 /* case SPIDER_NET_GDBRSERINT: problem, print a message */
1502 /* case SPIDER_NET_GDARSERINT: problem, print a message */
1503 /* case SPIDER_NET_GDSERINT: problem, print a message */
1504 /* case SPIDER_NET_GDTPTERINT: problem, print a message */
1505 /* case SPIDER_NET_GDDPTERINT: problem, print a message */
1506 /* case SPIDER_NET_GDCPTERINT: problem, print a message */
1507 /* case SPIDER_NET_GDBPTERINT: problem, print a message */
1508 /* case SPIDER_NET_GDAPTERINT: problem, print a message */
1509 default:
1510 show_error = 1;
1511 break;
1512 }
1513
1514 /* check GHIINT2STS ************************************/
1515 if (error_reg2)
1516 for (i = 0; i < 32; i++)
1517 if (error_reg2 & (1<<i))
1518 switch (i)
1519 {
1520 /* there is nothing we can (or want to) do at this time. Log a
1521 * message; we can switch the specific cases on and off later on
1522 case SPIDER_NET_GPROPERINT:
1523 case SPIDER_NET_GMCTCRSNGINT:
1524 case SPIDER_NET_GMCTLCOLINT:
1525 case SPIDER_NET_GMCTTMOTINT:
1526 case SPIDER_NET_GMCRCAERINT:
1527 case SPIDER_NET_GMCRCALERINT:
1528 case SPIDER_NET_GMCRALNERINT:
1529 case SPIDER_NET_GMCROVRINT:
1530 case SPIDER_NET_GMCRRNTINT:
1531 case SPIDER_NET_GMCRRXERINT:
1532 case SPIDER_NET_GTITCSERINT:
1533 case SPIDER_NET_GTIFMTERINT:
1534 case SPIDER_NET_GTIPKTRVKINT:
1535 case SPIDER_NET_GTISPINGINT:
1536 case SPIDER_NET_GTISADNGINT:
1537 case SPIDER_NET_GTISPDNGINT:
1538 case SPIDER_NET_GRIFMTERINT:
1539 case SPIDER_NET_GRIPKTRVKINT:
1540 case SPIDER_NET_GRISPINGINT:
1541 case SPIDER_NET_GRISADNGINT:
1542 case SPIDER_NET_GRISPDNGINT:
1543 break;
1544 */
1545 default:
1546 break;
1547 }
1548
1549 if ((show_error) && (netif_msg_intr(card)))
1550 pr_err("Got error interrupt, GHIINT0STS = 0x%08x, "
1551 "GHIINT1STS = 0x%08x, GHIINT2STS = 0x%08x\n",
1552 status_reg, error_reg1, error_reg2);
1553
1554 /* clear interrupt sources */
1555 spider_net_write_reg(card, SPIDER_NET_GHIINT1STS, error_reg1);
1556 spider_net_write_reg(card, SPIDER_NET_GHIINT2STS, error_reg2);
1557}
1558
1559/**
1560 * spider_net_interrupt - interrupt handler for spider_net
1561 * @irq: interrupt number
1562 * @ptr: pointer to net_device
1563 * @regs: CPU registers
1564 *
1565 * returns IRQ_HANDLED if the interrupt was meant for the driver, or IRQ_NONE
1566 * if no interrupt raised by the card was found.
1567 *
1568 * This is the interrupt handler that turns off
1569 * interrupts for this device and makes the stack poll the driver
1570 */
1571static irqreturn_t
1572spider_net_interrupt(int irq, void *ptr, struct pt_regs *regs)
1573{
1574 struct net_device *netdev = ptr;
1575 struct spider_net_card *card = netdev_priv(netdev);
1576 u32 status_reg;
1577
1578 status_reg = spider_net_read_reg(card, SPIDER_NET_GHIINT0STS);
1579
1580 if (!status_reg)
1581 return IRQ_NONE;
1582
1583 if (status_reg & SPIDER_NET_TXINT)
1584 spider_net_release_tx_chain(card, 0);
1585
1586 if (status_reg & SPIDER_NET_RXINT ) {
1587 spider_net_rx_irq_off(card);
1588 netif_rx_schedule(netdev);
1589 }
1590
1591 /* we do this after rx and tx processing, as we want the tx chain
1592 * processed first to see whether we should restart tx dma processing */
1593 spider_net_handle_error_irq(card, status_reg);
1594
1595 /* clear interrupt sources */
1596 spider_net_write_reg(card, SPIDER_NET_GHIINT0STS, status_reg);
1597
1598 return IRQ_HANDLED;
1599}
1600
1601#ifdef CONFIG_NET_POLL_CONTROLLER
1602/**
1603 * spider_net_poll_controller - artificial interrupt for netconsole etc.
1604 * @netdev: interface device structure
1605 *
1606 * see Documentation/networking/netconsole.txt
1607 */
1608static void
1609spider_net_poll_controller(struct net_device *netdev)
1610{
1611 disable_irq(netdev->irq);
1612 spider_net_interrupt(netdev->irq, netdev, NULL);
1613 enable_irq(netdev->irq);
1614}
1615#endif /* CONFIG_NET_POLL_CONTROLLER */
1616
1617/**
1618 * spider_net_init_card - initializes the card
1619 * @card: card structure
1620 *
1621 * spider_net_init_card initializes the card so that other registers can
1622 * be used
1623 */
1624static void
1625spider_net_init_card(struct spider_net_card *card)
1626{
1627 spider_net_write_reg(card, SPIDER_NET_CKRCTRL,
1628 SPIDER_NET_CKRCTRL_STOP_VALUE);
1629
1630 spider_net_write_reg(card, SPIDER_NET_CKRCTRL,
1631 SPIDER_NET_CKRCTRL_RUN_VALUE);
1632}
1633
1634/**
1635 * spider_net_enable_card - enables the card by setting all kinds of regs
1636 * @card: card structure
1637 *
1638 * spider_net_enable_card sets a lot of SMMIO registers to enable the device
1639 */
1640static void
1641spider_net_enable_card(struct spider_net_card *card)
1642{
1643 int i;
1644 /* the following array consists of (register),(value) pairs
1645 * that are set in this function. A register of 0 ends the list */
1646 u32 regs[][2] = {
1647 { SPIDER_NET_GRESUMINTNUM, 0 },
1648 { SPIDER_NET_GREINTNUM, 0 },
1649
1650 /* set interrupt frame number registers */
1651 /* clear the single DMA engine registers first */
1652 { SPIDER_NET_GFAFRMNUM, SPIDER_NET_GFXFRAMES_VALUE },
1653 { SPIDER_NET_GFBFRMNUM, SPIDER_NET_GFXFRAMES_VALUE },
1654 { SPIDER_NET_GFCFRMNUM, SPIDER_NET_GFXFRAMES_VALUE },
1655 { SPIDER_NET_GFDFRMNUM, SPIDER_NET_GFXFRAMES_VALUE },
1656 /* then set what we really need */
1657 { SPIDER_NET_GFFRMNUM, SPIDER_NET_FRAMENUM_VALUE },
1658
1659 /* timer counter registers and stuff */
1660 { SPIDER_NET_GFREECNNUM, 0 },
1661 { SPIDER_NET_GONETIMENUM, 0 },
1662 { SPIDER_NET_GTOUTFRMNUM, 0 },
1663
1664 /* RX mode setting */
1665 { SPIDER_NET_GRXMDSET, SPIDER_NET_RXMODE_VALUE },
1666 /* TX mode setting */
1667 { SPIDER_NET_GTXMDSET, SPIDER_NET_TXMODE_VALUE },
1668 /* IPSEC mode setting */
1669 { SPIDER_NET_GIPSECINIT, SPIDER_NET_IPSECINIT_VALUE },
1670
1671 { SPIDER_NET_GFTRESTRT, SPIDER_NET_RESTART_VALUE },
1672
1673 { SPIDER_NET_GMRWOLCTRL, 0 },
1674 { SPIDER_NET_GTESTMD, 0 },
1675
1676 { SPIDER_NET_GMACINTEN, 0 },
1677
1678 /* flow control stuff */
1679 { SPIDER_NET_GMACAPAUSE, SPIDER_NET_MACAPAUSE_VALUE },
1680 { SPIDER_NET_GMACTXPAUSE, SPIDER_NET_TXPAUSE_VALUE },
1681
1682 { SPIDER_NET_GMACBSTLMT, SPIDER_NET_BURSTLMT_VALUE },
1683 { 0, 0}
1684 };
1685
1686 i = 0;
1687 while (regs[i][0]) {
1688 spider_net_write_reg(card, regs[i][0], regs[i][1]);
1689 i++;
1690 }
1691
1692 /* clear unicast filter table entries 1 to 14 */
1693 for (i = 1; i <= 14; i++) {
1694 spider_net_write_reg(card,
1695 SPIDER_NET_GMRUAFILnR + i * 8,
1696 0x00080000);
1697 spider_net_write_reg(card,
1698 SPIDER_NET_GMRUAFILnR + i * 8 + 4,
1699 0x00000000);
1700 }
1701
1702 spider_net_write_reg(card, SPIDER_NET_GMRUA0FIL15R, 0x08080000);
1703
1704 spider_net_write_reg(card, SPIDER_NET_ECMODE, SPIDER_NET_ECMODE_VALUE);
1705
1706 /* set chain tail address for RX chains and
1707 * enable DMA */
1708 spider_net_enable_rxchtails(card);
1709 spider_net_enable_rxdmac(card);
1710
1711 spider_net_write_reg(card, SPIDER_NET_GRXDMAEN, SPIDER_NET_WOL_VALUE);
1712
1713 /* set chain tail address for TX chain */
1714 spider_net_enable_txdmac(card);
1715
1716 spider_net_write_reg(card, SPIDER_NET_GMACLENLMT,
1717 SPIDER_NET_LENLMT_VALUE);
1718 spider_net_write_reg(card, SPIDER_NET_GMACMODE,
1719 SPIDER_NET_MACMODE_VALUE);
1720 spider_net_write_reg(card, SPIDER_NET_GMACOPEMD,
1721 SPIDER_NET_OPMODE_VALUE);
1722
1723 /* set interrupt mask registers */
1724 spider_net_write_reg(card, SPIDER_NET_GHIINT0MSK,
1725 SPIDER_NET_INT0_MASK_VALUE);
1726 spider_net_write_reg(card, SPIDER_NET_GHIINT1MSK,
1727 SPIDER_NET_INT1_MASK_VALUE);
1728 spider_net_write_reg(card, SPIDER_NET_GHIINT2MSK,
1729 SPIDER_NET_INT2_MASK_VALUE);
1730}
1731
1732/**
1733 * spider_net_open - called upon ifconfig up
1734 * @netdev: interface device structure
1735 *
1736 * returns 0 on success, <0 on failure
1737 *
1738 * spider_net_open allocates all the descriptors and memory needed for
1739 * operation, sets up multicast list and enables interrupts
1740 */
1741int
1742spider_net_open(struct net_device *netdev)
1743{
1744 struct spider_net_card *card = netdev_priv(netdev);
1745 int result;
1746
1747 result = -ENOMEM;
1748 if (spider_net_init_chain(card, &card->tx_chain,
1749 card->descr, tx_descriptors))
1750 goto alloc_tx_failed;
1751 if (spider_net_init_chain(card, &card->rx_chain,
1752 card->descr + tx_descriptors, rx_descriptors))
1753 goto alloc_rx_failed;
1754
1755 /* allocate rx skbs */
1756 if (spider_net_alloc_rx_skbs(card))
1757 goto alloc_skbs_failed;
1758
1759 spider_net_set_multi(netdev);
1760
1761 /* further enhancement: setup hw vlan, if needed */
1762
1763 result = -EBUSY;
1764 if (request_irq(netdev->irq, spider_net_interrupt,
1765 SA_SHIRQ, netdev->name, netdev))
1766 goto register_int_failed;
1767
1768 spider_net_enable_card(card);
1769
1770 netif_start_queue(netdev);
1771 netif_carrier_on(netdev);
1772 netif_poll_enable(netdev);
1773
1774 return 0;
1775
1776register_int_failed:
1777 spider_net_free_rx_chain_contents(card);
1778alloc_skbs_failed:
1779 spider_net_free_chain(card, &card->rx_chain);
1780alloc_rx_failed:
1781 spider_net_free_chain(card, &card->tx_chain);
1782alloc_tx_failed:
1783 return result;
1784}
1785
1786/**
1787 * spider_net_setup_phy - setup PHY
1788 * @card: card structure
1789 *
1790 * returns 0 on success, <0 on failure
1791 *
1792 * spider_net_setup_phy is used as part of spider_net_probe. Sets
1793 * the PHY to 1000 Mbps
1794 **/
1795static int
1796spider_net_setup_phy(struct spider_net_card *card)
1797{
1798 struct mii_phy *phy = &card->phy;
1799
1800 spider_net_write_reg(card, SPIDER_NET_GDTDMASEL,
1801 SPIDER_NET_DMASEL_VALUE);
1802 spider_net_write_reg(card, SPIDER_NET_GPCCTRL,
1803 SPIDER_NET_PHY_CTRL_VALUE);
1804 phy->mii_id = 1;
1805 phy->dev = card->netdev;
1806 phy->mdio_read = spider_net_read_phy;
1807 phy->mdio_write = spider_net_write_phy;
1808
1809 mii_phy_probe(phy, phy->mii_id);
1810
1811 if (phy->def->ops->setup_forced)
1812 phy->def->ops->setup_forced(phy, SPEED_1000, DUPLEX_FULL);
1813
1814 /* the following two writes could be moved to sungem_phy.c */
1815 /* enable fiber mode */
1816 spider_net_write_phy(card->netdev, 1, MII_NCONFIG, 0x9020);
1817 /* LEDs active in both modes, autosense prio = fiber */
1818 spider_net_write_phy(card->netdev, 1, MII_NCONFIG, 0x945f);
1819
1820 phy->def->ops->read_link(phy);
1821 pr_info("Found %s with %i Mbps, %s-duplex.\n", phy->def->name,
1822 phy->speed, phy->duplex==1 ? "Full" : "Half");
1823
1824 return 0;
1825}
1826
1827/**
1828 * spider_net_download_firmware - loads firmware into the adapter
1829 * @card: card structure
1830 * @firmware: firmware pointer
1831 *
1832 * spider_net_download_firmware loads the firmware opened by
1833 * spider_net_init_firmware into the adapter.
1834 */
1835static void
1836spider_net_download_firmware(struct spider_net_card *card,
1837 const struct firmware *firmware)
1838{
1839 int sequencer, i;
1840 u32 *fw_ptr = (u32 *)firmware->data;
1841
1842 /* stop sequencers */
1843 spider_net_write_reg(card, SPIDER_NET_GSINIT,
1844 SPIDER_NET_STOP_SEQ_VALUE);
1845
1846 for (sequencer = 0; sequencer < 6; sequencer++) {
1847 spider_net_write_reg(card,
1848 SPIDER_NET_GSnPRGADR + sequencer * 8, 0);
1849 for (i = 0; i < SPIDER_NET_FIRMWARE_LEN; i++) {
1850 spider_net_write_reg(card, SPIDER_NET_GSnPRGDAT +
1851 sequencer * 8, *fw_ptr);
1852 fw_ptr++;
1853 }
1854 }
1855
1856 spider_net_write_reg(card, SPIDER_NET_GSINIT,
1857 SPIDER_NET_RUN_SEQ_VALUE);
1858}
1859
1860/**
1861 * spider_net_init_firmware - reads in firmware parts
1862 * @card: card structure
1863 *
1864 * Returns 0 on success, <0 on failure
1865 *
1866 * spider_net_init_firmware opens the sequencer firmware and does some basic
1867 * checks. This function opens and releases the firmware structure. A call
1868 * to download the firmware is performed before the release.
1869 *
1870 * Firmware format
1871 * ===============
1872 * spider_fw.bin is expected to be a file containing 6*1024*4 bytes, 4k being
1873 * the program for each sequencer. Use the command
1874 * tail -q -n +2 Seq_code1_0x088.txt Seq_code2_0x090.txt \
1875 * Seq_code3_0x098.txt Seq_code4_0x0A0.txt Seq_code5_0x0A8.txt \
1876 * Seq_code6_0x0B0.txt | xxd -r -p -c4 > spider_fw.bin
1877 *
1878 * to generate spider_fw.bin, if you have sequencer programs with something
1879 * like the following contents for each sequencer:
1880 * <ONE LINE COMMENT>
1881 * <FIRST 4-BYTES-WORD FOR SEQUENCER>
1882 * <SECOND 4-BYTES-WORD FOR SEQUENCER>
1883 * ...
1884 * <1024th 4-BYTES-WORD FOR SEQUENCER>
1885 */
1886static int
1887spider_net_init_firmware(struct spider_net_card *card)
1888{
1889 const struct firmware *firmware;
1890 int err = -EIO;
1891
1892 if (request_firmware(&firmware,
1893 SPIDER_NET_FIRMWARE_NAME, &card->pdev->dev) < 0) {
1894 if (netif_msg_probe(card))
1895 pr_err("Couldn't read in sequencer data file %s.\n",
1896 SPIDER_NET_FIRMWARE_NAME);
1897 firmware = NULL;
1898 goto out;
1899 }
1900
1901 if (firmware->size != 6 * SPIDER_NET_FIRMWARE_LEN * sizeof(u32)) {
1902 if (netif_msg_probe(card))
1903 pr_err("Invalid size of sequencer data file %s.\n",
1904 SPIDER_NET_FIRMWARE_NAME);
1905 goto out;
1906 }
1907
1908 spider_net_download_firmware(card, firmware);
1909
1910 err = 0;
1911out:
1912 release_firmware(firmware);
1913
1914 return err;
1915}
1916
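/*
 * Illustrative helper (assumption, not part of the driver): with the format
 * described above, word i of sequencer s sits at byte offset
 * (s * SPIDER_NET_FIRMWARE_LEN + i) * 4 of spider_fw.bin, so the whole image
 * is 6 * 1024 * 4 = 24576 bytes.
 */
static inline u32
example_fw_word(const struct firmware *fw, int sequencer, int i)
{
	const u32 *words = (const u32 *)fw->data;

	return words[sequencer * SPIDER_NET_FIRMWARE_LEN + i];
}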
1917/**
1918 * spider_net_workaround_rxramfull - work around firmware bug
1919 * @card: card structure
1920 *
1921 * no return value
1922 **/
1923static void
1924spider_net_workaround_rxramfull(struct spider_net_card *card)
1925{
1926 int i, sequencer = 0;
1927
1928 /* cancel reset */
1929 spider_net_write_reg(card, SPIDER_NET_CKRCTRL,
1930 SPIDER_NET_CKRCTRL_RUN_VALUE);
1931
1932 /* empty sequencer data */
1933 for (sequencer = 0; sequencer < 6; sequencer++) {
1934 spider_net_write_reg(card, SPIDER_NET_GSnPRGDAT +
1935 sequencer * 8, 0x0);
1936 for (i = 0; i < SPIDER_NET_FIRMWARE_LEN; i++) {
1937 spider_net_write_reg(card, SPIDER_NET_GSnPRGDAT +
1938 sequencer * 8, 0x0);
1939 }
1940 }
1941
1942 /* set sequencer operation */
1943 spider_net_write_reg(card, SPIDER_NET_GSINIT, 0x000000fe);
1944
1945 /* reset */
1946 spider_net_write_reg(card, SPIDER_NET_CKRCTRL,
1947 SPIDER_NET_CKRCTRL_STOP_VALUE);
1948}
1949
1950/**
1951 * spider_net_tx_timeout_task - task scheduled by the watchdog timeout
1952 * function (must not be called in interrupt context)
1953 * @data: pointer to the interface device structure
1954 *
1955 * called as a task when tx hangs; resets the interface (if it is up)
1956 */
1957static void
1958spider_net_tx_timeout_task(void *data)
1959{
1960 struct net_device *netdev = data;
1961 struct spider_net_card *card = netdev_priv(netdev);
1962
1963 if (!(netdev->flags & IFF_UP))
1964 goto out;
1965
1966 netif_device_detach(netdev);
1967 spider_net_stop(netdev);
1968
1969 spider_net_workaround_rxramfull(card);
1970 spider_net_init_card(card);
1971
1972 if (spider_net_setup_phy(card))
1973 goto out;
1974 if (spider_net_init_firmware(card))
1975 goto out;
1976
1977 spider_net_open(netdev);
1978 spider_net_kick_tx_dma(card, card->tx_chain.head);
1979 netif_device_attach(netdev);
1980
1981out:
1982 atomic_dec(&card->tx_timeout_task_counter);
1983}
1984
1985/**
1986 * spider_net_tx_timeout - called when the tx timeout watchdog kicks in.
1987 * @netdev: interface device structure
1988 *
1989 * called if tx hangs. Schedules a task that resets the interface
1990 */
1991static void
1992spider_net_tx_timeout(struct net_device *netdev)
1993{
1994 struct spider_net_card *card;
1995
1996 card = netdev_priv(netdev);
1997 atomic_inc(&card->tx_timeout_task_counter);
1998 if (netdev->flags & IFF_UP)
1999 schedule_work(&card->tx_timeout_task);
2000 else
2001 atomic_dec(&card->tx_timeout_task_counter);
2002}
2003
2004/**
2005 * spider_net_setup_netdev_ops - initialization of net_device operations
2006 * @netdev: net_device structure
2007 *
2008 * fills out function pointers in the net_device structure
2009 */
2010static void
2011spider_net_setup_netdev_ops(struct net_device *netdev)
2012{
2013 netdev->open = &spider_net_open;
2014 netdev->stop = &spider_net_stop;
2015 netdev->hard_start_xmit = &spider_net_xmit;
2016 netdev->get_stats = &spider_net_get_stats;
2017 netdev->set_multicast_list = &spider_net_set_multi;
2018 netdev->set_mac_address = &spider_net_set_mac;
2019 netdev->change_mtu = &spider_net_change_mtu;
2020 netdev->do_ioctl = &spider_net_do_ioctl;
2021 /* tx watchdog */
2022 netdev->tx_timeout = &spider_net_tx_timeout;
2023 netdev->watchdog_timeo = SPIDER_NET_WATCHDOG_TIMEOUT;
2024 /* NAPI */
2025 netdev->poll = &spider_net_poll;
2026 netdev->weight = SPIDER_NET_NAPI_WEIGHT;
2027 /* HW VLAN */
2028 netdev->vlan_rx_register = &spider_net_vlan_rx_reg;
2029 netdev->vlan_rx_add_vid = &spider_net_vlan_rx_add;
2030 netdev->vlan_rx_kill_vid = &spider_net_vlan_rx_kill;
2031#ifdef CONFIG_NET_POLL_CONTROLLER
2032 /* poll controller */
2033 netdev->poll_controller = &spider_net_poll_controller;
2034#endif /* CONFIG_NET_POLL_CONTROLLER */
2035 /* ethtool ops */
2036 netdev->ethtool_ops = &spider_net_ethtool_ops;
2037}
2038
2039/**
2040 * spider_net_setup_netdev - initialization of net_device
2041 * @card: card structure
2042 *
2043 * Returns 0 on success or <0 on failure
2044 *
2045 * spider_net_setup_netdev initializes the net_device structure
2046 **/
2047static int
2048spider_net_setup_netdev(struct spider_net_card *card)
2049{
2050 int result;
2051 struct net_device *netdev = card->netdev;
2052 struct device_node *dn;
2053 struct sockaddr addr;
2054 u8 *mac;
2055
2056 SET_MODULE_OWNER(netdev);
2057 SET_NETDEV_DEV(netdev, &card->pdev->dev);
2058
2059 pci_set_drvdata(card->pdev, netdev);
2060 spin_lock_init(&card->intmask_lock);
2061 netdev->irq = card->pdev->irq;
2062
2063 card->options.rx_csum = SPIDER_NET_RX_CSUM_DEFAULT;
2064
2065 spider_net_setup_netdev_ops(netdev);
2066
2067 netdev->features = 0;
2068 /* some time: NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX |
2069 * NETIF_F_HW_VLAN_FILTER */
2070
2071 netdev->irq = card->pdev->irq;
2072
2073 dn = pci_device_to_OF_node(card->pdev);
2074 if (!dn)
2075 return -EIO;
2076
2077 mac = (u8 *)get_property(dn, "local-mac-address", NULL);
2078 if (!mac)
2079 return -EIO;
2080 memcpy(addr.sa_data, mac, ETH_ALEN);
2081
2082 result = spider_net_set_mac(netdev, &addr);
2083 if ((result) && (netif_msg_probe(card)))
2084 pr_err("Failed to set MAC address: %i\n", result);
2085
2086 result = register_netdev(netdev);
2087 if (result) {
2088 if (netif_msg_probe(card))
2089 pr_err("Couldn't register net_device: %i\n",
2090 result);
2091 return result;
2092 }
2093
2094 if (netif_msg_probe(card))
2095 pr_info("Initialized device %s.\n", netdev->name);
2096
2097 return 0;
2098}
2099
2100/**
2101 * spider_net_alloc_card - allocates net_device and card structure
2102 *
2103 * returns the card structure or NULL in case of errors
2104 *
2105 * the card and net_device structures are linked to each other
2106 */
2107static struct spider_net_card *
2108spider_net_alloc_card(void)
2109{
2110 struct net_device *netdev;
2111 struct spider_net_card *card;
2112 size_t alloc_size;
2113
2114 alloc_size = sizeof (*card) +
2115 sizeof (struct spider_net_descr) * rx_descriptors +
2116 sizeof (struct spider_net_descr) * tx_descriptors;
2117 netdev = alloc_etherdev(alloc_size);
2118 if (!netdev)
2119 return NULL;
2120
2121 card = netdev_priv(netdev);
2122 card->netdev = netdev;
2123 card->msg_enable = SPIDER_NET_DEFAULT_MSG;
2124 INIT_WORK(&card->tx_timeout_task, spider_net_tx_timeout_task, netdev);
2125 init_waitqueue_head(&card->waitq);
2126 atomic_set(&card->tx_timeout_task_counter, 0);
2127
2128 return card;
2129}
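/*
 * Layout note (assumption, not from the original source): struct
 * spider_net_card ends in a zero-length descriptor array, so the single
 * alloc_etherdev() call above reserves the card structure plus all
 * descriptors in one block:
 *
 *	card->descr[0 .. tx_descriptors - 1]                     tx chain
 *	card->descr[tx_descriptors .. tx + rx_descriptors - 1]   rx chain
 *
 * which matches how spider_net_open hands card->descr and
 * card->descr + tx_descriptors to spider_net_init_chain().
 */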
2130
2131/**
2132 * spider_net_undo_pci_setup - releases PCI resources
2133 * @card: card structure
2134 *
2135 * spider_net_undo_pci_setup releases the mapped regions
2136 */
2137static void
2138spider_net_undo_pci_setup(struct spider_net_card *card)
2139{
2140 iounmap(card->regs);
2141 pci_release_regions(card->pdev);
2142}
2143
2144/**
2145 * spider_net_setup_pci_dev - sets up the device in terms of PCI operations
2146 * @card: card structure
2147 * @pdev: PCI device
2148 *
2149 * Returns the card structure or NULL if any errors occur
2150 *
2151 * spider_net_setup_pci_dev initializes pdev and, together with the
2152 * functions called in spider_net_open, configures the device so that
2153 * data can be transferred over it.
2154 * The net_device structure is attached to the card structure if the
2155 * function returns without error.
2156 **/
2157static struct spider_net_card *
2158spider_net_setup_pci_dev(struct pci_dev *pdev)
2159{
2160 struct spider_net_card *card;
2161 unsigned long mmio_start, mmio_len;
2162
2163 if (pci_enable_device(pdev)) {
2164 pr_err("Couldn't enable PCI device\n");
2165 return NULL;
2166 }
2167
2168 if (!(pci_resource_flags(pdev, 0) & IORESOURCE_MEM)) {
2169 pr_err("Couldn't find proper PCI device base address.\n");
2170 goto out_disable_dev;
2171 }
2172
2173 if (pci_request_regions(pdev, spider_net_driver_name)) {
2174 pr_err("Couldn't obtain PCI resources, aborting.\n");
2175 goto out_disable_dev;
2176 }
2177
2178 pci_set_master(pdev);
2179
2180 card = spider_net_alloc_card();
2181 if (!card) {
2182 pr_err("Couldn't allocate net_device structure, "
2183 "aborting.\n");
2184 goto out_release_regions;
2185 }
2186 card->pdev = pdev;
2187
2188 /* fetch base address and length of first resource */
2189 mmio_start = pci_resource_start(pdev, 0);
2190 mmio_len = pci_resource_len(pdev, 0);
2191
2192 card->netdev->mem_start = mmio_start;
2193 card->netdev->mem_end = mmio_start + mmio_len;
2194 card->regs = ioremap(mmio_start, mmio_len);
2195
2196 if (!card->regs) {
2197 pr_err("Couldn't map PCI register space, aborting.\n");
2198 goto out_release_regions;
2199 }
2200
2201 return card;
2202
2203out_release_regions:
2204 pci_release_regions(pdev);
2205out_disable_dev:
2206 pci_disable_device(pdev);
2207 pci_set_drvdata(pdev, NULL);
2208 return NULL;
2209}
2210
2211/**
2212 * spider_net_probe - initialization of a device
2213 * @pdev: PCI device
2214 * @ent: entry in the device id list
2215 *
2216 * Returns 0 on success, <0 on failure
2217 *
2218 * spider_net_probe initializes pdev and registers a net_device
2219 * structure for it. After that, the device can be ifconfig'ed up
2220 **/
2221static int __devinit
2222spider_net_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
2223{
2224 int err = -EIO;
2225 struct spider_net_card *card;
2226
2227 card = spider_net_setup_pci_dev(pdev);
2228 if (!card)
2229 goto out;
2230
2231 spider_net_workaround_rxramfull(card);
2232 spider_net_init_card(card);
2233
2234 err = spider_net_setup_phy(card);
2235 if (err)
2236 goto out_undo_pci;
2237
2238 err = spider_net_init_firmware(card);
2239 if (err)
2240 goto out_undo_pci;
2241
2242 err = spider_net_setup_netdev(card);
2243 if (err)
2244 goto out_undo_pci;
2245
2246 return 0;
2247
2248out_undo_pci:
2249 spider_net_undo_pci_setup(card);
2250 free_netdev(card->netdev);
2251out:
2252 return err;
2253}
2254
2255/**
2256 * spider_net_remove - removal of a device
2257 * @pdev: PCI device
2258 *
2259 * no return value
2260 *
2261 * spider_net_remove is called to remove the device; it unregisters the
2262 * net_device and switches the card off
2263 **/
2264static void __devexit
2265spider_net_remove(struct pci_dev *pdev)
2266{
2267 struct net_device *netdev;
2268 struct spider_net_card *card;
2269
2270 netdev = pci_get_drvdata(pdev);
2271 card = netdev_priv(netdev);
2272
2273 wait_event(card->waitq,
2274 atomic_read(&card->tx_timeout_task_counter) == 0);
2275
2276 unregister_netdev(netdev);
2277
2278 /* switch off card */
2279 spider_net_write_reg(card, SPIDER_NET_CKRCTRL,
2280 SPIDER_NET_CKRCTRL_STOP_VALUE);
2281 spider_net_write_reg(card, SPIDER_NET_CKRCTRL,
2282 SPIDER_NET_CKRCTRL_RUN_VALUE);
2283
2284 spider_net_undo_pci_setup(card);
2285 free_netdev(netdev);
2286}
2287
2288static struct pci_driver spider_net_driver = {
2289 .owner = THIS_MODULE,
2290 .name = spider_net_driver_name,
2291 .id_table = spider_net_pci_tbl,
2292 .probe = spider_net_probe,
2293 .remove = __devexit_p(spider_net_remove)
2294};
2295
2296/**
2297 * spider_net_init - init function when the driver is loaded
2298 *
2299 * spider_net_init registers the device driver
2300 */
2301static int __init spider_net_init(void)
2302{
2303 if (rx_descriptors < SPIDER_NET_RX_DESCRIPTORS_MIN) {
2304 rx_descriptors = SPIDER_NET_RX_DESCRIPTORS_MIN;
2305 pr_info("adjusting rx descriptors to %i.\n", rx_descriptors);
2306 }
2307 if (rx_descriptors > SPIDER_NET_RX_DESCRIPTORS_MAX) {
2308 rx_descriptors = SPIDER_NET_RX_DESCRIPTORS_MAX;
2309 pr_info("adjusting rx descriptors to %i.\n", rx_descriptors);
2310 }
2311 if (tx_descriptors < SPIDER_NET_TX_DESCRIPTORS_MIN) {
2312 tx_descriptors = SPIDER_NET_TX_DESCRIPTORS_MIN;
2313 pr_info("adjusting tx descriptors to %i.\n", tx_descriptors);
2314 }
2315 if (tx_descriptors > SPIDER_NET_TX_DESCRIPTORS_MAX) {
2316 tx_descriptors = SPIDER_NET_TX_DESCRIPTORS_MAX;
2317 pr_info("adjusting tx descriptors to %i.\n", tx_descriptors);
2318 }
2319
2320 return pci_register_driver(&spider_net_driver);
2321}
2322
2323/**
2324 * spider_net_cleanup - exit function when driver is unloaded
2325 *
2326 * spider_net_cleanup unregisters the device driver
2327 */
2328static void __exit spider_net_cleanup(void)
2329{
2330 pci_unregister_driver(&spider_net_driver);
2331}
2332
2333module_init(spider_net_init);
2334module_exit(spider_net_cleanup);
diff --git a/drivers/net/spider_net.h b/drivers/net/spider_net.h
new file mode 100644
index 000000000000..22b2f2347351
--- /dev/null
+++ b/drivers/net/spider_net.h
@@ -0,0 +1,469 @@
1/*
2 * Network device driver for Cell Processor-Based Blade
3 *
4 * (C) Copyright IBM Corp. 2005
5 *
6 * Authors : Utz Bacher <utz.bacher@de.ibm.com>
7 * Jens Osterkamp <Jens.Osterkamp@de.ibm.com>
8 *
9 * This program is free software; you can redistribute it and/or modify
10 * it under the terms of the GNU General Public License as published by
11 * the Free Software Foundation; either version 2, or (at your option)
12 * any later version.
13 *
14 * This program is distributed in the hope that it will be useful,
15 * but WITHOUT ANY WARRANTY; without even the implied warranty of
16 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
17 * GNU General Public License for more details.
18 *
19 * You should have received a copy of the GNU General Public License
20 * along with this program; if not, write to the Free Software
21 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
22 */
23
24#ifndef _SPIDER_NET_H
25#define _SPIDER_NET_H
26
27#include "sungem_phy.h"
28
29extern int spider_net_stop(struct net_device *netdev);
30extern int spider_net_open(struct net_device *netdev);
31
32extern struct ethtool_ops spider_net_ethtool_ops;
33
34extern char spider_net_driver_name[];
35
36#define SPIDER_NET_MAX_MTU 2308
37#define SPIDER_NET_MIN_MTU 64
38
39#define SPIDER_NET_RXBUF_ALIGN 128
40
41#define SPIDER_NET_RX_DESCRIPTORS_DEFAULT 64
42#define SPIDER_NET_RX_DESCRIPTORS_MIN 16
43#define SPIDER_NET_RX_DESCRIPTORS_MAX 256
44
45#define SPIDER_NET_TX_DESCRIPTORS_DEFAULT 64
46#define SPIDER_NET_TX_DESCRIPTORS_MIN 16
47#define SPIDER_NET_TX_DESCRIPTORS_MAX 256
48
49#define SPIDER_NET_RX_CSUM_DEFAULT 1
50
51#define SPIDER_NET_WATCHDOG_TIMEOUT 5*HZ
52#define SPIDER_NET_NAPI_WEIGHT 64
53
54#define SPIDER_NET_FIRMWARE_LEN 1024
55#define SPIDER_NET_FIRMWARE_NAME "spider_fw.bin"
56
57/** spider_net SMMIO registers */
58#define SPIDER_NET_GHIINT0STS 0x00000000
59#define SPIDER_NET_GHIINT1STS 0x00000004
60#define SPIDER_NET_GHIINT2STS 0x00000008
61#define SPIDER_NET_GHIINT0MSK 0x00000010
62#define SPIDER_NET_GHIINT1MSK 0x00000014
63#define SPIDER_NET_GHIINT2MSK 0x00000018
64
65#define SPIDER_NET_GRESUMINTNUM 0x00000020
66#define SPIDER_NET_GREINTNUM 0x00000024
67
68#define SPIDER_NET_GFFRMNUM 0x00000028
69#define SPIDER_NET_GFAFRMNUM 0x0000002c
70#define SPIDER_NET_GFBFRMNUM 0x00000030
71#define SPIDER_NET_GFCFRMNUM 0x00000034
72#define SPIDER_NET_GFDFRMNUM 0x00000038
73
74/* clear them (not used) */
75#define SPIDER_NET_GFREECNNUM 0x0000003c
76#define SPIDER_NET_GONETIMENUM 0x00000040
77
78#define SPIDER_NET_GTOUTFRMNUM 0x00000044
79
80#define SPIDER_NET_GTXMDSET 0x00000050
81#define SPIDER_NET_GPCCTRL 0x00000054
82#define SPIDER_NET_GRXMDSET 0x00000058
83#define SPIDER_NET_GIPSECINIT 0x0000005c
84#define SPIDER_NET_GFTRESTRT 0x00000060
85#define SPIDER_NET_GRXDMAEN 0x00000064
86#define SPIDER_NET_GMRWOLCTRL 0x00000068
87#define SPIDER_NET_GPCWOPCMD 0x0000006c
88#define SPIDER_NET_GPCROPCMD 0x00000070
89#define SPIDER_NET_GTTFRMCNT 0x00000078
90#define SPIDER_NET_GTESTMD 0x0000007c
91
92#define SPIDER_NET_GSINIT 0x00000080
93#define SPIDER_NET_GSnPRGADR 0x00000084
94#define SPIDER_NET_GSnPRGDAT 0x00000088
95
96#define SPIDER_NET_GMACOPEMD 0x00000100
97#define SPIDER_NET_GMACLENLMT 0x00000108
98#define SPIDER_NET_GMACINTEN 0x00000118
99#define SPIDER_NET_GMACPHYCTRL 0x00000120
100
101#define SPIDER_NET_GMACAPAUSE 0x00000154
102#define SPIDER_NET_GMACTXPAUSE 0x00000164
103
104#define SPIDER_NET_GMACMODE 0x000001b0
105#define SPIDER_NET_GMACBSTLMT 0x000001b4
106
107#define SPIDER_NET_GMACUNIMACU 0x000001c0
108#define SPIDER_NET_GMACUNIMACL 0x000001c8
109
110#define SPIDER_NET_GMRMHFILnR 0x00000400
111#define SPIDER_NET_MULTICAST_HASHES 256
112
113#define SPIDER_NET_GMRUAFILnR 0x00000500
114#define SPIDER_NET_GMRUA0FIL15R 0x00000578
115
116/* RX DMA controller registers, all 0x00000a.. are for DMA controller A,
117 * 0x00000b.. for DMA controller B, etc. */
118#define SPIDER_NET_GDADCHA 0x00000a00
119#define SPIDER_NET_GDADMACCNTR 0x00000a04
120#define SPIDER_NET_GDACTDPA 0x00000a08
121#define SPIDER_NET_GDACTDCNT 0x00000a0c
122#define SPIDER_NET_GDACDBADDR 0x00000a20
123#define SPIDER_NET_GDACDBSIZE 0x00000a24
124#define SPIDER_NET_GDACNEXTDA 0x00000a28
125#define SPIDER_NET_GDACCOMST 0x00000a2c
126#define SPIDER_NET_GDAWBCOMST 0x00000a30
127#define SPIDER_NET_GDAWBRSIZE 0x00000a34
128#define SPIDER_NET_GDAWBVSIZE 0x00000a38
129#define SPIDER_NET_GDAWBTRST 0x00000a3c
130#define SPIDER_NET_GDAWBTRERR 0x00000a40
131
132/* TX DMA controller registers */
133#define SPIDER_NET_GDTDCHA 0x00000e00
134#define SPIDER_NET_GDTDMACCNTR 0x00000e04
135#define SPIDER_NET_GDTCDPA 0x00000e08
136#define SPIDER_NET_GDTDMASEL 0x00000e14
137
138#define SPIDER_NET_ECMODE 0x00000f00
139/* clock and reset control register */
140#define SPIDER_NET_CKRCTRL 0x00000ff0
141
142/** SCONFIG registers */
143#define SPIDER_NET_SCONFIG_IOACTE 0x00002810
144
145/** hardcoded register values */
146#define SPIDER_NET_INT0_MASK_VALUE 0x3f7fe3ff
147#define SPIDER_NET_INT1_MASK_VALUE 0xffffffff
148/* no MAC aborts -> auto retransmission */
149#define SPIDER_NET_INT2_MASK_VALUE 0xfffffff1
150
151/* clear counter when interrupt sources are cleared
152#define SPIDER_NET_FRAMENUM_VALUE 0x0001f001 */
153/* we rely on flagged descriptor interrupts */
154#define SPIDER_NET_FRAMENUM_VALUE 0x00000000
155/* set this first, then the FRAMENUM_VALUE */
156#define SPIDER_NET_GFXFRAMES_VALUE 0x00000000
157
158#define SPIDER_NET_STOP_SEQ_VALUE 0x00000000
159#define SPIDER_NET_RUN_SEQ_VALUE 0x0000007e
160
161#define SPIDER_NET_PHY_CTRL_VALUE 0x00040040
162/* #define SPIDER_NET_PHY_CTRL_VALUE 0x01070080*/
163#define SPIDER_NET_RXMODE_VALUE 0x00000011
164/* auto retransmission in case of MAC aborts */
165#define SPIDER_NET_TXMODE_VALUE 0x00010000
166#define SPIDER_NET_RESTART_VALUE 0x00000000
167#define SPIDER_NET_WOL_VALUE 0x00001111
168#if 0
169#define SPIDER_NET_WOL_VALUE 0x00000000
170#endif
171#define SPIDER_NET_IPSECINIT_VALUE 0x00f000f8
172
173/* pause frames: automatic, no upper retransmission count */
174/* outside loopback mode: ETOMOD signal doesn't matter, not connected */
175#define SPIDER_NET_OPMODE_VALUE 0x00000063
176/*#define SPIDER_NET_OPMODE_VALUE 0x001b0062*/
177#define SPIDER_NET_LENLMT_VALUE 0x00000908
178
179#define SPIDER_NET_MACAPAUSE_VALUE 0x00000800 /* about 1 ms */
180#define SPIDER_NET_TXPAUSE_VALUE 0x00000000
181
182#define SPIDER_NET_MACMODE_VALUE 0x00000001
183#define SPIDER_NET_BURSTLMT_VALUE 0x00000200 /* about 16 us */
184
185/* 1(0) enable r/tx dma
186 * 0000000 fixed to 0
187 *
188 * 000000 fixed to 0
189 * 0(1) en/disable descr writeback on force end
190 * 0(1) force end
191 *
192 * 000000 fixed to 0
193 * 00 burst alignment: 128 bytes
194 *
195 * 00000 fixed to 0
196 * 0 descr writeback size 32 bytes
197 * 0(1) descr chain end interrupt enable
198 * 0(1) descr status writeback enable */
199
200/* to set RX_DMA_EN */
201#define SPIDER_NET_DMA_RX_VALUE 0x80000000
202#define SPIDER_NET_DMA_RX_FEND_VALUE 0x00030003
203/* to set TX_DMA_EN */
204#define SPIDER_NET_DMA_TX_VALUE 0x80000000
205#define SPIDER_NET_DMA_TX_FEND_VALUE 0x00030003
206
207/* SPIDER_NET_UA_DESCR_VALUE is OR'ed with the unicast address */
208#define SPIDER_NET_UA_DESCR_VALUE 0x00080000
209#define SPIDER_NET_PROMISC_VALUE 0x00080000
210#define SPIDER_NET_NONPROMISC_VALUE 0x00000000
211
212#define SPIDER_NET_DMASEL_VALUE 0x00000001
213
214#define SPIDER_NET_ECMODE_VALUE 0x00000000
215
216#define SPIDER_NET_CKRCTRL_RUN_VALUE 0x1fff010f
217#define SPIDER_NET_CKRCTRL_STOP_VALUE 0x0000010f
218
219#define SPIDER_NET_SBIMSTATE_VALUE 0x00000000
220#define SPIDER_NET_SBTMSTATE_VALUE 0x00000000
221
222/* SPIDER_NET_GHIINT0STS bits, in reverse order so that they can be used
223 * with 1 << SPIDER_NET_... */
224enum spider_net_int0_status {
225 SPIDER_NET_GPHYINT = 0,
226 SPIDER_NET_GMAC2INT,
227 SPIDER_NET_GMAC1INT,
228 SPIDER_NET_GIPSINT,
229 SPIDER_NET_GFIFOINT,
230 SPIDER_NET_GDMACINT,
231 SPIDER_NET_GSYSINT,
232 SPIDER_NET_GPWOPCMPINT,
233 SPIDER_NET_GPROPCMPINT,
234 SPIDER_NET_GPWFFINT,
235 SPIDER_NET_GRMDADRINT,
236 SPIDER_NET_GRMARPINT,
237 SPIDER_NET_GRMMPINT,
238 SPIDER_NET_GDTDEN0INT,
239 SPIDER_NET_GDDDEN0INT,
240 SPIDER_NET_GDCDEN0INT,
241 SPIDER_NET_GDBDEN0INT,
242 SPIDER_NET_GDADEN0INT,
243 SPIDER_NET_GDTFDCINT,
244 SPIDER_NET_GDDFDCINT,
245 SPIDER_NET_GDCFDCINT,
246 SPIDER_NET_GDBFDCINT,
247 SPIDER_NET_GDAFDCINT,
248 SPIDER_NET_GTTEDINT,
249 SPIDER_NET_GDTDCEINT,
250 SPIDER_NET_GRFDNMINT,
251 SPIDER_NET_GRFCNMINT,
252 SPIDER_NET_GRFBNMINT,
253 SPIDER_NET_GRFANMINT,
254 SPIDER_NET_GRFNMINT,
255 SPIDER_NET_G1TMCNTINT,
256 SPIDER_NET_GFREECNTINT
257};
258/* GHIINT1STS bits */
259enum spider_net_int1_status {
260 SPIDER_NET_GTMFLLINT = 0,
261 SPIDER_NET_GRMFLLINT,
262 SPIDER_NET_GTMSHTINT,
263 SPIDER_NET_GDTINVDINT,
264 SPIDER_NET_GRFDFLLINT,
265 SPIDER_NET_GDDDCEINT,
266 SPIDER_NET_GDDINVDINT,
267 SPIDER_NET_GRFCFLLINT,
268 SPIDER_NET_GDCDCEINT,
269 SPIDER_NET_GDCINVDINT,
270 SPIDER_NET_GRFBFLLINT,
271 SPIDER_NET_GDBDCEINT,
272 SPIDER_NET_GDBINVDINT,
273 SPIDER_NET_GRFAFLLINT,
274 SPIDER_NET_GDADCEINT,
275 SPIDER_NET_GDAINVDINT,
276 SPIDER_NET_GDTRSERINT,
277 SPIDER_NET_GDDRSERINT,
278 SPIDER_NET_GDCRSERINT,
279 SPIDER_NET_GDBRSERINT,
280 SPIDER_NET_GDARSERINT,
281 SPIDER_NET_GDSERINT,
282 SPIDER_NET_GDTPTERINT,
283 SPIDER_NET_GDDPTERINT,
284 SPIDER_NET_GDCPTERINT,
285 SPIDER_NET_GDBPTERINT,
286 SPIDER_NET_GDAPTERINT
287};
288/* GHIINT2STS bits */
289enum spider_net_int2_status {
290 SPIDER_NET_GPROPERINT = 0,
291 SPIDER_NET_GMCTCRSNGINT,
292 SPIDER_NET_GMCTLCOLINT,
293 SPIDER_NET_GMCTTMOTINT,
294 SPIDER_NET_GMCRCAERINT,
295 SPIDER_NET_GMCRCALERINT,
296 SPIDER_NET_GMCRALNERINT,
297 SPIDER_NET_GMCROVRINT,
298 SPIDER_NET_GMCRRNTINT,
299 SPIDER_NET_GMCRRXERINT,
300 SPIDER_NET_GTITCSERINT,
301 SPIDER_NET_GTIFMTERINT,
302 SPIDER_NET_GTIPKTRVKINT,
303 SPIDER_NET_GTISPINGINT,
304 SPIDER_NET_GTISADNGINT,
305 SPIDER_NET_GTISPDNGINT,
306 SPIDER_NET_GRIFMTERINT,
307 SPIDER_NET_GRIPKTRVKINT,
308 SPIDER_NET_GRISPINGINT,
309 SPIDER_NET_GRISADNGINT,
310 SPIDER_NET_GRISPDNGINT
311};
312
313#define SPIDER_NET_TXINT ( (1 << SPIDER_NET_GTTEDINT) | \
314 (1 << SPIDER_NET_GDTDCEINT) | \
315 (1 << SPIDER_NET_GDTFDCINT) )
316
317/* we rely on flagged descriptor interrupts*/
318#define SPIDER_NET_RXINT ( (1 << SPIDER_NET_GDAFDCINT) | \
319 (1 << SPIDER_NET_GRMFLLINT) )
320
321#define SPIDER_NET_GPREXEC 0x80000000
322#define SPIDER_NET_GPRDAT_MASK 0x0000ffff
323
324/* descriptor bits
325 *
326 * 1010 descriptor ready
327 * 0 descr in middle of chain
328 * 000 fixed to 0
329 *
330 * 0 no interrupt on completion
331 * 000 fixed to 0
332 * 1 no ipsec processing
333 * 1 last descriptor for this frame
334 * 00 no checksum
335 * 10 tcp checksum
336 * 11 udp checksum
337 *
338 * 00 fixed to 0
339 * 0 fixed to 0
340 * 0 no interrupt on response errors
341 * 0 no interrupt on invalid descr
342 * 0 no interrupt on dma process termination
343 * 0 no interrupt on descr chain end
344 * 0 no interrupt on descr complete
345 *
346 * 000 fixed to 0
347 * 0 response error interrupt status
348 * 0 invalid descr status
349 * 0 dma termination status
350 * 0 descr chain end status
351 * 0 descr complete status */
352#define SPIDER_NET_DMAC_CMDSTAT_NOCS 0xa00c0000
353#define SPIDER_NET_DMAC_CMDSTAT_TCPCS 0xa00e0000
354#define SPIDER_NET_DMAC_CMDSTAT_UDPCS 0xa00f0000
355#define SPIDER_NET_DESCR_IND_PROC_SHIFT 28
356#define SPIDER_NET_DESCR_IND_PROC_MASKO 0x0fffffff
357
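/* Illustrative sketch (assumption, not part of the original header): the
 * indication/processing state of a descriptor lives in the upper nibble of
 * dmac_cmd_status and can be extracted with the shift above, yielding one of
 * the spider_net_descr_status values defined further down. */
static inline int
example_descr_status(u32 dmac_cmd_status)
{
	return (dmac_cmd_status >> SPIDER_NET_DESCR_IND_PROC_SHIFT) & 0xf;
}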
358/* descr ready, descr is in middle of chain, get interrupt on completion */
359#define SPIDER_NET_DMAC_RX_CARDOWNED 0xa0800000
360
361/* multicast is no problem */
362#define SPIDER_NET_DATA_ERROR_MASK 0xffffbfff
363
364enum spider_net_descr_status {
365 SPIDER_NET_DESCR_COMPLETE = 0x00, /* used in rx and tx */
366 SPIDER_NET_DESCR_RESPONSE_ERROR = 0x01, /* used in rx and tx */
367 SPIDER_NET_DESCR_PROTECTION_ERROR = 0x02, /* used in rx and tx */
368 SPIDER_NET_DESCR_FRAME_END = 0x04, /* used in rx */
369 SPIDER_NET_DESCR_FORCE_END = 0x05, /* used in rx and tx */
370 SPIDER_NET_DESCR_CARDOWNED = 0x0a, /* used in rx and tx */
371 SPIDER_NET_DESCR_NOT_IN_USE /* any other value */
372};
373
374struct spider_net_descr {
375 /* as defined by the hardware */
376 dma_addr_t buf_addr;
377 u32 buf_size;
378 dma_addr_t next_descr_addr;
379 u32 dmac_cmd_status;
380 u32 result_size;
381 u32 valid_size; /* all zeroes for tx */
382 u32 data_status;
383 u32 data_error; /* all zeroes for tx */
384
385 /* used in the driver */
386 struct sk_buff *skb;
387 dma_addr_t bus_addr;
388 struct spider_net_descr *next;
389 struct spider_net_descr *prev;
390} __attribute__((aligned(32)));
391
392struct spider_net_descr_chain {
393 /* we walk from tail to head */
394 struct spider_net_descr *head;
395 struct spider_net_descr *tail;
396};
397
398/* descriptor data_status bits */
399#define SPIDER_NET_RXIPCHK 29
400#define SPIDER_NET_TCPUDPIPCHK 28
401#define SPIDER_NET_DATA_STATUS_CHK_MASK (1 << SPIDER_NET_RXIPCHK | \
402 1 << SPIDER_NET_TCPUDPIPCHK)
403
404#define SPIDER_NET_VLAN_PACKET 21
405
406/* descriptor data_error bits */
407#define SPIDER_NET_RXIPCHKERR 27
408#define SPIDER_NET_RXTCPCHKERR 26
409#define SPIDER_NET_DATA_ERROR_CHK_MASK (1 << SPIDER_NET_RXIPCHKERR | \
410 1 << SPIDER_NET_RXTCPCHKERR)
411
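/* Illustrative sketch (assumption, not part of the original header): a
 * received frame's checksum can be considered verified by hardware when
 * data_status flags a checked IP/TCP/UDP frame and data_error reports no
 * checksum error. */
static inline int
example_rx_csum_ok(u32 data_status, u32 data_error)
{
	return (data_status & SPIDER_NET_DATA_STATUS_CHK_MASK) &&
	       !(data_error & SPIDER_NET_DATA_ERROR_CHK_MASK);
}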
412/* the cases in which we don't pass the packet to the stack */
413#define SPIDER_NET_DESTROY_RX_FLAGS 0x70138000
414
415#define SPIDER_NET_DESCR_SIZE 32
416
417/* this will be bigger some time */
418struct spider_net_options {
419 int rx_csum; /* for rx: if 0 ip_summed=NONE,
420 if 1 and hw has verified, ip_summed=UNNECESSARY */
421};
422
423#define SPIDER_NET_DEFAULT_MSG ( NETIF_MSG_DRV | \
424 NETIF_MSG_PROBE | \
425 NETIF_MSG_LINK | \
426 NETIF_MSG_TIMER | \
427 NETIF_MSG_IFDOWN | \
428 NETIF_MSG_IFUP | \
429 NETIF_MSG_RX_ERR | \
430 NETIF_MSG_TX_ERR | \
431 NETIF_MSG_TX_QUEUED | \
432 NETIF_MSG_INTR | \
433 NETIF_MSG_TX_DONE | \
434 NETIF_MSG_RX_STATUS | \
435 NETIF_MSG_PKTDATA | \
436 NETIF_MSG_HW | \
437 NETIF_MSG_WOL )
438
439struct spider_net_card {
440 struct net_device *netdev;
441 struct pci_dev *pdev;
442 struct mii_phy phy;
443
444 void __iomem *regs;
445
446 struct spider_net_descr_chain tx_chain;
447 struct spider_net_descr_chain rx_chain;
448 spinlock_t chain_lock;
449
450 struct net_device_stats netdev_stats;
451
452 struct spider_net_options options;
453
454 spinlock_t intmask_lock;
455
456 struct work_struct tx_timeout_task;
457 atomic_t tx_timeout_task_counter;
458 wait_queue_head_t waitq;
459
460 /* for ethtool */
461 int msg_enable;
462
463 struct spider_net_descr descr[0];
464};
465
466#define pr_err(fmt,arg...) \
467 printk(KERN_ERR fmt ,##arg)
468
469#endif
diff --git a/drivers/net/spider_net_ethtool.c b/drivers/net/spider_net_ethtool.c
new file mode 100644
index 000000000000..d42e60ba74ce
--- /dev/null
+++ b/drivers/net/spider_net_ethtool.c
@@ -0,0 +1,126 @@
1/*
2 * Network device driver for Cell Processor-Based Blade
3 *
4 * (C) Copyright IBM Corp. 2005
5 *
6 * Authors : Utz Bacher <utz.bacher@de.ibm.com>
7 * Jens Osterkamp <Jens.Osterkamp@de.ibm.com>
8 *
9 * This program is free software; you can redistribute it and/or modify
10 * it under the terms of the GNU General Public License as published by
11 * the Free Software Foundation; either version 2, or (at your option)
12 * any later version.
13 *
14 * This program is distributed in the hope that it will be useful,
15 * but WITHOUT ANY WARRANTY; without even the implied warranty of
16 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
17 * GNU General Public License for more details.
18 *
19 * You should have received a copy of the GNU General Public License
20 * along with this program; if not, write to the Free Software
21 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
22 */
23
24#include <linux/netdevice.h>
25#include <linux/ethtool.h>
26#include <linux/pci.h>
27
28#include "spider_net.h"
29
30static int
31spider_net_ethtool_get_settings(struct net_device *netdev,
32 struct ethtool_cmd *cmd)
33{
34 struct spider_net_card *card;
35 card = netdev_priv(netdev);
36
37 cmd->supported = (SUPPORTED_1000baseT_Full |
38 SUPPORTED_FIBRE);
39 cmd->advertising = (ADVERTISED_1000baseT_Full |
40 ADVERTISED_FIBRE);
41 cmd->port = PORT_FIBRE;
42 cmd->speed = card->phy.speed;
43 cmd->duplex = DUPLEX_FULL;
44
45 return 0;
46}
47
48static void
49spider_net_ethtool_get_drvinfo(struct net_device *netdev,
50 struct ethtool_drvinfo *drvinfo)
51{
52 struct spider_net_card *card;
53 card = netdev_priv(netdev);
54
55 /* clear and fill out info */
56 memset(drvinfo, 0, sizeof(struct ethtool_drvinfo));
57 strncpy(drvinfo->driver, spider_net_driver_name, 32);
58 strncpy(drvinfo->version, "0.1", 32);
59 strcpy(drvinfo->fw_version, "no information");
60 strncpy(drvinfo->bus_info, pci_name(card->pdev), 32);
61}
62
63static void
64spider_net_ethtool_get_wol(struct net_device *netdev,
65 struct ethtool_wolinfo *wolinfo)
66{
67 /* no support for wol */
68 wolinfo->supported = 0;
69 wolinfo->wolopts = 0;
70}
71
72static u32
73spider_net_ethtool_get_msglevel(struct net_device *netdev)
74{
75 struct spider_net_card *card;
76 card = netdev_priv(netdev);
77 return card->msg_enable;
78}
79
80static void
81spider_net_ethtool_set_msglevel(struct net_device *netdev,
82 u32 level)
83{
84 struct spider_net_card *card;
85 card = netdev_priv(netdev);
86 card->msg_enable = level;
87}
88
89static int
90spider_net_ethtool_nway_reset(struct net_device *netdev)
91{
92 if (netif_running(netdev)) {
93 spider_net_stop(netdev);
94 spider_net_open(netdev);
95 }
96 return 0;
97}
98
99static u32
100spider_net_ethtool_get_rx_csum(struct net_device *netdev)
101{
102 struct spider_net_card *card = netdev_priv(netdev);
103
104 return card->options.rx_csum;
105}
106
107static int
108spider_net_ethtool_set_rx_csum(struct net_device *netdev, u32 n)
109{
110 struct spider_net_card *card = netdev_priv(netdev);
111
112 card->options.rx_csum = n;
113 return 0;
114}
115
116struct ethtool_ops spider_net_ethtool_ops = {
117 .get_settings = spider_net_ethtool_get_settings,
118 .get_drvinfo = spider_net_ethtool_get_drvinfo,
119 .get_wol = spider_net_ethtool_get_wol,
120 .get_msglevel = spider_net_ethtool_get_msglevel,
121 .set_msglevel = spider_net_ethtool_set_msglevel,
122 .nway_reset = spider_net_ethtool_nway_reset,
123 .get_rx_csum = spider_net_ethtool_get_rx_csum,
124 .set_rx_csum = spider_net_ethtool_set_rx_csum,
125};
126
diff --git a/drivers/net/sun3lance.c b/drivers/net/sun3lance.c
index 1f43bbfbc1c7..5c8fcd40ef4d 100644
--- a/drivers/net/sun3lance.c
+++ b/drivers/net/sun3lance.c
@@ -162,7 +162,7 @@ struct lance_private {
162#define MEM lp->mem 162#define MEM lp->mem
163#define DREG lp->iobase[0] 163#define DREG lp->iobase[0]
164#define AREG lp->iobase[1] 164#define AREG lp->iobase[1]
165#define REGA(a) ( AREG = (a), DREG ) 165#define REGA(a) (*( AREG = (a), &DREG ))
166 166
167/* Definitions for the Lance */ 167/* Definitions for the Lance */
168 168
diff --git a/drivers/net/wireless/airo.c b/drivers/net/wireless/airo.c
index dbcb5a8a2194..2be65d308fbe 100644
--- a/drivers/net/wireless/airo.c
+++ b/drivers/net/wireless/airo.c
@@ -3258,7 +3258,7 @@ badrx:
3258 wstats.noise = apriv->wstats.qual.noise; 3258 wstats.noise = apriv->wstats.qual.noise;
3259 wstats.updated = IW_QUAL_LEVEL_UPDATED 3259 wstats.updated = IW_QUAL_LEVEL_UPDATED
3260 | IW_QUAL_QUAL_UPDATED 3260 | IW_QUAL_QUAL_UPDATED
3261 | IW_QUAL_NOISE_UPDATED; 3261 | IW_QUAL_DBM;
3262 /* Update spy records */ 3262 /* Update spy records */
3263 wireless_spy_update(dev, sa, &wstats); 3263 wireless_spy_update(dev, sa, &wstats);
3264 } 3264 }
@@ -3604,7 +3604,7 @@ void mpi_receive_802_11 (struct airo_info *ai)
3604 wstats.noise = ai->wstats.qual.noise; 3604 wstats.noise = ai->wstats.qual.noise;
3605 wstats.updated = IW_QUAL_QUAL_UPDATED 3605 wstats.updated = IW_QUAL_QUAL_UPDATED
3606 | IW_QUAL_LEVEL_UPDATED 3606 | IW_QUAL_LEVEL_UPDATED
3607 | IW_QUAL_NOISE_UPDATED; 3607 | IW_QUAL_DBM;
3608 /* Update spy records */ 3608 /* Update spy records */
3609 wireless_spy_update(ai->dev, sa, &wstats); 3609 wireless_spy_update(ai->dev, sa, &wstats);
3610 } 3610 }
@@ -6489,22 +6489,20 @@ static int airo_get_range(struct net_device *dev,
6489 range->max_qual.qual = 100; /* % */ 6489 range->max_qual.qual = 100; /* % */
6490 else 6490 else
6491 range->max_qual.qual = airo_get_max_quality(&cap_rid); 6491 range->max_qual.qual = airo_get_max_quality(&cap_rid);
6492 range->max_qual.level = 0; /* 0 means we use dBm */ 6492 range->max_qual.level = 0x100 - 120; /* -120 dBm */
6493 range->max_qual.noise = 0; 6493 range->max_qual.noise = 0x100 - 120; /* -120 dBm */
6494 range->max_qual.updated = 0;
6495 6494
6496 /* Experimental measurements - boundary 11/5.5 Mb/s */ 6495 /* Experimental measurements - boundary 11/5.5 Mb/s */
6497 /* Note : with or without the (local->rssi), results 6496 /* Note : with or without the (local->rssi), results
6498 * are somewhat different. - Jean II */ 6497 * are somewhat different. - Jean II */
6499 if (local->rssi) { 6498 if (local->rssi) {
6500 range->avg_qual.qual = 50; /* % */ 6499 range->avg_qual.qual = 50; /* % */
6501 range->avg_qual.level = 186; /* -70 dBm */ 6500 range->avg_qual.level = 0x100 - 70; /* -70 dBm */
6502 } else { 6501 } else {
6503 range->avg_qual.qual = airo_get_avg_quality(&cap_rid); 6502 range->avg_qual.qual = airo_get_avg_quality(&cap_rid);
6504 range->avg_qual.level = 176; /* -80 dBm */ 6503 range->avg_qual.level = 0x100 - 80; /* -80 dBm */
6505 } 6504 }
6506 range->avg_qual.noise = 0; 6505 range->avg_qual.noise = 0x100 - 85; /* -85 dBm */
6507 range->avg_qual.updated = 0;
6508 6506
6509 for(i = 0 ; i < 8 ; i++) { 6507 for(i = 0 ; i < 8 ; i++) {
6510 range->bitrate[i] = cap_rid.supportedRates[i] * 500000; 6508 range->bitrate[i] = cap_rid.supportedRates[i] * 500000;
@@ -6727,15 +6725,17 @@ static int airo_get_aplist(struct net_device *dev,
6727 if (local->rssi) { 6725 if (local->rssi) {
6728 qual[i].level = 0x100 - BSSList.dBm; 6726 qual[i].level = 0x100 - BSSList.dBm;
6729 qual[i].qual = airo_dbm_to_pct( local->rssi, BSSList.dBm ); 6727 qual[i].qual = airo_dbm_to_pct( local->rssi, BSSList.dBm );
6730 qual[i].updated = IW_QUAL_QUAL_UPDATED; 6728 qual[i].updated = IW_QUAL_QUAL_UPDATED
6729 | IW_QUAL_LEVEL_UPDATED
6730 | IW_QUAL_DBM;
6731 } else { 6731 } else {
6732 qual[i].level = (BSSList.dBm + 321) / 2; 6732 qual[i].level = (BSSList.dBm + 321) / 2;
6733 qual[i].qual = 0; 6733 qual[i].qual = 0;
6734 qual[i].updated = IW_QUAL_QUAL_INVALID; 6734 qual[i].updated = IW_QUAL_QUAL_INVALID
6735 | IW_QUAL_LEVEL_UPDATED
6736 | IW_QUAL_DBM;
6735 } 6737 }
6736 qual[i].noise = local->wstats.qual.noise; 6738 qual[i].noise = local->wstats.qual.noise;
6737 qual[i].updated = IW_QUAL_LEVEL_UPDATED
6738 | IW_QUAL_NOISE_UPDATED;
6739 if (BSSList.index == 0xffff) 6739 if (BSSList.index == 0xffff)
6740 break; 6740 break;
6741 } 6741 }
@@ -6861,15 +6861,17 @@ static inline char *airo_translate_scan(struct net_device *dev,
6861 if (ai->rssi) { 6861 if (ai->rssi) {
6862 iwe.u.qual.level = 0x100 - bss->dBm; 6862 iwe.u.qual.level = 0x100 - bss->dBm;
6863 iwe.u.qual.qual = airo_dbm_to_pct( ai->rssi, bss->dBm ); 6863 iwe.u.qual.qual = airo_dbm_to_pct( ai->rssi, bss->dBm );
6864 iwe.u.qual.updated = IW_QUAL_QUAL_UPDATED; 6864 iwe.u.qual.updated = IW_QUAL_QUAL_UPDATED
6865 | IW_QUAL_LEVEL_UPDATED
6866 | IW_QUAL_DBM;
6865 } else { 6867 } else {
6866 iwe.u.qual.level = (bss->dBm + 321) / 2; 6868 iwe.u.qual.level = (bss->dBm + 321) / 2;
6867 iwe.u.qual.qual = 0; 6869 iwe.u.qual.qual = 0;
6868 iwe.u.qual.updated = IW_QUAL_QUAL_INVALID; 6870 iwe.u.qual.updated = IW_QUAL_QUAL_INVALID
6871 | IW_QUAL_LEVEL_UPDATED
6872 | IW_QUAL_DBM;
6869 } 6873 }
6870 iwe.u.qual.noise = ai->wstats.qual.noise; 6874 iwe.u.qual.noise = ai->wstats.qual.noise;
6871 iwe.u.qual.updated = IW_QUAL_LEVEL_UPDATED
6872 | IW_QUAL_NOISE_UPDATED;
6873 current_ev = iwe_stream_add_event(current_ev, end_buf, &iwe, IW_EV_QUAL_LEN); 6875 current_ev = iwe_stream_add_event(current_ev, end_buf, &iwe, IW_EV_QUAL_LEN);
6874 6876
6875 /* Add encryption capability */ 6877 /* Add encryption capability */
@@ -7222,13 +7224,12 @@ static void airo_read_wireless_stats(struct airo_info *local)
7222 local->wstats.qual.level = (status_rid.normalizedSignalStrength + 321) / 2; 7224 local->wstats.qual.level = (status_rid.normalizedSignalStrength + 321) / 2;
7223 local->wstats.qual.qual = airo_get_quality(&status_rid, &cap_rid); 7225 local->wstats.qual.qual = airo_get_quality(&status_rid, &cap_rid);
7224 } 7226 }
7225 local->wstats.qual.updated = IW_QUAL_QUAL_UPDATED | IW_QUAL_LEVEL_UPDATED;
7226 if (status_rid.len >= 124) { 7227 if (status_rid.len >= 124) {
7227 local->wstats.qual.noise = 0x100 - status_rid.noisedBm; 7228 local->wstats.qual.noise = 0x100 - status_rid.noisedBm;
7228 local->wstats.qual.updated |= IW_QUAL_NOISE_UPDATED; 7229 local->wstats.qual.updated = IW_QUAL_ALL_UPDATED | IW_QUAL_DBM;
7229 } else { 7230 } else {
7230 local->wstats.qual.noise = 0; 7231 local->wstats.qual.noise = 0;
7231 local->wstats.qual.updated |= IW_QUAL_NOISE_INVALID; 7232 local->wstats.qual.updated = IW_QUAL_QUAL_UPDATED | IW_QUAL_LEVEL_UPDATED | IW_QUAL_NOISE_INVALID | IW_QUAL_DBM;
7232 } 7233 }
7233 7234
7234 /* Packets discarded in the wireless adapter due to wireless 7235 /* Packets discarded in the wireless adapter due to wireless
diff --git a/drivers/net/wireless/atmel.c b/drivers/net/wireless/atmel.c
index f48a6e729224..587869d86eee 100644
--- a/drivers/net/wireless/atmel.c
+++ b/drivers/net/wireless/atmel.c
@@ -1593,7 +1593,6 @@ struct net_device *init_atmel_card( unsigned short irq, int port, const AtmelFWT
1593 dev->set_mac_address = atmel_set_mac_address; 1593 dev->set_mac_address = atmel_set_mac_address;
1594 dev->hard_start_xmit = start_tx; 1594 dev->hard_start_xmit = start_tx;
1595 dev->get_stats = atmel_get_stats; 1595 dev->get_stats = atmel_get_stats;
1596 dev->get_wireless_stats = atmel_get_wireless_stats;
1597 dev->wireless_handlers = (struct iw_handler_def *)&atmel_handler_def; 1596 dev->wireless_handlers = (struct iw_handler_def *)&atmel_handler_def;
1598 dev->do_ioctl = atmel_ioctl; 1597 dev->do_ioctl = atmel_ioctl;
1599 dev->irq = irq; 1598 dev->irq = irq;
@@ -2411,7 +2410,8 @@ static const struct iw_handler_def atmel_handler_def =
2411 .num_private_args = sizeof(atmel_private_args)/sizeof(struct iw_priv_args), 2410 .num_private_args = sizeof(atmel_private_args)/sizeof(struct iw_priv_args),
2412 .standard = (iw_handler *) atmel_handler, 2411 .standard = (iw_handler *) atmel_handler,
2413 .private = (iw_handler *) atmel_private_handler, 2412 .private = (iw_handler *) atmel_private_handler,
2414 .private_args = (struct iw_priv_args *) atmel_private_args 2413 .private_args = (struct iw_priv_args *) atmel_private_args,
2414 .get_wireless_stats = atmel_get_wireless_stats
2415}; 2415};
2416 2416
2417static int atmel_ioctl(struct net_device *dev, struct ifreq *rq, int cmd) 2417static int atmel_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
@@ -2424,19 +2424,6 @@ static int atmel_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
2424 char domain[REGDOMAINSZ+1]; 2424 char domain[REGDOMAINSZ+1];
2425 2425
2426 switch (cmd) { 2426 switch (cmd) {
2427 case SIOCGIWPRIV:
2428 if(wrq->u.data.pointer) {
2429 /* Set the number of ioctl available */
2430 wrq->u.data.length = sizeof(atmel_private_args) / sizeof(atmel_private_args[0]);
2431
2432 /* Copy structure to the user buffer */
2433 if (copy_to_user(wrq->u.data.pointer,
2434 (u_char *) atmel_private_args,
2435 sizeof(atmel_private_args)))
2436 rc = -EFAULT;
2437 }
2438 break;
2439
2440 case ATMELIDIFC: 2427 case ATMELIDIFC:
2441 wrq->u.param.value = ATMELMAGIC; 2428 wrq->u.param.value = ATMELMAGIC;
2442 break; 2429 break;
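The atmel changes mirror the airo ones: the get_wireless_stats callback moves from the net_device into struct iw_handler_def, and the driver's own SIOCGIWPRIV case is dropped, since the wireless-extensions core can serve that request itself from the .private_args / .num_private_args it is already given. As a rough user-space analogue of hanging the stats callback off an ops table instead of the device object (all names here are invented for the sketch):

#include <stdio.h>

struct stats {
        int qual;
        int level;
};

/* Per-driver operations table, loosely analogous to iw_handler_def. */
struct wireless_ops {
        struct stats *(*get_wireless_stats)(void *dev);
};

static struct stats demo_stats = { .qual = 42, .level = -70 };

static struct stats *demo_get_stats(void *dev)
{
        (void)dev;
        return &demo_stats;
}

static const struct wireless_ops demo_ops = {
        /* registered in the ops table, not on the device itself */
        .get_wireless_stats = demo_get_stats,
};

int main(void)
{
        struct stats *s = demo_ops.get_wireless_stats(NULL);

        printf("qual=%d level=%d dBm\n", s->qual, s->level);
        return 0;
}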
diff --git a/drivers/net/wireless/ipw2200.c b/drivers/net/wireless/ipw2200.c
index 2a3bd607a5cd..b7f275c00de3 100644
--- a/drivers/net/wireless/ipw2200.c
+++ b/drivers/net/wireless/ipw2200.c
@@ -72,7 +72,8 @@ static void ipw_rx_queue_replenish(void *);
72static int ipw_up(struct ipw_priv *); 72static int ipw_up(struct ipw_priv *);
73static void ipw_down(struct ipw_priv *); 73static void ipw_down(struct ipw_priv *);
74static int ipw_config(struct ipw_priv *); 74static int ipw_config(struct ipw_priv *);
75static int init_supported_rates(struct ipw_priv *priv, struct ipw_supported_rates *prates); 75static int init_supported_rates(struct ipw_priv *priv,
76 struct ipw_supported_rates *prates);
76 77
77static u8 band_b_active_channel[MAX_B_CHANNELS] = { 78static u8 band_b_active_channel[MAX_B_CHANNELS] = {
78 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 0 79 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 0
@@ -102,7 +103,7 @@ static int is_valid_channel(int mode_mask, int channel)
102} 103}
103 104
104static char *snprint_line(char *buf, size_t count, 105static char *snprint_line(char *buf, size_t count,
105 const u8 *data, u32 len, u32 ofs) 106 const u8 * data, u32 len, u32 ofs)
106{ 107{
107 int out, i, j, l; 108 int out, i, j, l;
108 char c; 109 char c;
@@ -136,7 +137,7 @@ static char *snprint_line(char *buf, size_t count,
136 return buf; 137 return buf;
137} 138}
138 139
139static void printk_buf(int level, const u8 *data, u32 len) 140static void printk_buf(int level, const u8 * data, u32 len)
140{ 141{
141 char line[81]; 142 char line[81];
142 u32 ofs = 0; 143 u32 ofs = 0;
@@ -161,21 +162,24 @@ static u8 _ipw_read_reg8(struct ipw_priv *ipw, u32 reg);
161static void _ipw_write_reg8(struct ipw_priv *priv, u32 reg, u8 value); 162static void _ipw_write_reg8(struct ipw_priv *priv, u32 reg, u8 value);
162static inline void ipw_write_reg8(struct ipw_priv *a, u32 b, u8 c) 163static inline void ipw_write_reg8(struct ipw_priv *a, u32 b, u8 c)
163{ 164{
164 IPW_DEBUG_IO("%s %d: write_indirect8(0x%08X, 0x%08X)\n", __FILE__, __LINE__, (u32)(b), (u32)(c)); 165 IPW_DEBUG_IO("%s %d: write_indirect8(0x%08X, 0x%08X)\n", __FILE__,
166 __LINE__, (u32) (b), (u32) (c));
165 _ipw_write_reg8(a, b, c); 167 _ipw_write_reg8(a, b, c);
166} 168}
167 169
168static void _ipw_write_reg16(struct ipw_priv *priv, u32 reg, u16 value); 170static void _ipw_write_reg16(struct ipw_priv *priv, u32 reg, u16 value);
169static inline void ipw_write_reg16(struct ipw_priv *a, u32 b, u16 c) 171static inline void ipw_write_reg16(struct ipw_priv *a, u32 b, u16 c)
170{ 172{
171 IPW_DEBUG_IO("%s %d: write_indirect16(0x%08X, 0x%08X)\n", __FILE__, __LINE__, (u32)(b), (u32)(c)); 173 IPW_DEBUG_IO("%s %d: write_indirect16(0x%08X, 0x%08X)\n", __FILE__,
174 __LINE__, (u32) (b), (u32) (c));
172 _ipw_write_reg16(a, b, c); 175 _ipw_write_reg16(a, b, c);
173} 176}
174 177
175static void _ipw_write_reg32(struct ipw_priv *priv, u32 reg, u32 value); 178static void _ipw_write_reg32(struct ipw_priv *priv, u32 reg, u32 value);
176static inline void ipw_write_reg32(struct ipw_priv *a, u32 b, u32 c) 179static inline void ipw_write_reg32(struct ipw_priv *a, u32 b, u32 c)
177{ 180{
178 IPW_DEBUG_IO("%s %d: write_indirect32(0x%08X, 0x%08X)\n", __FILE__, __LINE__, (u32)(b), (u32)(c)); 181 IPW_DEBUG_IO("%s %d: write_indirect32(0x%08X, 0x%08X)\n", __FILE__,
182 __LINE__, (u32) (b), (u32) (c));
179 _ipw_write_reg32(a, b, c); 183 _ipw_write_reg32(a, b, c);
180} 184}
181 185
@@ -195,24 +199,30 @@ static inline void ipw_write_reg32(struct ipw_priv *a, u32 b, u32 c)
195 _ipw_write32(ipw, ofs, val) 199 _ipw_write32(ipw, ofs, val)
196 200
197#define _ipw_read8(ipw, ofs) readb((ipw)->hw_base + (ofs)) 201#define _ipw_read8(ipw, ofs) readb((ipw)->hw_base + (ofs))
198static inline u8 __ipw_read8(char *f, u32 l, struct ipw_priv *ipw, u32 ofs) { 202static inline u8 __ipw_read8(char *f, u32 l, struct ipw_priv *ipw, u32 ofs)
199 IPW_DEBUG_IO("%s %d: read_direct8(0x%08X)\n", f, l, (u32)(ofs)); 203{
204 IPW_DEBUG_IO("%s %d: read_direct8(0x%08X)\n", f, l, (u32) (ofs));
200 return _ipw_read8(ipw, ofs); 205 return _ipw_read8(ipw, ofs);
201} 206}
207
202#define ipw_read8(ipw, ofs) __ipw_read8(__FILE__, __LINE__, ipw, ofs) 208#define ipw_read8(ipw, ofs) __ipw_read8(__FILE__, __LINE__, ipw, ofs)
203 209
204#define _ipw_read16(ipw, ofs) readw((ipw)->hw_base + (ofs)) 210#define _ipw_read16(ipw, ofs) readw((ipw)->hw_base + (ofs))
205static inline u16 __ipw_read16(char *f, u32 l, struct ipw_priv *ipw, u32 ofs) { 211static inline u16 __ipw_read16(char *f, u32 l, struct ipw_priv *ipw, u32 ofs)
206 IPW_DEBUG_IO("%s %d: read_direct16(0x%08X)\n", f, l, (u32)(ofs)); 212{
213 IPW_DEBUG_IO("%s %d: read_direct16(0x%08X)\n", f, l, (u32) (ofs));
207 return _ipw_read16(ipw, ofs); 214 return _ipw_read16(ipw, ofs);
208} 215}
216
209#define ipw_read16(ipw, ofs) __ipw_read16(__FILE__, __LINE__, ipw, ofs) 217#define ipw_read16(ipw, ofs) __ipw_read16(__FILE__, __LINE__, ipw, ofs)
210 218
211#define _ipw_read32(ipw, ofs) readl((ipw)->hw_base + (ofs)) 219#define _ipw_read32(ipw, ofs) readl((ipw)->hw_base + (ofs))
212static inline u32 __ipw_read32(char *f, u32 l, struct ipw_priv *ipw, u32 ofs) { 220static inline u32 __ipw_read32(char *f, u32 l, struct ipw_priv *ipw, u32 ofs)
213 IPW_DEBUG_IO("%s %d: read_direct32(0x%08X)\n", f, l, (u32)(ofs)); 221{
222 IPW_DEBUG_IO("%s %d: read_direct32(0x%08X)\n", f, l, (u32) (ofs));
214 return _ipw_read32(ipw, ofs); 223 return _ipw_read32(ipw, ofs);
215} 224}
225
216#define ipw_read32(ipw, ofs) __ipw_read32(__FILE__, __LINE__, ipw, ofs) 226#define ipw_read32(ipw, ofs) __ipw_read32(__FILE__, __LINE__, ipw, ofs)
217 227
218static void _ipw_read_indirect(struct ipw_priv *, u32, u8 *, int); 228static void _ipw_read_indirect(struct ipw_priv *, u32, u8 *, int);
@@ -220,34 +230,30 @@ static void _ipw_read_indirect(struct ipw_priv *, u32, u8 *, int);
220 IPW_DEBUG_IO("%s %d: read_inddirect(0x%08X) %d bytes\n", __FILE__, __LINE__, (u32)(b), d); \ 230 IPW_DEBUG_IO("%s %d: read_inddirect(0x%08X) %d bytes\n", __FILE__, __LINE__, (u32)(b), d); \
221 _ipw_read_indirect(a, b, c, d) 231 _ipw_read_indirect(a, b, c, d)
222 232
223static void _ipw_write_indirect(struct ipw_priv *priv, u32 addr, u8 *data, int num); 233static void _ipw_write_indirect(struct ipw_priv *priv, u32 addr, u8 * data,
234 int num);
224#define ipw_write_indirect(a, b, c, d) \ 235#define ipw_write_indirect(a, b, c, d) \
225 IPW_DEBUG_IO("%s %d: write_indirect(0x%08X) %d bytes\n", __FILE__, __LINE__, (u32)(b), d); \ 236 IPW_DEBUG_IO("%s %d: write_indirect(0x%08X) %d bytes\n", __FILE__, __LINE__, (u32)(b), d); \
226 _ipw_write_indirect(a, b, c, d) 237 _ipw_write_indirect(a, b, c, d)
227 238
228/* indirect write s */ 239/* indirect write s */
229static void _ipw_write_reg32(struct ipw_priv *priv, u32 reg, 240static void _ipw_write_reg32(struct ipw_priv *priv, u32 reg, u32 value)
230 u32 value)
231{ 241{
232 IPW_DEBUG_IO(" %p : reg = 0x%8X : value = 0x%8X\n", 242 IPW_DEBUG_IO(" %p : reg = 0x%8X : value = 0x%8X\n", priv, reg, value);
233 priv, reg, value);
234 _ipw_write32(priv, CX2_INDIRECT_ADDR, reg); 243 _ipw_write32(priv, CX2_INDIRECT_ADDR, reg);
235 _ipw_write32(priv, CX2_INDIRECT_DATA, value); 244 _ipw_write32(priv, CX2_INDIRECT_DATA, value);
236} 245}
237 246
238
239static void _ipw_write_reg8(struct ipw_priv *priv, u32 reg, u8 value) 247static void _ipw_write_reg8(struct ipw_priv *priv, u32 reg, u8 value)
240{ 248{
241 IPW_DEBUG_IO(" reg = 0x%8X : value = 0x%8X\n", reg, value); 249 IPW_DEBUG_IO(" reg = 0x%8X : value = 0x%8X\n", reg, value);
242 _ipw_write32(priv, CX2_INDIRECT_ADDR, reg & CX2_INDIRECT_ADDR_MASK); 250 _ipw_write32(priv, CX2_INDIRECT_ADDR, reg & CX2_INDIRECT_ADDR_MASK);
243 _ipw_write8(priv, CX2_INDIRECT_DATA, value); 251 _ipw_write8(priv, CX2_INDIRECT_DATA, value);
244 IPW_DEBUG_IO(" reg = 0x%8lX : value = 0x%8X\n", 252 IPW_DEBUG_IO(" reg = 0x%8lX : value = 0x%8X\n",
245 (unsigned long)(priv->hw_base + CX2_INDIRECT_DATA), 253 (unsigned long)(priv->hw_base + CX2_INDIRECT_DATA), value);
246 value);
247} 254}
248 255
249static void _ipw_write_reg16(struct ipw_priv *priv, u32 reg, 256static void _ipw_write_reg16(struct ipw_priv *priv, u32 reg, u16 value)
250 u16 value)
251{ 257{
252 IPW_DEBUG_IO(" reg = 0x%8X : value = 0x%8X\n", reg, value); 258 IPW_DEBUG_IO(" reg = 0x%8X : value = 0x%8X\n", reg, value);
253 _ipw_write32(priv, CX2_INDIRECT_ADDR, reg & CX2_INDIRECT_ADDR_MASK); 259 _ipw_write32(priv, CX2_INDIRECT_ADDR, reg & CX2_INDIRECT_ADDR_MASK);
@@ -262,7 +268,7 @@ static u8 _ipw_read_reg8(struct ipw_priv *priv, u32 reg)
262 _ipw_write32(priv, CX2_INDIRECT_ADDR, reg & CX2_INDIRECT_ADDR_MASK); 268 _ipw_write32(priv, CX2_INDIRECT_ADDR, reg & CX2_INDIRECT_ADDR_MASK);
263 IPW_DEBUG_IO(" reg = 0x%8X : \n", reg); 269 IPW_DEBUG_IO(" reg = 0x%8X : \n", reg);
264 word = _ipw_read32(priv, CX2_INDIRECT_DATA); 270 word = _ipw_read32(priv, CX2_INDIRECT_DATA);
265 return (word >> ((reg & 0x3)*8)) & 0xff; 271 return (word >> ((reg & 0x3) * 8)) & 0xff;
266} 272}
267 273
268static u32 _ipw_read_reg32(struct ipw_priv *priv, u32 reg) 274static u32 _ipw_read_reg32(struct ipw_priv *priv, u32 reg)
@@ -302,7 +308,7 @@ static void _ipw_read_indirect(struct ipw_priv *priv, u32 addr, u8 * buf,
302 _ipw_write32(priv, CX2_AUTOINC_ADDR, aligned_addr); 308 _ipw_write32(priv, CX2_AUTOINC_ADDR, aligned_addr);
303 aligned_len = num & CX2_INDIRECT_ADDR_MASK; 309 aligned_len = num & CX2_INDIRECT_ADDR_MASK;
304 for (i = 0; i < aligned_len; i += 4, buf += 4, aligned_addr += 4) 310 for (i = 0; i < aligned_len; i += 4, buf += 4, aligned_addr += 4)
305 *(u32*)buf = ipw_read32(priv, CX2_AUTOINC_DATA); 311 *(u32 *) buf = ipw_read32(priv, CX2_AUTOINC_DATA);
306 312
307 /* Copy the last nibble */ 313 /* Copy the last nibble */
308 dif_len = num - aligned_len; 314 dif_len = num - aligned_len;
@@ -311,7 +317,7 @@ static void _ipw_read_indirect(struct ipw_priv *priv, u32 addr, u8 * buf,
311 *buf = ipw_read8(priv, CX2_INDIRECT_DATA + i); 317 *buf = ipw_read8(priv, CX2_INDIRECT_DATA + i);
312} 318}
313 319
314static void _ipw_write_indirect(struct ipw_priv *priv, u32 addr, u8 *buf, 320static void _ipw_write_indirect(struct ipw_priv *priv, u32 addr, u8 * buf,
315 int num) 321 int num)
316{ 322{
317 u32 aligned_addr = addr & CX2_INDIRECT_ADDR_MASK; 323 u32 aligned_addr = addr & CX2_INDIRECT_ADDR_MASK;
@@ -335,7 +341,7 @@ static void _ipw_write_indirect(struct ipw_priv *priv, u32 addr, u8 *buf,
335 _ipw_write32(priv, CX2_AUTOINC_ADDR, aligned_addr); 341 _ipw_write32(priv, CX2_AUTOINC_ADDR, aligned_addr);
336 aligned_len = num & CX2_INDIRECT_ADDR_MASK; 342 aligned_len = num & CX2_INDIRECT_ADDR_MASK;
337 for (i = 0; i < aligned_len; i += 4, buf += 4, aligned_addr += 4) 343 for (i = 0; i < aligned_len; i += 4, buf += 4, aligned_addr += 4)
338 _ipw_write32(priv, CX2_AUTOINC_DATA, *(u32*)buf); 344 _ipw_write32(priv, CX2_AUTOINC_DATA, *(u32 *) buf);
339 345
340 /* Copy the last nibble */ 346 /* Copy the last nibble */
341 dif_len = num - aligned_len; 347 dif_len = num - aligned_len;
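The indirect read and write helpers touched above share one pattern: the bulk of a transfer goes through the auto-increment register in aligned 32-bit words, and whatever is left over (the "last nibble" of the comments) is moved one byte at a time. A self-contained sketch of that split, operating on ordinary memory rather than device registers:

#include <stdio.h>
#include <stdint.h>
#include <string.h>

/* Copy num bytes: whole 32-bit words first, then the 1-3 byte tail. */
static void copy_words_then_tail(uint8_t *dst, const uint8_t *src, size_t num)
{
        size_t aligned_len = num & ~(size_t)3;  /* round down to a multiple of 4 */
        size_t i;

        for (i = 0; i < aligned_len; i += 4) {
                uint32_t word;

                memcpy(&word, src + i, 4);      /* one word-sized transfer */
                memcpy(dst + i, &word, 4);
        }
        for (; i < num; i++)                    /* leftover bytes, if any */
                dst[i] = src[i];
}

int main(void)
{
        const uint8_t src[7] = { 1, 2, 3, 4, 5, 6, 7 };
        uint8_t dst[7] = { 0 };

        copy_words_then_tail(dst, src, sizeof(src));
        printf("last byte copied: %u\n", (unsigned)dst[6]);
        return 0;
}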
@@ -428,20 +434,18 @@ static void ipw_dump_nic_error_log(struct ipw_priv *priv)
428 } 434 }
429 435
430 for (i = ERROR_START_OFFSET; 436 for (i = ERROR_START_OFFSET;
431 i <= count * ERROR_ELEM_SIZE; 437 i <= count * ERROR_ELEM_SIZE; i += ERROR_ELEM_SIZE) {
432 i += ERROR_ELEM_SIZE) { 438 desc = ipw_read_reg32(priv, base + i);
433 desc = ipw_read_reg32(priv, base + i); 439 time = ipw_read_reg32(priv, base + i + 1 * sizeof(u32));
434 time = ipw_read_reg32(priv, base + i + 1*sizeof(u32)); 440 blink1 = ipw_read_reg32(priv, base + i + 2 * sizeof(u32));
435 blink1 = ipw_read_reg32(priv, base + i + 2*sizeof(u32)); 441 blink2 = ipw_read_reg32(priv, base + i + 3 * sizeof(u32));
436 blink2 = ipw_read_reg32(priv, base + i + 3*sizeof(u32)); 442 ilink1 = ipw_read_reg32(priv, base + i + 4 * sizeof(u32));
437 ilink1 = ipw_read_reg32(priv, base + i + 4*sizeof(u32)); 443 ilink2 = ipw_read_reg32(priv, base + i + 5 * sizeof(u32));
438 ilink2 = ipw_read_reg32(priv, base + i + 5*sizeof(u32)); 444 idata = ipw_read_reg32(priv, base + i + 6 * sizeof(u32));
439 idata = ipw_read_reg32(priv, base + i + 6*sizeof(u32));
440 445
441 IPW_ERROR( 446 IPW_ERROR("%s %i 0x%08x 0x%08x 0x%08x 0x%08x 0x%08x\n",
442 "%s %i 0x%08x 0x%08x 0x%08x 0x%08x 0x%08x\n", 447 ipw_error_desc(desc), time, blink1, blink2,
443 ipw_error_desc(desc), time, blink1, blink2, 448 ilink1, ilink2, idata);
444 ilink1, ilink2, idata);
445 } 449 }
446} 450}
447 451
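ipw_dump_nic_error_log, reindented above, walks the firmware error log as fixed-size records: each element sits at base + i with i advancing by ERROR_ELEM_SIZE, and the individual fields are read at successive sizeof(u32) offsets inside the element. A small sketch of the same access pattern over an in-memory table (the three-word layout and the values are invented):

#include <stdio.h>
#include <stdint.h>

#define ELEM_WORDS 3    /* desc, time, data - invented layout */

/* Pretend firmware log: two records of three 32-bit words each. */
static const uint32_t log_mem[] = {
        0x11, 1000, 0xaa,
        0x22, 2000, 0xbb,
};

static uint32_t read32(size_t word_index)
{
        return log_mem[word_index];
}

int main(void)
{
        size_t count = sizeof(log_mem) / sizeof(log_mem[0]) / ELEM_WORDS;
        size_t i;

        for (i = 0; i < count * ELEM_WORDS; i += ELEM_WORDS) {
                uint32_t desc = read32(i);
                uint32_t time = read32(i + 1);
                uint32_t data = read32(i + 2);

                printf("desc=0x%02x time=%u data=0x%02x\n",
                       (unsigned)desc, (unsigned)time, (unsigned)data);
        }
        return 0;
}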
@@ -456,11 +460,10 @@ static void ipw_dump_nic_event_log(struct ipw_priv *priv)
456 IPW_ERROR("Start IPW Event Log Dump:\n"); 460 IPW_ERROR("Start IPW Event Log Dump:\n");
457 461
458 for (i = EVENT_START_OFFSET; 462 for (i = EVENT_START_OFFSET;
459 i <= count * EVENT_ELEM_SIZE; 463 i <= count * EVENT_ELEM_SIZE; i += EVENT_ELEM_SIZE) {
460 i += EVENT_ELEM_SIZE) {
461 ev = ipw_read_reg32(priv, base + i); 464 ev = ipw_read_reg32(priv, base + i);
462 time = ipw_read_reg32(priv, base + i + 1*sizeof(u32)); 465 time = ipw_read_reg32(priv, base + i + 1 * sizeof(u32));
463 data = ipw_read_reg32(priv, base + i + 2*sizeof(u32)); 466 data = ipw_read_reg32(priv, base + i + 2 * sizeof(u32));
464 467
465#ifdef CONFIG_IPW_DEBUG 468#ifdef CONFIG_IPW_DEBUG
466 IPW_ERROR("%i\t0x%08x\t%i\n", time, data, ev); 469 IPW_ERROR("%i\t0x%08x\t%i\n", time, data, ev);
@@ -468,8 +471,7 @@ static void ipw_dump_nic_event_log(struct ipw_priv *priv)
468 } 471 }
469} 472}
470 473
471static int ipw_get_ordinal(struct ipw_priv *priv, u32 ord, void *val, 474static int ipw_get_ordinal(struct ipw_priv *priv, u32 ord, void *val, u32 * len)
472 u32 *len)
473{ 475{
474 u32 addr, field_info, field_len, field_count, total_len; 476 u32 addr, field_info, field_len, field_count, total_len;
475 477
@@ -513,11 +515,11 @@ static int ipw_get_ordinal(struct ipw_priv *priv, u32 ord, void *val,
513 } 515 }
514 516
515 IPW_DEBUG_ORD("Reading TABLE0[%i] from offset 0x%08x\n", 517 IPW_DEBUG_ORD("Reading TABLE0[%i] from offset 0x%08x\n",
516 ord, priv->table0_addr + (ord << 2)); 518 ord, priv->table0_addr + (ord << 2));
517 519
518 *len = sizeof(u32); 520 *len = sizeof(u32);
519 ord <<= 2; 521 ord <<= 2;
520 *((u32 *)val) = ipw_read32(priv, priv->table0_addr + ord); 522 *((u32 *) val) = ipw_read32(priv, priv->table0_addr + ord);
521 break; 523 break;
522 524
523 case IPW_ORD_TABLE_1_MASK: 525 case IPW_ORD_TABLE_1_MASK:
@@ -545,7 +547,8 @@ static int ipw_get_ordinal(struct ipw_priv *priv, u32 ord, void *val,
545 return -EINVAL; 547 return -EINVAL;
546 } 548 }
547 549
548 *((u32 *)val) = ipw_read_reg32(priv, (priv->table1_addr + (ord << 2))); 550 *((u32 *) val) =
551 ipw_read_reg32(priv, (priv->table1_addr + (ord << 2)));
549 *len = sizeof(u32); 552 *len = sizeof(u32);
550 break; 553 break;
551 554
@@ -573,13 +576,16 @@ static int ipw_get_ordinal(struct ipw_priv *priv, u32 ord, void *val,
573 576
574 /* get the second DW of statistics ; 577 /* get the second DW of statistics ;
575 * two 16-bit words - first is length, second is count */ 578 * two 16-bit words - first is length, second is count */
576 field_info = ipw_read_reg32(priv, priv->table2_addr + (ord << 3) + sizeof(u32)); 579 field_info =
580 ipw_read_reg32(priv,
581 priv->table2_addr + (ord << 3) +
582 sizeof(u32));
577 583
578 /* get each entry length */ 584 /* get each entry length */
579 field_len = *((u16 *)&field_info); 585 field_len = *((u16 *) & field_info);
580 586
581 /* get number of entries */ 587 /* get number of entries */
582 field_count = *(((u16 *)&field_info) + 1); 588 field_count = *(((u16 *) & field_info) + 1);
583 589
584 /* abort if not enought memory */ 590 /* abort if not enought memory */
585 total_len = field_len * field_count; 591 total_len = field_len * field_count;
@@ -604,7 +610,6 @@ static int ipw_get_ordinal(struct ipw_priv *priv, u32 ord, void *val,
604 610
605 } 611 }
606 612
607
608 return 0; 613 return 0;
609} 614}
610 615
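Two details of the ordinal helpers above are worth spelling out: a TABLE0 ordinal is simply the 32-bit word at table0_addr + (ord << 2), and a TABLE2 descriptor packs an entry length and an entry count into one 32-bit word as two 16-bit halves, which the driver separates by reinterpreting the word's storage (so the result depends on host byte order). A tiny sketch of that unpacking next to a shift-based version that does not (the packed layout here is assumed for illustration):

#include <stdio.h>
#include <stdint.h>
#include <string.h>

int main(void)
{
        /* One TABLE2-style descriptor word: low half = entry length,
         * high half = entry count (layout assumed for the sketch). */
        uint32_t field_info = 8u | (4u << 16);
        uint16_t halves[2];
        unsigned len, count;

        /* Driver-style: reinterpret the word's bytes as two u16s;
         * which half comes first depends on host endianness. */
        memcpy(halves, &field_info, sizeof(halves));
        printf("reinterpreted halves: %u and %u\n",
               (unsigned)halves[0], (unsigned)halves[1]);

        /* Byte-order independent alternative: mask and shift. */
        len = field_info & 0xffff;
        count = field_info >> 16;
        printf("len=%u count=%u total=%u bytes\n", len, count, len * count);
        return 0;
}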
@@ -624,7 +629,7 @@ static void ipw_init_ordinals(struct ipw_priv *priv)
624 629
625 priv->table2_addr = ipw_read32(priv, IPW_ORDINALS_TABLE_2); 630 priv->table2_addr = ipw_read32(priv, IPW_ORDINALS_TABLE_2);
626 priv->table2_len = ipw_read_reg32(priv, priv->table2_addr); 631 priv->table2_len = ipw_read_reg32(priv, priv->table2_addr);
627 priv->table2_len &= 0x0000ffff; /* use first two bytes */ 632 priv->table2_len &= 0x0000ffff; /* use first two bytes */
628 633
629 IPW_DEBUG_ORD("table 2 offset at 0x%08x, len = %i\n", 634 IPW_DEBUG_ORD("table 2 offset at 0x%08x, len = %i\n",
630 priv->table2_addr, priv->table2_len); 635 priv->table2_addr, priv->table2_len);
@@ -643,7 +648,7 @@ static ssize_t show_debug_level(struct device_driver *d, char *buf)
643 return sprintf(buf, "0x%08X\n", ipw_debug_level); 648 return sprintf(buf, "0x%08X\n", ipw_debug_level);
644} 649}
645static ssize_t store_debug_level(struct device_driver *d, 650static ssize_t store_debug_level(struct device_driver *d,
646 const char *buf, size_t count) 651 const char *buf, size_t count)
647{ 652{
648 char *p = (char *)buf; 653 char *p = (char *)buf;
649 u32 val; 654 u32 val;
@@ -668,11 +673,12 @@ static DRIVER_ATTR(debug_level, S_IWUSR | S_IRUGO,
668 show_debug_level, store_debug_level); 673 show_debug_level, store_debug_level);
669 674
670static ssize_t show_status(struct device *d, 675static ssize_t show_status(struct device *d,
671 struct device_attribute *attr, char *buf) 676 struct device_attribute *attr, char *buf)
672{ 677{
673 struct ipw_priv *p = d->driver_data; 678 struct ipw_priv *p = d->driver_data;
674 return sprintf(buf, "0x%08x\n", (int)p->status); 679 return sprintf(buf, "0x%08x\n", (int)p->status);
675} 680}
681
676static DEVICE_ATTR(status, S_IRUGO, show_status, NULL); 682static DEVICE_ATTR(status, S_IRUGO, show_status, NULL);
677 683
678static ssize_t show_cfg(struct device *d, struct device_attribute *attr, 684static ssize_t show_cfg(struct device *d, struct device_attribute *attr,
@@ -681,10 +687,11 @@ static ssize_t show_cfg(struct device *d, struct device_attribute *attr,
681 struct ipw_priv *p = d->driver_data; 687 struct ipw_priv *p = d->driver_data;
682 return sprintf(buf, "0x%08x\n", (int)p->config); 688 return sprintf(buf, "0x%08x\n", (int)p->config);
683} 689}
690
684static DEVICE_ATTR(cfg, S_IRUGO, show_cfg, NULL); 691static DEVICE_ATTR(cfg, S_IRUGO, show_cfg, NULL);
685 692
686static ssize_t show_nic_type(struct device *d, 693static ssize_t show_nic_type(struct device *d,
687 struct device_attribute *attr, char *buf) 694 struct device_attribute *attr, char *buf)
688{ 695{
689 struct ipw_priv *p = d->driver_data; 696 struct ipw_priv *p = d->driver_data;
690 u8 type = p->eeprom[EEPROM_NIC_TYPE]; 697 u8 type = p->eeprom[EEPROM_NIC_TYPE];
@@ -704,44 +711,50 @@ static ssize_t show_nic_type(struct device *d,
704 711
705 return sprintf(buf, "UNKNOWN\n"); 712 return sprintf(buf, "UNKNOWN\n");
706} 713}
714
707static DEVICE_ATTR(nic_type, S_IRUGO, show_nic_type, NULL); 715static DEVICE_ATTR(nic_type, S_IRUGO, show_nic_type, NULL);
708 716
709static ssize_t dump_error_log(struct device *d, 717static ssize_t dump_error_log(struct device *d,
710 struct device_attribute *attr, const char *buf, size_t count) 718 struct device_attribute *attr, const char *buf,
719 size_t count)
711{ 720{
712 char *p = (char *)buf; 721 char *p = (char *)buf;
713 722
714 if (p[0] == '1') 723 if (p[0] == '1')
715 ipw_dump_nic_error_log((struct ipw_priv*)d->driver_data); 724 ipw_dump_nic_error_log((struct ipw_priv *)d->driver_data);
716 725
717 return strnlen(buf, count); 726 return strnlen(buf, count);
718} 727}
728
719static DEVICE_ATTR(dump_errors, S_IWUSR, NULL, dump_error_log); 729static DEVICE_ATTR(dump_errors, S_IWUSR, NULL, dump_error_log);
720 730
721static ssize_t dump_event_log(struct device *d, 731static ssize_t dump_event_log(struct device *d,
722 struct device_attribute *attr, const char *buf, size_t count) 732 struct device_attribute *attr, const char *buf,
733 size_t count)
723{ 734{
724 char *p = (char *)buf; 735 char *p = (char *)buf;
725 736
726 if (p[0] == '1') 737 if (p[0] == '1')
727 ipw_dump_nic_event_log((struct ipw_priv*)d->driver_data); 738 ipw_dump_nic_event_log((struct ipw_priv *)d->driver_data);
728 739
729 return strnlen(buf, count); 740 return strnlen(buf, count);
730} 741}
742
731static DEVICE_ATTR(dump_events, S_IWUSR, NULL, dump_event_log); 743static DEVICE_ATTR(dump_events, S_IWUSR, NULL, dump_event_log);
732 744
733static ssize_t show_ucode_version(struct device *d, 745static ssize_t show_ucode_version(struct device *d,
734 struct device_attribute *attr, char *buf) 746 struct device_attribute *attr, char *buf)
735{ 747{
736 u32 len = sizeof(u32), tmp = 0; 748 u32 len = sizeof(u32), tmp = 0;
737 struct ipw_priv *p = d->driver_data; 749 struct ipw_priv *p = d->driver_data;
738 750
739 if(ipw_get_ordinal(p, IPW_ORD_STAT_UCODE_VERSION, &tmp, &len)) 751 if (ipw_get_ordinal(p, IPW_ORD_STAT_UCODE_VERSION, &tmp, &len))
740 return 0; 752 return 0;
741 753
742 return sprintf(buf, "0x%08x\n", tmp); 754 return sprintf(buf, "0x%08x\n", tmp);
743} 755}
744static DEVICE_ATTR(ucode_version, S_IWUSR|S_IRUGO, show_ucode_version, NULL); 756
757static DEVICE_ATTR(ucode_version, S_IWUSR | S_IRUGO, show_ucode_version, NULL);
745 758
746static ssize_t show_rtc(struct device *d, struct device_attribute *attr, 759static ssize_t show_rtc(struct device *d, struct device_attribute *attr,
747 char *buf) 760 char *buf)
@@ -749,36 +762,38 @@ static ssize_t show_rtc(struct device *d, struct device_attribute *attr,
749 u32 len = sizeof(u32), tmp = 0; 762 u32 len = sizeof(u32), tmp = 0;
750 struct ipw_priv *p = d->driver_data; 763 struct ipw_priv *p = d->driver_data;
751 764
752 if(ipw_get_ordinal(p, IPW_ORD_STAT_RTC, &tmp, &len)) 765 if (ipw_get_ordinal(p, IPW_ORD_STAT_RTC, &tmp, &len))
753 return 0; 766 return 0;
754 767
755 return sprintf(buf, "0x%08x\n", tmp); 768 return sprintf(buf, "0x%08x\n", tmp);
756} 769}
757static DEVICE_ATTR(rtc, S_IWUSR|S_IRUGO, show_rtc, NULL); 770
771static DEVICE_ATTR(rtc, S_IWUSR | S_IRUGO, show_rtc, NULL);
758 772
759/* 773/*
760 * Add a device attribute to view/control the delay between eeprom 774 * Add a device attribute to view/control the delay between eeprom
761 * operations. 775 * operations.
762 */ 776 */
763static ssize_t show_eeprom_delay(struct device *d, 777static ssize_t show_eeprom_delay(struct device *d,
764 struct device_attribute *attr, char *buf) 778 struct device_attribute *attr, char *buf)
765{ 779{
766 int n = ((struct ipw_priv*)d->driver_data)->eeprom_delay; 780 int n = ((struct ipw_priv *)d->driver_data)->eeprom_delay;
767 return sprintf(buf, "%i\n", n); 781 return sprintf(buf, "%i\n", n);
768} 782}
769static ssize_t store_eeprom_delay(struct device *d, 783static ssize_t store_eeprom_delay(struct device *d,
770 struct device_attribute *attr, const char *buf, 784 struct device_attribute *attr,
771 size_t count) 785 const char *buf, size_t count)
772{ 786{
773 struct ipw_priv *p = d->driver_data; 787 struct ipw_priv *p = d->driver_data;
774 sscanf(buf, "%i", &p->eeprom_delay); 788 sscanf(buf, "%i", &p->eeprom_delay);
775 return strnlen(buf, count); 789 return strnlen(buf, count);
776} 790}
777static DEVICE_ATTR(eeprom_delay, S_IWUSR|S_IRUGO, 791
778 show_eeprom_delay,store_eeprom_delay); 792static DEVICE_ATTR(eeprom_delay, S_IWUSR | S_IRUGO,
793 show_eeprom_delay, store_eeprom_delay);
779 794
780static ssize_t show_command_event_reg(struct device *d, 795static ssize_t show_command_event_reg(struct device *d,
781 struct device_attribute *attr, char *buf) 796 struct device_attribute *attr, char *buf)
782{ 797{
783 u32 reg = 0; 798 u32 reg = 0;
784 struct ipw_priv *p = d->driver_data; 799 struct ipw_priv *p = d->driver_data;
@@ -787,8 +802,8 @@ static ssize_t show_command_event_reg(struct device *d,
787 return sprintf(buf, "0x%08x\n", reg); 802 return sprintf(buf, "0x%08x\n", reg);
788} 803}
789static ssize_t store_command_event_reg(struct device *d, 804static ssize_t store_command_event_reg(struct device *d,
790 struct device_attribute *attr, const char *buf, 805 struct device_attribute *attr,
791 size_t count) 806 const char *buf, size_t count)
792{ 807{
793 u32 reg; 808 u32 reg;
794 struct ipw_priv *p = d->driver_data; 809 struct ipw_priv *p = d->driver_data;
@@ -797,11 +812,12 @@ static ssize_t store_command_event_reg(struct device *d,
797 ipw_write_reg32(p, CX2_INTERNAL_CMD_EVENT, reg); 812 ipw_write_reg32(p, CX2_INTERNAL_CMD_EVENT, reg);
798 return strnlen(buf, count); 813 return strnlen(buf, count);
799} 814}
800static DEVICE_ATTR(command_event_reg, S_IWUSR|S_IRUGO, 815
801 show_command_event_reg,store_command_event_reg); 816static DEVICE_ATTR(command_event_reg, S_IWUSR | S_IRUGO,
817 show_command_event_reg, store_command_event_reg);
802 818
803static ssize_t show_mem_gpio_reg(struct device *d, 819static ssize_t show_mem_gpio_reg(struct device *d,
804 struct device_attribute *attr, char *buf) 820 struct device_attribute *attr, char *buf)
805{ 821{
806 u32 reg = 0; 822 u32 reg = 0;
807 struct ipw_priv *p = d->driver_data; 823 struct ipw_priv *p = d->driver_data;
@@ -810,8 +826,8 @@ static ssize_t show_mem_gpio_reg(struct device *d,
810 return sprintf(buf, "0x%08x\n", reg); 826 return sprintf(buf, "0x%08x\n", reg);
811} 827}
812static ssize_t store_mem_gpio_reg(struct device *d, 828static ssize_t store_mem_gpio_reg(struct device *d,
813 struct device_attribute *attr, const char *buf, 829 struct device_attribute *attr,
814 size_t count) 830 const char *buf, size_t count)
815{ 831{
816 u32 reg; 832 u32 reg;
817 struct ipw_priv *p = d->driver_data; 833 struct ipw_priv *p = d->driver_data;
@@ -820,11 +836,12 @@ static ssize_t store_mem_gpio_reg(struct device *d,
820 ipw_write_reg32(p, 0x301100, reg); 836 ipw_write_reg32(p, 0x301100, reg);
821 return strnlen(buf, count); 837 return strnlen(buf, count);
822} 838}
823static DEVICE_ATTR(mem_gpio_reg, S_IWUSR|S_IRUGO, 839
824 show_mem_gpio_reg,store_mem_gpio_reg); 840static DEVICE_ATTR(mem_gpio_reg, S_IWUSR | S_IRUGO,
841 show_mem_gpio_reg, store_mem_gpio_reg);
825 842
826static ssize_t show_indirect_dword(struct device *d, 843static ssize_t show_indirect_dword(struct device *d,
827 struct device_attribute *attr, char *buf) 844 struct device_attribute *attr, char *buf)
828{ 845{
829 u32 reg = 0; 846 u32 reg = 0;
830 struct ipw_priv *priv = d->driver_data; 847 struct ipw_priv *priv = d->driver_data;
@@ -836,8 +853,8 @@ static ssize_t show_indirect_dword(struct device *d,
836 return sprintf(buf, "0x%08x\n", reg); 853 return sprintf(buf, "0x%08x\n", reg);
837} 854}
838static ssize_t store_indirect_dword(struct device *d, 855static ssize_t store_indirect_dword(struct device *d,
839 struct device_attribute *attr, const char *buf, 856 struct device_attribute *attr,
840 size_t count) 857 const char *buf, size_t count)
841{ 858{
842 struct ipw_priv *priv = d->driver_data; 859 struct ipw_priv *priv = d->driver_data;
843 860
@@ -845,11 +862,12 @@ static ssize_t store_indirect_dword(struct device *d,
845 priv->status |= STATUS_INDIRECT_DWORD; 862 priv->status |= STATUS_INDIRECT_DWORD;
846 return strnlen(buf, count); 863 return strnlen(buf, count);
847} 864}
848static DEVICE_ATTR(indirect_dword, S_IWUSR|S_IRUGO, 865
849 show_indirect_dword,store_indirect_dword); 866static DEVICE_ATTR(indirect_dword, S_IWUSR | S_IRUGO,
867 show_indirect_dword, store_indirect_dword);
850 868
851static ssize_t show_indirect_byte(struct device *d, 869static ssize_t show_indirect_byte(struct device *d,
852 struct device_attribute *attr, char *buf) 870 struct device_attribute *attr, char *buf)
853{ 871{
854 u8 reg = 0; 872 u8 reg = 0;
855 struct ipw_priv *priv = d->driver_data; 873 struct ipw_priv *priv = d->driver_data;
@@ -861,8 +879,8 @@ static ssize_t show_indirect_byte(struct device *d,
861 return sprintf(buf, "0x%02x\n", reg); 879 return sprintf(buf, "0x%02x\n", reg);
862} 880}
863static ssize_t store_indirect_byte(struct device *d, 881static ssize_t store_indirect_byte(struct device *d,
864 struct device_attribute *attr, const char *buf, 882 struct device_attribute *attr,
865 size_t count) 883 const char *buf, size_t count)
866{ 884{
867 struct ipw_priv *priv = d->driver_data; 885 struct ipw_priv *priv = d->driver_data;
868 886
@@ -870,11 +888,12 @@ static ssize_t store_indirect_byte(struct device *d,
870 priv->status |= STATUS_INDIRECT_BYTE; 888 priv->status |= STATUS_INDIRECT_BYTE;
871 return strnlen(buf, count); 889 return strnlen(buf, count);
872} 890}
873static DEVICE_ATTR(indirect_byte, S_IWUSR|S_IRUGO, 891
892static DEVICE_ATTR(indirect_byte, S_IWUSR | S_IRUGO,
874 show_indirect_byte, store_indirect_byte); 893 show_indirect_byte, store_indirect_byte);
875 894
876static ssize_t show_direct_dword(struct device *d, 895static ssize_t show_direct_dword(struct device *d,
877 struct device_attribute *attr, char *buf) 896 struct device_attribute *attr, char *buf)
878{ 897{
879 u32 reg = 0; 898 u32 reg = 0;
880 struct ipw_priv *priv = d->driver_data; 899 struct ipw_priv *priv = d->driver_data;
@@ -887,8 +906,8 @@ static ssize_t show_direct_dword(struct device *d,
887 return sprintf(buf, "0x%08x\n", reg); 906 return sprintf(buf, "0x%08x\n", reg);
888} 907}
889static ssize_t store_direct_dword(struct device *d, 908static ssize_t store_direct_dword(struct device *d,
890 struct device_attribute *attr, const char *buf, 909 struct device_attribute *attr,
891 size_t count) 910 const char *buf, size_t count)
892{ 911{
893 struct ipw_priv *priv = d->driver_data; 912 struct ipw_priv *priv = d->driver_data;
894 913
@@ -896,9 +915,9 @@ static ssize_t store_direct_dword(struct device *d,
896 priv->status |= STATUS_DIRECT_DWORD; 915 priv->status |= STATUS_DIRECT_DWORD;
897 return strnlen(buf, count); 916 return strnlen(buf, count);
898} 917}
899static DEVICE_ATTR(direct_dword, S_IWUSR|S_IRUGO,
900 show_direct_dword,store_direct_dword);
901 918
919static DEVICE_ATTR(direct_dword, S_IWUSR | S_IRUGO,
920 show_direct_dword, store_direct_dword);
902 921
903static inline int rf_kill_active(struct ipw_priv *priv) 922static inline int rf_kill_active(struct ipw_priv *priv)
904{ 923{
@@ -911,7 +930,7 @@ static inline int rf_kill_active(struct ipw_priv *priv)
911} 930}
912 931
913static ssize_t show_rf_kill(struct device *d, struct device_attribute *attr, 932static ssize_t show_rf_kill(struct device *d, struct device_attribute *attr,
914 char *buf) 933 char *buf)
915{ 934{
916 /* 0 - RF kill not enabled 935 /* 0 - RF kill not enabled
917 1 - SW based RF kill active (sysfs) 936 1 - SW based RF kill active (sysfs)
@@ -919,7 +938,7 @@ static ssize_t show_rf_kill(struct device *d, struct device_attribute *attr,
919 3 - Both HW and SW baed RF kill active */ 938 3 - Both HW and SW baed RF kill active */
920 struct ipw_priv *priv = d->driver_data; 939 struct ipw_priv *priv = d->driver_data;
921 int val = ((priv->status & STATUS_RF_KILL_SW) ? 0x1 : 0x0) | 940 int val = ((priv->status & STATUS_RF_KILL_SW) ? 0x1 : 0x0) |
922 (rf_kill_active(priv) ? 0x2 : 0x0); 941 (rf_kill_active(priv) ? 0x2 : 0x0);
923 return sprintf(buf, "%i\n", val); 942 return sprintf(buf, "%i\n", val);
924} 943}
925 944
@@ -927,7 +946,7 @@ static int ipw_radio_kill_sw(struct ipw_priv *priv, int disable_radio)
927{ 946{
928 if ((disable_radio ? 1 : 0) == 947 if ((disable_radio ? 1 : 0) ==
929 (priv->status & STATUS_RF_KILL_SW ? 1 : 0)) 948 (priv->status & STATUS_RF_KILL_SW ? 1 : 0))
930 return 0 ; 949 return 0;
931 950
932 IPW_DEBUG_RF_KILL("Manual SW RF Kill set to: RADIO %s\n", 951 IPW_DEBUG_RF_KILL("Manual SW RF Kill set to: RADIO %s\n",
933 disable_radio ? "OFF" : "ON"); 952 disable_radio ? "OFF" : "ON");
@@ -956,8 +975,8 @@ static int ipw_radio_kill_sw(struct ipw_priv *priv, int disable_radio)
956 return 1; 975 return 1;
957} 976}
958 977
959static ssize_t store_rf_kill(struct device *d, struct device_attribute *attr, 978static ssize_t store_rf_kill(struct device *d, struct device_attribute *attr,
960 const char *buf, size_t count) 979 const char *buf, size_t count)
961{ 980{
962 struct ipw_priv *priv = d->driver_data; 981 struct ipw_priv *priv = d->driver_data;
963 982
@@ -965,7 +984,8 @@ static ssize_t store_rf_kill(struct device *d, struct device_attribute *attr,
965 984
966 return count; 985 return count;
967} 986}
968static DEVICE_ATTR(rf_kill, S_IWUSR|S_IRUGO, show_rf_kill, store_rf_kill); 987
988static DEVICE_ATTR(rf_kill, S_IWUSR | S_IRUGO, show_rf_kill, store_rf_kill);
969 989
970static void ipw_irq_tasklet(struct ipw_priv *priv) 990static void ipw_irq_tasklet(struct ipw_priv *priv)
971{ 991{
@@ -990,7 +1010,7 @@ static void ipw_irq_tasklet(struct ipw_priv *priv)
990 1010
991 if (inta & CX2_INTA_BIT_TX_CMD_QUEUE) { 1011 if (inta & CX2_INTA_BIT_TX_CMD_QUEUE) {
992 IPW_DEBUG_HC("Command completed.\n"); 1012 IPW_DEBUG_HC("Command completed.\n");
993 rc = ipw_queue_tx_reclaim( priv, &priv->txq_cmd, -1); 1013 rc = ipw_queue_tx_reclaim(priv, &priv->txq_cmd, -1);
994 priv->status &= ~STATUS_HCMD_ACTIVE; 1014 priv->status &= ~STATUS_HCMD_ACTIVE;
995 wake_up_interruptible(&priv->wait_command_queue); 1015 wake_up_interruptible(&priv->wait_command_queue);
996 handled |= CX2_INTA_BIT_TX_CMD_QUEUE; 1016 handled |= CX2_INTA_BIT_TX_CMD_QUEUE;
@@ -998,25 +1018,25 @@ static void ipw_irq_tasklet(struct ipw_priv *priv)
998 1018
999 if (inta & CX2_INTA_BIT_TX_QUEUE_1) { 1019 if (inta & CX2_INTA_BIT_TX_QUEUE_1) {
1000 IPW_DEBUG_TX("TX_QUEUE_1\n"); 1020 IPW_DEBUG_TX("TX_QUEUE_1\n");
1001 rc = ipw_queue_tx_reclaim( priv, &priv->txq[0], 0); 1021 rc = ipw_queue_tx_reclaim(priv, &priv->txq[0], 0);
1002 handled |= CX2_INTA_BIT_TX_QUEUE_1; 1022 handled |= CX2_INTA_BIT_TX_QUEUE_1;
1003 } 1023 }
1004 1024
1005 if (inta & CX2_INTA_BIT_TX_QUEUE_2) { 1025 if (inta & CX2_INTA_BIT_TX_QUEUE_2) {
1006 IPW_DEBUG_TX("TX_QUEUE_2\n"); 1026 IPW_DEBUG_TX("TX_QUEUE_2\n");
1007 rc = ipw_queue_tx_reclaim( priv, &priv->txq[1], 1); 1027 rc = ipw_queue_tx_reclaim(priv, &priv->txq[1], 1);
1008 handled |= CX2_INTA_BIT_TX_QUEUE_2; 1028 handled |= CX2_INTA_BIT_TX_QUEUE_2;
1009 } 1029 }
1010 1030
1011 if (inta & CX2_INTA_BIT_TX_QUEUE_3) { 1031 if (inta & CX2_INTA_BIT_TX_QUEUE_3) {
1012 IPW_DEBUG_TX("TX_QUEUE_3\n"); 1032 IPW_DEBUG_TX("TX_QUEUE_3\n");
1013 rc = ipw_queue_tx_reclaim( priv, &priv->txq[2], 2); 1033 rc = ipw_queue_tx_reclaim(priv, &priv->txq[2], 2);
1014 handled |= CX2_INTA_BIT_TX_QUEUE_3; 1034 handled |= CX2_INTA_BIT_TX_QUEUE_3;
1015 } 1035 }
1016 1036
1017 if (inta & CX2_INTA_BIT_TX_QUEUE_4) { 1037 if (inta & CX2_INTA_BIT_TX_QUEUE_4) {
1018 IPW_DEBUG_TX("TX_QUEUE_4\n"); 1038 IPW_DEBUG_TX("TX_QUEUE_4\n");
1019 rc = ipw_queue_tx_reclaim( priv, &priv->txq[3], 3); 1039 rc = ipw_queue_tx_reclaim(priv, &priv->txq[3], 3);
1020 handled |= CX2_INTA_BIT_TX_QUEUE_4; 1040 handled |= CX2_INTA_BIT_TX_QUEUE_4;
1021 } 1041 }
1022 1042
@@ -1074,8 +1094,7 @@ static void ipw_irq_tasklet(struct ipw_priv *priv)
1074 } 1094 }
1075 1095
1076 if (handled != inta) { 1096 if (handled != inta) {
1077 IPW_ERROR("Unhandled INTA bits 0x%08x\n", 1097 IPW_ERROR("Unhandled INTA bits 0x%08x\n", inta & ~handled);
1078 inta & ~handled);
1079 } 1098 }
1080 1099
1081 /* enable all interrupts */ 1100 /* enable all interrupts */
@@ -1143,7 +1162,7 @@ static char *get_cmd_string(u8 cmd)
1143 return "UNKNOWN"; 1162 return "UNKNOWN";
1144 } 1163 }
1145} 1164}
1146#endif /* CONFIG_IPW_DEBUG */ 1165#endif /* CONFIG_IPW_DEBUG */
1147 1166
1148#define HOST_COMPLETE_TIMEOUT HZ 1167#define HOST_COMPLETE_TIMEOUT HZ
1149static int ipw_send_cmd(struct ipw_priv *priv, struct host_cmd *cmd) 1168static int ipw_send_cmd(struct ipw_priv *priv, struct host_cmd *cmd)
@@ -1159,15 +1178,16 @@ static int ipw_send_cmd(struct ipw_priv *priv, struct host_cmd *cmd)
1159 1178
1160 IPW_DEBUG_HC("Sending %s command (#%d), %d bytes\n", 1179 IPW_DEBUG_HC("Sending %s command (#%d), %d bytes\n",
1161 get_cmd_string(cmd->cmd), cmd->cmd, cmd->len); 1180 get_cmd_string(cmd->cmd), cmd->cmd, cmd->len);
1162 printk_buf(IPW_DL_HOST_COMMAND, (u8*)cmd->param, cmd->len); 1181 printk_buf(IPW_DL_HOST_COMMAND, (u8 *) cmd->param, cmd->len);
1163 1182
1164 rc = ipw_queue_tx_hcmd(priv, cmd->cmd, &cmd->param, cmd->len, 0); 1183 rc = ipw_queue_tx_hcmd(priv, cmd->cmd, &cmd->param, cmd->len, 0);
1165 if (rc) 1184 if (rc)
1166 return rc; 1185 return rc;
1167 1186
1168 rc = wait_event_interruptible_timeout( 1187 rc = wait_event_interruptible_timeout(priv->wait_command_queue,
1169 priv->wait_command_queue, !(priv->status & STATUS_HCMD_ACTIVE), 1188 !(priv->
1170 HOST_COMPLETE_TIMEOUT); 1189 status & STATUS_HCMD_ACTIVE),
1190 HOST_COMPLETE_TIMEOUT);
1171 if (rc == 0) { 1191 if (rc == 0) {
1172 IPW_DEBUG_INFO("Command completion failed out after %dms.\n", 1192 IPW_DEBUG_INFO("Command completion failed out after %dms.\n",
1173 jiffies_to_msecs(HOST_COMPLETE_TIMEOUT)); 1193 jiffies_to_msecs(HOST_COMPLETE_TIMEOUT));
@@ -1215,7 +1235,7 @@ static int ipw_send_system_config(struct ipw_priv *priv,
1215 return -1; 1235 return -1;
1216 } 1236 }
1217 1237
1218 memcpy(&cmd.param,config,sizeof(*config)); 1238 memcpy(&cmd.param, config, sizeof(*config));
1219 if (ipw_send_cmd(priv, &cmd)) { 1239 if (ipw_send_cmd(priv, &cmd)) {
1220 IPW_ERROR("failed to send SYSTEM_CONFIG command\n"); 1240 IPW_ERROR("failed to send SYSTEM_CONFIG command\n");
1221 return -1; 1241 return -1;
@@ -1224,7 +1244,7 @@ static int ipw_send_system_config(struct ipw_priv *priv,
1224 return 0; 1244 return 0;
1225} 1245}
1226 1246
1227static int ipw_send_ssid(struct ipw_priv *priv, u8 *ssid, int len) 1247static int ipw_send_ssid(struct ipw_priv *priv, u8 * ssid, int len)
1228{ 1248{
1229 struct host_cmd cmd = { 1249 struct host_cmd cmd = {
1230 .cmd = IPW_CMD_SSID, 1250 .cmd = IPW_CMD_SSID,
@@ -1245,7 +1265,7 @@ static int ipw_send_ssid(struct ipw_priv *priv, u8 *ssid, int len)
1245 return 0; 1265 return 0;
1246} 1266}
1247 1267
1248static int ipw_send_adapter_address(struct ipw_priv *priv, u8 *mac) 1268static int ipw_send_adapter_address(struct ipw_priv *priv, u8 * mac)
1249{ 1269{
1250 struct host_cmd cmd = { 1270 struct host_cmd cmd = {
1251 .cmd = IPW_CMD_ADAPTER_ADDRESS, 1271 .cmd = IPW_CMD_ADAPTER_ADDRESS,
@@ -1284,9 +1304,6 @@ static void ipw_adapter_restart(void *adapter)
1284 } 1304 }
1285} 1305}
1286 1306
1287
1288
1289
1290#define IPW_SCAN_CHECK_WATCHDOG (5 * HZ) 1307#define IPW_SCAN_CHECK_WATCHDOG (5 * HZ)
1291 1308
1292static void ipw_scan_check(void *data) 1309static void ipw_scan_check(void *data)
@@ -1313,7 +1330,7 @@ static int ipw_send_scan_request_ext(struct ipw_priv *priv,
1313 return -1; 1330 return -1;
1314 } 1331 }
1315 1332
1316 memcpy(&cmd.param,request,sizeof(*request)); 1333 memcpy(&cmd.param, request, sizeof(*request));
1317 if (ipw_send_cmd(priv, &cmd)) { 1334 if (ipw_send_cmd(priv, &cmd)) {
1318 IPW_ERROR("failed to send SCAN_REQUEST_EXT command\n"); 1335 IPW_ERROR("failed to send SCAN_REQUEST_EXT command\n");
1319 return -1; 1336 return -1;
@@ -1351,7 +1368,7 @@ static int ipw_set_sensitivity(struct ipw_priv *priv, u16 sens)
1351 .len = sizeof(struct ipw_sensitivity_calib) 1368 .len = sizeof(struct ipw_sensitivity_calib)
1352 }; 1369 };
1353 struct ipw_sensitivity_calib *calib = (struct ipw_sensitivity_calib *) 1370 struct ipw_sensitivity_calib *calib = (struct ipw_sensitivity_calib *)
1354 &cmd.param; 1371 &cmd.param;
1355 calib->beacon_rssi_raw = sens; 1372 calib->beacon_rssi_raw = sens;
1356 if (ipw_send_cmd(priv, &cmd)) { 1373 if (ipw_send_cmd(priv, &cmd)) {
1357 IPW_ERROR("failed to send SENSITIVITY CALIB command\n"); 1374 IPW_ERROR("failed to send SENSITIVITY CALIB command\n");
@@ -1374,7 +1391,7 @@ static int ipw_send_associate(struct ipw_priv *priv,
1374 return -1; 1391 return -1;
1375 } 1392 }
1376 1393
1377 memcpy(&cmd.param,associate,sizeof(*associate)); 1394 memcpy(&cmd.param, associate, sizeof(*associate));
1378 if (ipw_send_cmd(priv, &cmd)) { 1395 if (ipw_send_cmd(priv, &cmd)) {
1379 IPW_ERROR("failed to send ASSOCIATE command\n"); 1396 IPW_ERROR("failed to send ASSOCIATE command\n");
1380 return -1; 1397 return -1;
@@ -1396,7 +1413,7 @@ static int ipw_send_supported_rates(struct ipw_priv *priv,
1396 return -1; 1413 return -1;
1397 } 1414 }
1398 1415
1399 memcpy(&cmd.param,rates,sizeof(*rates)); 1416 memcpy(&cmd.param, rates, sizeof(*rates));
1400 if (ipw_send_cmd(priv, &cmd)) { 1417 if (ipw_send_cmd(priv, &cmd)) {
1401 IPW_ERROR("failed to send SUPPORTED_RATES command\n"); 1418 IPW_ERROR("failed to send SUPPORTED_RATES command\n");
1402 return -1; 1419 return -1;
@@ -1440,7 +1457,7 @@ static int ipw_send_card_disable(struct ipw_priv *priv, u32 phy_off)
1440 return -1; 1457 return -1;
1441 } 1458 }
1442 1459
1443 *((u32*)&cmd.param) = phy_off; 1460 *((u32 *) & cmd.param) = phy_off;
1444 1461
1445 if (ipw_send_cmd(priv, &cmd)) { 1462 if (ipw_send_cmd(priv, &cmd)) {
1446 IPW_ERROR("failed to send CARD_DISABLE command\n"); 1463 IPW_ERROR("failed to send CARD_DISABLE command\n");
@@ -1451,8 +1468,7 @@ static int ipw_send_card_disable(struct ipw_priv *priv, u32 phy_off)
1451} 1468}
1452#endif 1469#endif
1453 1470
1454static int ipw_send_tx_power(struct ipw_priv *priv, 1471static int ipw_send_tx_power(struct ipw_priv *priv, struct ipw_tx_power *power)
1455 struct ipw_tx_power *power)
1456{ 1472{
1457 struct host_cmd cmd = { 1473 struct host_cmd cmd = {
1458 .cmd = IPW_CMD_TX_POWER, 1474 .cmd = IPW_CMD_TX_POWER,
@@ -1464,7 +1480,7 @@ static int ipw_send_tx_power(struct ipw_priv *priv,
1464 return -1; 1480 return -1;
1465 } 1481 }
1466 1482
1467 memcpy(&cmd.param,power,sizeof(*power)); 1483 memcpy(&cmd.param, power, sizeof(*power));
1468 if (ipw_send_cmd(priv, &cmd)) { 1484 if (ipw_send_cmd(priv, &cmd)) {
1469 IPW_ERROR("failed to send TX_POWER command\n"); 1485 IPW_ERROR("failed to send TX_POWER command\n");
1470 return -1; 1486 return -1;
@@ -1527,7 +1543,7 @@ static int ipw_send_power_mode(struct ipw_priv *priv, u32 mode)
1527 .cmd = IPW_CMD_POWER_MODE, 1543 .cmd = IPW_CMD_POWER_MODE,
1528 .len = sizeof(u32) 1544 .len = sizeof(u32)
1529 }; 1545 };
1530 u32 *param = (u32*)(&cmd.param); 1546 u32 *param = (u32 *) (&cmd.param);
1531 1547
1532 if (!priv) { 1548 if (!priv) {
1533 IPW_ERROR("Invalid args\n"); 1549 IPW_ERROR("Invalid args\n");
@@ -1585,67 +1601,67 @@ static inline void eeprom_write_reg(struct ipw_priv *p, u32 data)
1585} 1601}
1586 1602
1587/* perform a chip select operation */ 1603/* perform a chip select operation */
1588static inline void eeprom_cs(struct ipw_priv* priv) 1604static inline void eeprom_cs(struct ipw_priv *priv)
1589{ 1605{
1590 eeprom_write_reg(priv,0); 1606 eeprom_write_reg(priv, 0);
1591 eeprom_write_reg(priv,EEPROM_BIT_CS); 1607 eeprom_write_reg(priv, EEPROM_BIT_CS);
1592 eeprom_write_reg(priv,EEPROM_BIT_CS|EEPROM_BIT_SK); 1608 eeprom_write_reg(priv, EEPROM_BIT_CS | EEPROM_BIT_SK);
1593 eeprom_write_reg(priv,EEPROM_BIT_CS); 1609 eeprom_write_reg(priv, EEPROM_BIT_CS);
1594} 1610}
1595 1611
1596/* perform a chip select operation */ 1612/* perform a chip select operation */
1597static inline void eeprom_disable_cs(struct ipw_priv* priv) 1613static inline void eeprom_disable_cs(struct ipw_priv *priv)
1598{ 1614{
1599 eeprom_write_reg(priv,EEPROM_BIT_CS); 1615 eeprom_write_reg(priv, EEPROM_BIT_CS);
1600 eeprom_write_reg(priv,0); 1616 eeprom_write_reg(priv, 0);
1601 eeprom_write_reg(priv,EEPROM_BIT_SK); 1617 eeprom_write_reg(priv, EEPROM_BIT_SK);
1602} 1618}
1603 1619
1604/* push a single bit down to the eeprom */ 1620/* push a single bit down to the eeprom */
1605static inline void eeprom_write_bit(struct ipw_priv *p,u8 bit) 1621static inline void eeprom_write_bit(struct ipw_priv *p, u8 bit)
1606{ 1622{
1607 int d = ( bit ? EEPROM_BIT_DI : 0); 1623 int d = (bit ? EEPROM_BIT_DI : 0);
1608 eeprom_write_reg(p,EEPROM_BIT_CS|d); 1624 eeprom_write_reg(p, EEPROM_BIT_CS | d);
1609 eeprom_write_reg(p,EEPROM_BIT_CS|d|EEPROM_BIT_SK); 1625 eeprom_write_reg(p, EEPROM_BIT_CS | d | EEPROM_BIT_SK);
1610} 1626}
1611 1627
1612/* push an opcode followed by an address down to the eeprom */ 1628/* push an opcode followed by an address down to the eeprom */
1613static void eeprom_op(struct ipw_priv* priv, u8 op, u8 addr) 1629static void eeprom_op(struct ipw_priv *priv, u8 op, u8 addr)
1614{ 1630{
1615 int i; 1631 int i;
1616 1632
1617 eeprom_cs(priv); 1633 eeprom_cs(priv);
1618 eeprom_write_bit(priv,1); 1634 eeprom_write_bit(priv, 1);
1619 eeprom_write_bit(priv,op&2); 1635 eeprom_write_bit(priv, op & 2);
1620 eeprom_write_bit(priv,op&1); 1636 eeprom_write_bit(priv, op & 1);
1621 for ( i=7; i>=0; i-- ) { 1637 for (i = 7; i >= 0; i--) {
1622 eeprom_write_bit(priv,addr&(1<<i)); 1638 eeprom_write_bit(priv, addr & (1 << i));
1623 } 1639 }
1624} 1640}
1625 1641
1626/* pull 16 bits off the eeprom, one bit at a time */ 1642/* pull 16 bits off the eeprom, one bit at a time */
1627static u16 eeprom_read_u16(struct ipw_priv* priv, u8 addr) 1643static u16 eeprom_read_u16(struct ipw_priv *priv, u8 addr)
1628{ 1644{
1629 int i; 1645 int i;
1630 u16 r=0; 1646 u16 r = 0;
1631 1647
1632 /* Send READ Opcode */ 1648 /* Send READ Opcode */
1633 eeprom_op(priv,EEPROM_CMD_READ,addr); 1649 eeprom_op(priv, EEPROM_CMD_READ, addr);
1634 1650
1635 /* Send dummy bit */ 1651 /* Send dummy bit */
1636 eeprom_write_reg(priv,EEPROM_BIT_CS); 1652 eeprom_write_reg(priv, EEPROM_BIT_CS);
1637 1653
1638 /* Read the byte off the eeprom one bit at a time */ 1654 /* Read the byte off the eeprom one bit at a time */
1639 for ( i=0; i<16; i++ ) { 1655 for (i = 0; i < 16; i++) {
1640 u32 data = 0; 1656 u32 data = 0;
1641 eeprom_write_reg(priv,EEPROM_BIT_CS|EEPROM_BIT_SK); 1657 eeprom_write_reg(priv, EEPROM_BIT_CS | EEPROM_BIT_SK);
1642 eeprom_write_reg(priv,EEPROM_BIT_CS); 1658 eeprom_write_reg(priv, EEPROM_BIT_CS);
1643 data = ipw_read_reg32(priv,FW_MEM_REG_EEPROM_ACCESS); 1659 data = ipw_read_reg32(priv, FW_MEM_REG_EEPROM_ACCESS);
1644 r = (r<<1) | ((data & EEPROM_BIT_DO)?1:0); 1660 r = (r << 1) | ((data & EEPROM_BIT_DO) ? 1 : 0);
1645 } 1661 }
1646 1662
1647 /* Send another dummy bit */ 1663 /* Send another dummy bit */
1648 eeprom_write_reg(priv,0); 1664 eeprom_write_reg(priv, 0);
1649 eeprom_disable_cs(priv); 1665 eeprom_disable_cs(priv);
1650 1666
1651 return r; 1667 return r;
@@ -1653,9 +1669,9 @@ static u16 eeprom_read_u16(struct ipw_priv* priv, u8 addr)
1653 1669
1654/* helper function for pulling the mac address out of the private */ 1670/* helper function for pulling the mac address out of the private */
1655/* data's copy of the eeprom data */ 1671/* data's copy of the eeprom data */
1656static void eeprom_parse_mac(struct ipw_priv* priv, u8* mac) 1672static void eeprom_parse_mac(struct ipw_priv *priv, u8 * mac)
1657{ 1673{
1658 u8* ee = (u8*)priv->eeprom; 1674 u8 *ee = (u8 *) priv->eeprom;
1659 memcpy(mac, &ee[EEPROM_MAC_ADDRESS], 6); 1675 memcpy(mac, &ee[EEPROM_MAC_ADDRESS], 6);
1660} 1676}
1661 1677
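The EEPROM helpers above are only reformatted by this patch, but the protocol they implement is worth a note: they bit-bang a microwire-style serial EEPROM, clocking out a start bit, two opcode bits and eight address bits MSB first, then clocking sixteen data bits back in, each one shifted into the bottom of the accumulating result. The shift-and-accumulate part can be sketched stand-alone by swapping the chip-select and clock register writes for a fake device that just serves up a stored word:

#include <stdio.h>
#include <stdint.h>

/* Fake EEPROM word and a bit cursor standing in for the real hardware. */
static uint16_t fake_word = 0xbeef;
static int bit_cursor = 15;             /* the MSB comes out first */

static int clock_in_bit(void)
{
        int bit = (fake_word >> bit_cursor) & 1;

        if (bit_cursor > 0)
                bit_cursor--;
        return bit;
}

/* Rebuild the 16-bit value the same way eeprom_read_u16() does:
 * shift left and OR in each freshly clocked bit. */
static uint16_t read_u16(void)
{
        uint16_t r = 0;
        int i;

        for (i = 0; i < 16; i++)
                r = (uint16_t)((r << 1) | clock_in_bit());
        return r;
}

int main(void)
{
        printf("read back 0x%04x\n", (unsigned)read_u16());
        return 0;
}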
@@ -1670,26 +1686,25 @@ static void eeprom_parse_mac(struct ipw_priv* priv, u8* mac)
1670static void ipw_eeprom_init_sram(struct ipw_priv *priv) 1686static void ipw_eeprom_init_sram(struct ipw_priv *priv)
1671{ 1687{
1672 int i; 1688 int i;
1673 u16 *eeprom = (u16 *)priv->eeprom; 1689 u16 *eeprom = (u16 *) priv->eeprom;
1674 1690
1675 IPW_DEBUG_TRACE(">>\n"); 1691 IPW_DEBUG_TRACE(">>\n");
1676 1692
1677 /* read entire contents of eeprom into private buffer */ 1693 /* read entire contents of eeprom into private buffer */
1678 for ( i=0; i<128; i++ ) 1694 for (i = 0; i < 128; i++)
1679 eeprom[i] = eeprom_read_u16(priv,(u8)i); 1695 eeprom[i] = eeprom_read_u16(priv, (u8) i);
1680 1696
1681 /* 1697 /*
1682 If the data looks correct, then copy it to our private 1698 If the data looks correct, then copy it to our private
1683 copy. Otherwise let the firmware know to perform the operation 1699 copy. Otherwise let the firmware know to perform the operation
1684 on it's own 1700 on it's own
1685 */ 1701 */
1686 if ((priv->eeprom + EEPROM_VERSION) != 0) { 1702 if ((priv->eeprom + EEPROM_VERSION) != 0) {
1687 IPW_DEBUG_INFO("Writing EEPROM data into SRAM\n"); 1703 IPW_DEBUG_INFO("Writing EEPROM data into SRAM\n");
1688 1704
1689 /* write the eeprom data to sram */ 1705 /* write the eeprom data to sram */
1690 for( i=0; i<CX2_EEPROM_IMAGE_SIZE; i++ ) 1706 for (i = 0; i < CX2_EEPROM_IMAGE_SIZE; i++)
1691 ipw_write8(priv, IPW_EEPROM_DATA + i, 1707 ipw_write8(priv, IPW_EEPROM_DATA + i, priv->eeprom[i]);
1692 priv->eeprom[i]);
1693 1708
1694 /* Do not load eeprom data on fatal error or suspend */ 1709 /* Do not load eeprom data on fatal error or suspend */
1695 ipw_write32(priv, IPW_EEPROM_LOAD_DISABLE, 0); 1710 ipw_write32(priv, IPW_EEPROM_LOAD_DISABLE, 0);
@@ -1703,11 +1718,11 @@ static void ipw_eeprom_init_sram(struct ipw_priv *priv)
1703 IPW_DEBUG_TRACE("<<\n"); 1718 IPW_DEBUG_TRACE("<<\n");
1704} 1719}
1705 1720
1706
1707static inline void ipw_zero_memory(struct ipw_priv *priv, u32 start, u32 count) 1721static inline void ipw_zero_memory(struct ipw_priv *priv, u32 start, u32 count)
1708{ 1722{
1709 count >>= 2; 1723 count >>= 2;
1710 if (!count) return; 1724 if (!count)
1725 return;
1711 _ipw_write32(priv, CX2_AUTOINC_ADDR, start); 1726 _ipw_write32(priv, CX2_AUTOINC_ADDR, start);
1712 while (count--) 1727 while (count--)
1713 _ipw_write32(priv, CX2_AUTOINC_DATA, 0); 1728 _ipw_write32(priv, CX2_AUTOINC_DATA, 0);
@@ -1721,7 +1736,7 @@ static inline void ipw_fw_dma_reset_command_blocks(struct ipw_priv *priv)
1721} 1736}
1722 1737
1723static int ipw_fw_dma_enable(struct ipw_priv *priv) 1738static int ipw_fw_dma_enable(struct ipw_priv *priv)
1724{ /* start dma engine but no transfers yet*/ 1739{ /* start dma engine but no transfers yet */
1725 1740
1726 IPW_DEBUG_FW(">> : \n"); 1741 IPW_DEBUG_FW(">> : \n");
1727 1742
@@ -1749,12 +1764,16 @@ static void ipw_fw_dma_abort(struct ipw_priv *priv)
1749 IPW_DEBUG_FW("<< \n"); 1764 IPW_DEBUG_FW("<< \n");
1750} 1765}
1751 1766
1752static int ipw_fw_dma_write_command_block(struct ipw_priv *priv, int index, struct command_block *cb) 1767static int ipw_fw_dma_write_command_block(struct ipw_priv *priv, int index,
1768 struct command_block *cb)
1753{ 1769{
1754 u32 address = CX2_SHARED_SRAM_DMA_CONTROL + (sizeof(struct command_block) * index); 1770 u32 address =
1771 CX2_SHARED_SRAM_DMA_CONTROL +
1772 (sizeof(struct command_block) * index);
1755 IPW_DEBUG_FW(">> :\n"); 1773 IPW_DEBUG_FW(">> :\n");
1756 1774
1757 ipw_write_indirect(priv, address, (u8*)cb, (int)sizeof(struct command_block)); 1775 ipw_write_indirect(priv, address, (u8 *) cb,
1776 (int)sizeof(struct command_block));
1758 1777
1759 IPW_DEBUG_FW("<< :\n"); 1778 IPW_DEBUG_FW("<< :\n");
1760 return 0; 1779 return 0;
@@ -1764,17 +1783,20 @@ static int ipw_fw_dma_write_command_block(struct ipw_priv *priv, int index, stru
1764static int ipw_fw_dma_kick(struct ipw_priv *priv) 1783static int ipw_fw_dma_kick(struct ipw_priv *priv)
1765{ 1784{
1766 u32 control = 0; 1785 u32 control = 0;
1767 u32 index=0; 1786 u32 index = 0;
1768 1787
1769 IPW_DEBUG_FW(">> :\n"); 1788 IPW_DEBUG_FW(">> :\n");
1770 1789
1771 for (index = 0; index < priv->sram_desc.last_cb_index; index++) 1790 for (index = 0; index < priv->sram_desc.last_cb_index; index++)
1772 ipw_fw_dma_write_command_block(priv, index, &priv->sram_desc.cb_list[index]); 1791 ipw_fw_dma_write_command_block(priv, index,
1792 &priv->sram_desc.cb_list[index]);
1773 1793
1774 /* Enable the DMA in the CSR register */ 1794 /* Enable the DMA in the CSR register */
1775 ipw_clear_bit(priv, CX2_RESET_REG,CX2_RESET_REG_MASTER_DISABLED | CX2_RESET_REG_STOP_MASTER); 1795 ipw_clear_bit(priv, CX2_RESET_REG,
1796 CX2_RESET_REG_MASTER_DISABLED |
1797 CX2_RESET_REG_STOP_MASTER);
1776 1798
1777 /* Set the Start bit. */ 1799 /* Set the Start bit. */
1778 control = DMA_CONTROL_SMALL_CB_CONST_VALUE | DMA_CB_START; 1800 control = DMA_CONTROL_SMALL_CB_CONST_VALUE | DMA_CB_START;
1779 ipw_write_reg32(priv, CX2_DMA_I_DMA_CONTROL, control); 1801 ipw_write_reg32(priv, CX2_DMA_I_DMA_CONTROL, control);
1780 1802
@@ -1785,25 +1807,25 @@ static int ipw_fw_dma_kick(struct ipw_priv *priv)
1785static void ipw_fw_dma_dump_command_block(struct ipw_priv *priv) 1807static void ipw_fw_dma_dump_command_block(struct ipw_priv *priv)
1786{ 1808{
1787 u32 address; 1809 u32 address;
1788 u32 register_value=0; 1810 u32 register_value = 0;
1789 u32 cb_fields_address=0; 1811 u32 cb_fields_address = 0;
1790 1812
1791 IPW_DEBUG_FW(">> :\n"); 1813 IPW_DEBUG_FW(">> :\n");
1792 address = ipw_read_reg32(priv,CX2_DMA_I_CURRENT_CB); 1814 address = ipw_read_reg32(priv, CX2_DMA_I_CURRENT_CB);
1793 IPW_DEBUG_FW_INFO("Current CB is 0x%x \n",address); 1815 IPW_DEBUG_FW_INFO("Current CB is 0x%x \n", address);
1794 1816
1795 /* Read the DMA Controlor register */ 1817 /* Read the DMA Controlor register */
1796 register_value = ipw_read_reg32(priv, CX2_DMA_I_DMA_CONTROL); 1818 register_value = ipw_read_reg32(priv, CX2_DMA_I_DMA_CONTROL);
1797 IPW_DEBUG_FW_INFO("CX2_DMA_I_DMA_CONTROL is 0x%x \n",register_value); 1819 IPW_DEBUG_FW_INFO("CX2_DMA_I_DMA_CONTROL is 0x%x \n", register_value);
1798 1820
1799 /* Print the CB values*/ 1821 /* Print the CB values */
1800 cb_fields_address = address; 1822 cb_fields_address = address;
1801 register_value = ipw_read_reg32(priv, cb_fields_address); 1823 register_value = ipw_read_reg32(priv, cb_fields_address);
1802 IPW_DEBUG_FW_INFO("Current CB ControlField is 0x%x \n",register_value); 1824 IPW_DEBUG_FW_INFO("Current CB ControlField is 0x%x \n", register_value);
1803 1825
1804 cb_fields_address += sizeof(u32); 1826 cb_fields_address += sizeof(u32);
1805 register_value = ipw_read_reg32(priv, cb_fields_address); 1827 register_value = ipw_read_reg32(priv, cb_fields_address);
1806 IPW_DEBUG_FW_INFO("Current CB Source Field is 0x%x \n",register_value); 1828 IPW_DEBUG_FW_INFO("Current CB Source Field is 0x%x \n", register_value);
1807 1829
1808 cb_fields_address += sizeof(u32); 1830 cb_fields_address += sizeof(u32);
1809 register_value = ipw_read_reg32(priv, cb_fields_address); 1831 register_value = ipw_read_reg32(priv, cb_fields_address);
@@ -1812,7 +1834,7 @@ static void ipw_fw_dma_dump_command_block(struct ipw_priv *priv)
1812 1834
1813 cb_fields_address += sizeof(u32); 1835 cb_fields_address += sizeof(u32);
1814 register_value = ipw_read_reg32(priv, cb_fields_address); 1836 register_value = ipw_read_reg32(priv, cb_fields_address);
1815 IPW_DEBUG_FW_INFO("Current CB Status Field is 0x%x \n",register_value); 1837 IPW_DEBUG_FW_INFO("Current CB Status Field is 0x%x \n", register_value);
1816 1838
1817 IPW_DEBUG_FW(">> :\n"); 1839 IPW_DEBUG_FW(">> :\n");
1818} 1840}
@@ -1823,13 +1845,13 @@ static int ipw_fw_dma_command_block_index(struct ipw_priv *priv)
1823 u32 current_cb_index = 0; 1845 u32 current_cb_index = 0;
1824 1846
1825 IPW_DEBUG_FW("<< :\n"); 1847 IPW_DEBUG_FW("<< :\n");
1826 current_cb_address= ipw_read_reg32(priv, CX2_DMA_I_CURRENT_CB); 1848 current_cb_address = ipw_read_reg32(priv, CX2_DMA_I_CURRENT_CB);
1827 1849
1828 current_cb_index = (current_cb_address - CX2_SHARED_SRAM_DMA_CONTROL )/ 1850 current_cb_index = (current_cb_address - CX2_SHARED_SRAM_DMA_CONTROL) /
1829 sizeof (struct command_block); 1851 sizeof(struct command_block);
1830 1852
1831 IPW_DEBUG_FW_INFO("Current CB index 0x%x address = 0x%X \n", 1853 IPW_DEBUG_FW_INFO("Current CB index 0x%x address = 0x%X \n",
1832 current_cb_index, current_cb_address ); 1854 current_cb_index, current_cb_address);
1833 1855
1834 IPW_DEBUG_FW(">> :\n"); 1856 IPW_DEBUG_FW(">> :\n");
1835 return current_cb_index; 1857 return current_cb_index;
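The hunk above recovers the active command-block index from the engine's current CB address by subtracting the base of the CB array in shared SRAM and dividing by the element size. A standalone illustration of that arithmetic; the base address and the 16-byte block layout are local stand-ins, not the driver's real CX2_SHARED_SRAM_DMA_CONTROL value:

/* Standalone sketch of the index arithmetic above.  The base address and
 * the 16-byte command_block layout are local assumptions. */
#include <stdio.h>
#include <stdint.h>

struct command_block {
	uint32_t control;
	uint32_t source_addr;
	uint32_t dest_addr;
	uint32_t status;
};

#define SRAM_DMA_CONTROL_BASE 0x00027000u	/* placeholder base address */

static unsigned int cb_index(uint32_t current_cb_address)
{
	/* offset of the active CB from the array base, in elements */
	return (current_cb_address - SRAM_DMA_CONTROL_BASE) /
	       sizeof(struct command_block);
}

int main(void)
{
	/* the engine sits on the third block: base + 2 * sizeof(command_block) */
	printf("%u\n", cb_index(SRAM_DMA_CONTROL_BASE + 2 * 16));	/* 2 */
	return 0;
}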
@@ -1840,15 +1862,14 @@ static int ipw_fw_dma_add_command_block(struct ipw_priv *priv,
1840 u32 src_address, 1862 u32 src_address,
1841 u32 dest_address, 1863 u32 dest_address,
1842 u32 length, 1864 u32 length,
1843 int interrupt_enabled, 1865 int interrupt_enabled, int is_last)
1844 int is_last)
1845{ 1866{
1846 1867
1847 u32 control = CB_VALID | CB_SRC_LE | CB_DEST_LE | CB_SRC_AUTOINC | 1868 u32 control = CB_VALID | CB_SRC_LE | CB_DEST_LE | CB_SRC_AUTOINC |
1848 CB_SRC_IO_GATED | CB_DEST_AUTOINC | CB_SRC_SIZE_LONG | 1869 CB_SRC_IO_GATED | CB_DEST_AUTOINC | CB_SRC_SIZE_LONG |
1849 CB_DEST_SIZE_LONG; 1870 CB_DEST_SIZE_LONG;
1850 struct command_block *cb; 1871 struct command_block *cb;
1851 u32 last_cb_element=0; 1872 u32 last_cb_element = 0;
1852 1873
1853 IPW_DEBUG_FW_INFO("src_address=0x%x dest_address=0x%x length=0x%x\n", 1874 IPW_DEBUG_FW_INFO("src_address=0x%x dest_address=0x%x length=0x%x\n",
1854 src_address, dest_address, length); 1875 src_address, dest_address, length);
@@ -1861,7 +1882,7 @@ static int ipw_fw_dma_add_command_block(struct ipw_priv *priv,
1861 priv->sram_desc.last_cb_index++; 1882 priv->sram_desc.last_cb_index++;
1862 1883
1863 /* Calculate the new CB control word */ 1884 /* Calculate the new CB control word */
1864 if (interrupt_enabled ) 1885 if (interrupt_enabled)
1865 control |= CB_INT_ENABLED; 1886 control |= CB_INT_ENABLED;
1866 1887
1867 if (is_last) 1888 if (is_last)
@@ -1870,7 +1891,7 @@ static int ipw_fw_dma_add_command_block(struct ipw_priv *priv,
1870 control |= length; 1891 control |= length;
1871 1892
1872 /* Calculate the CB Element's checksum value */ 1893 /* Calculate the CB Element's checksum value */
1873 cb->status = control ^src_address ^dest_address; 1894 cb->status = control ^ src_address ^ dest_address;
1874 1895
1875 /* Copy the Source and Destination addresses */ 1896 /* Copy the Source and Destination addresses */
1876 cb->dest_addr = dest_address; 1897 cb->dest_addr = dest_address;
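ipw_fw_dma_add_command_block() above ORs the optional interrupt/last flags and the transfer length into the control word, then stores control ^ src ^ dest as a cheap integrity value in cb->status. A minimal sketch of just that step, with invented SKETCH_CB_* flag bits standing in for the driver's CB_* constants:

/* Minimal sketch of the control-word / checksum step above.  The flag
 * values are invented placeholders, not the driver's CB_* bits. */
#include <stdint.h>

#define SKETCH_CB_VALID		(1u << 31)
#define SKETCH_CB_INT_ENABLED	(1u << 30)
#define SKETCH_CB_LAST_VALID	(1u << 29)

uint32_t cb_checksum(uint32_t length, int interrupt_enabled, int is_last,
		     uint32_t src, uint32_t dest)
{
	uint32_t control = SKETCH_CB_VALID;

	if (interrupt_enabled)
		control |= SKETCH_CB_INT_ENABLED;
	if (is_last)
		control |= SKETCH_CB_LAST_VALID;
	control |= length;	/* low bits carry the transfer length */

	/* the stored status word is just control XOR src XOR dest */
	return control ^ src ^ dest;
}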
@@ -1883,22 +1904,21 @@ static int ipw_fw_dma_add_command_block(struct ipw_priv *priv,
1883} 1904}
1884 1905
1885static int ipw_fw_dma_add_buffer(struct ipw_priv *priv, 1906static int ipw_fw_dma_add_buffer(struct ipw_priv *priv,
1886 u32 src_phys, 1907 u32 src_phys, u32 dest_address, u32 length)
1887 u32 dest_address,
1888 u32 length)
1889{ 1908{
1890 u32 bytes_left = length; 1909 u32 bytes_left = length;
1891 u32 src_offset=0; 1910 u32 src_offset = 0;
1892 u32 dest_offset=0; 1911 u32 dest_offset = 0;
1893 int status = 0; 1912 int status = 0;
1894 IPW_DEBUG_FW(">> \n"); 1913 IPW_DEBUG_FW(">> \n");
1895 IPW_DEBUG_FW_INFO("src_phys=0x%x dest_address=0x%x length=0x%x\n", 1914 IPW_DEBUG_FW_INFO("src_phys=0x%x dest_address=0x%x length=0x%x\n",
1896 src_phys, dest_address, length); 1915 src_phys, dest_address, length);
1897 while (bytes_left > CB_MAX_LENGTH) { 1916 while (bytes_left > CB_MAX_LENGTH) {
1898 status = ipw_fw_dma_add_command_block( priv, 1917 status = ipw_fw_dma_add_command_block(priv,
1899 src_phys + src_offset, 1918 src_phys + src_offset,
1900 dest_address + dest_offset, 1919 dest_address +
1901 CB_MAX_LENGTH, 0, 0); 1920 dest_offset,
1921 CB_MAX_LENGTH, 0, 0);
1902 if (status) { 1922 if (status) {
1903 IPW_DEBUG_FW_INFO(": Failed\n"); 1923 IPW_DEBUG_FW_INFO(": Failed\n");
1904 return -1; 1924 return -1;
@@ -1912,18 +1932,18 @@ static int ipw_fw_dma_add_buffer(struct ipw_priv *priv,
1912 1932
1913 /* add the buffer tail */ 1933 /* add the buffer tail */
1914 if (bytes_left > 0) { 1934 if (bytes_left > 0) {
1915 status = ipw_fw_dma_add_command_block( 1935 status =
1916 priv, src_phys + src_offset, 1936 ipw_fw_dma_add_command_block(priv, src_phys + src_offset,
1917 dest_address + dest_offset, 1937 dest_address + dest_offset,
1918 bytes_left, 0, 0); 1938 bytes_left, 0, 0);
1919 if (status) { 1939 if (status) {
1920 IPW_DEBUG_FW_INFO(": Failed on the buffer tail\n"); 1940 IPW_DEBUG_FW_INFO(": Failed on the buffer tail\n");
1921 return -1; 1941 return -1;
1922 } else 1942 } else
1923 IPW_DEBUG_FW_INFO(": Adding new cb - the buffer tail\n"); 1943 IPW_DEBUG_FW_INFO
1944 (": Adding new cb - the buffer tail\n");
1924 } 1945 }
1925 1946
1926
1927 IPW_DEBUG_FW("<< \n"); 1947 IPW_DEBUG_FW("<< \n");
1928 return 0; 1948 return 0;
1929} 1949}
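ipw_fw_dma_add_buffer() above splits an arbitrarily long transfer into CB_MAX_LENGTH-sized command blocks plus one tail block. The loop reduced to its essentials, with CHUNK and add_block() as placeholders for CB_MAX_LENGTH and ipw_fw_dma_add_command_block():

/* Generic chunking sketch of the loop above; CHUNK and add_block() stand
 * in for CB_MAX_LENGTH and ipw_fw_dma_add_command_block(). */
#include <stdint.h>

#define CHUNK 8192u	/* assumed maximum per-CB length, illustrative only */

int add_chunked(uint32_t src, uint32_t dest, uint32_t length,
		int (*add_block)(uint32_t src, uint32_t dest, uint32_t len))
{
	uint32_t offset = 0;

	/* full-sized blocks first */
	while (length - offset > CHUNK) {
		if (add_block(src + offset, dest + offset, CHUNK))
			return -1;
		offset += CHUNK;
	}

	/* then one tail block for whatever is left, if anything */
	if (length - offset > 0)
		return add_block(src + offset, dest + offset, length - offset);
	return 0;
}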
@@ -1937,7 +1957,7 @@ static int ipw_fw_dma_wait(struct ipw_priv *priv)
1937 1957
1938 current_index = ipw_fw_dma_command_block_index(priv); 1958 current_index = ipw_fw_dma_command_block_index(priv);
1939 IPW_DEBUG_FW_INFO("sram_desc.last_cb_index:0x%8X\n", 1959 IPW_DEBUG_FW_INFO("sram_desc.last_cb_index:0x%8X\n",
1940 (int) priv->sram_desc.last_cb_index); 1960 (int)priv->sram_desc.last_cb_index);
1941 1961
1942 while (current_index < priv->sram_desc.last_cb_index) { 1962 while (current_index < priv->sram_desc.last_cb_index) {
1943 udelay(50); 1963 udelay(50);
@@ -1955,8 +1975,8 @@ static int ipw_fw_dma_wait(struct ipw_priv *priv)
1955 1975
1956 ipw_fw_dma_abort(priv); 1976 ipw_fw_dma_abort(priv);
1957 1977
1958 /*Disable the DMA in the CSR register*/ 1978 /*Disable the DMA in the CSR register */
1959 ipw_set_bit(priv, CX2_RESET_REG, 1979 ipw_set_bit(priv, CX2_RESET_REG,
1960 CX2_RESET_REG_MASTER_DISABLED | CX2_RESET_REG_STOP_MASTER); 1980 CX2_RESET_REG_MASTER_DISABLED | CX2_RESET_REG_STOP_MASTER);
1961 1981
1962 IPW_DEBUG_FW("<< dmaWaitSync \n"); 1982 IPW_DEBUG_FW("<< dmaWaitSync \n");
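The dmaWaitSync path above polls the current CB index with a 50 us delay until it reaches the last queued block, and aborts the engine if it stalls. A bounded-polling skeleton of that idea; the watchdog budget here is an assumption, not the driver's actual limit:

/* Bounded-poll skeleton; the 400 * 50 us watchdog budget is an assumption,
 * and get_index()/delay_50us() are placeholders for the driver's helpers. */
#include <stdbool.h>

bool wait_for_index(unsigned int (*get_index)(void), unsigned int last,
		    void (*delay_50us)(void))
{
	unsigned int watchdog = 0;

	while (get_index() < last) {
		delay_50us();
		if (++watchdog > 400)	/* ~20 ms, then give up */
			return false;	/* caller aborts the DMA engine */
	}
	return true;
}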
@@ -2011,8 +2031,7 @@ static inline int ipw_poll_bit(struct ipw_priv *priv, u32 addr, u32 mask,
2011 * image and the caller is handling the memory allocation and clean up. 2031 * image and the caller is handling the memory allocation and clean up.
2012 */ 2032 */
2013 2033
2014 2034static int ipw_stop_master(struct ipw_priv *priv)
2015static int ipw_stop_master(struct ipw_priv * priv)
2016{ 2035{
2017 int rc; 2036 int rc;
2018 2037
@@ -2071,14 +2090,13 @@ struct fw_chunk {
2071#define IPW_FW_NAME(x) "ipw2200_" x ".fw" 2090#define IPW_FW_NAME(x) "ipw2200_" x ".fw"
2072#endif 2091#endif
2073 2092
2074static int ipw_load_ucode(struct ipw_priv *priv, u8 * data, 2093static int ipw_load_ucode(struct ipw_priv *priv, u8 * data, size_t len)
2075 size_t len)
2076{ 2094{
2077 int rc = 0, i, addr; 2095 int rc = 0, i, addr;
2078 u8 cr = 0; 2096 u8 cr = 0;
2079 u16 *image; 2097 u16 *image;
2080 2098
2081 image = (u16 *)data; 2099 image = (u16 *) data;
2082 2100
2083 IPW_DEBUG_TRACE(">> \n"); 2101 IPW_DEBUG_TRACE(">> \n");
2084 2102
@@ -2087,7 +2105,7 @@ static int ipw_load_ucode(struct ipw_priv *priv, u8 * data,
2087 if (rc < 0) 2105 if (rc < 0)
2088 return rc; 2106 return rc;
2089 2107
2090// spin_lock_irqsave(&priv->lock, flags); 2108// spin_lock_irqsave(&priv->lock, flags);
2091 2109
2092 for (addr = CX2_SHARED_LOWER_BOUND; 2110 for (addr = CX2_SHARED_LOWER_BOUND;
2093 addr < CX2_REGISTER_DOMAIN1_END; addr += 4) { 2111 addr < CX2_REGISTER_DOMAIN1_END; addr += 4) {
@@ -2099,7 +2117,7 @@ static int ipw_load_ucode(struct ipw_priv *priv, u8 * data,
2099 /* destroy DMA queues */ 2117 /* destroy DMA queues */
2100 /* reset sequence */ 2118 /* reset sequence */
2101 2119
2102 ipw_write_reg32(priv, CX2_MEM_HALT_AND_RESET ,CX2_BIT_HALT_RESET_ON); 2120 ipw_write_reg32(priv, CX2_MEM_HALT_AND_RESET, CX2_BIT_HALT_RESET_ON);
2103 ipw_arc_release(priv); 2121 ipw_arc_release(priv);
2104 ipw_write_reg32(priv, CX2_MEM_HALT_AND_RESET, CX2_BIT_HALT_RESET_OFF); 2122 ipw_write_reg32(priv, CX2_MEM_HALT_AND_RESET, CX2_BIT_HALT_RESET_OFF);
2105 mdelay(1); 2123 mdelay(1);
@@ -2128,13 +2146,11 @@ static int ipw_load_ucode(struct ipw_priv *priv, u8 * data,
2128 for (i = 0; i < len / 2; i++) 2146 for (i = 0; i < len / 2; i++)
2129 ipw_write_reg16(priv, CX2_BASEBAND_CONTROL_STORE, image[i]); 2147 ipw_write_reg16(priv, CX2_BASEBAND_CONTROL_STORE, image[i]);
2130 2148
2131
2132 /* enable DINO */ 2149 /* enable DINO */
2133 ipw_write_reg8(priv, CX2_BASEBAND_CONTROL_STATUS, 0); 2150 ipw_write_reg8(priv, CX2_BASEBAND_CONTROL_STATUS, 0);
2134 ipw_write_reg8(priv, CX2_BASEBAND_CONTROL_STATUS, 2151 ipw_write_reg8(priv, CX2_BASEBAND_CONTROL_STATUS, DINO_ENABLE_SYSTEM);
2135 DINO_ENABLE_SYSTEM );
2136 2152
2137 /* this is where the igx / win driver deveates from the VAP driver.*/ 2153 /* this is where the igx / win driver deveates from the VAP driver. */
2138 2154
2139 /* wait for alive response */ 2155 /* wait for alive response */
2140 for (i = 0; i < 100; i++) { 2156 for (i = 0; i < 100; i++) {
@@ -2151,25 +2167,24 @@ static int ipw_load_ucode(struct ipw_priv *priv, u8 * data,
2151 2167
2152 for (i = 0; i < ARRAY_SIZE(response_buffer); i++) 2168 for (i = 0; i < ARRAY_SIZE(response_buffer); i++)
2153 response_buffer[i] = 2169 response_buffer[i] =
2154 ipw_read_reg32(priv, 2170 ipw_read_reg32(priv, CX2_BASEBAND_RX_FIFO_READ);
2155 CX2_BASEBAND_RX_FIFO_READ);
2156 memcpy(&priv->dino_alive, response_buffer, 2171 memcpy(&priv->dino_alive, response_buffer,
2157 sizeof(priv->dino_alive)); 2172 sizeof(priv->dino_alive));
2158 if (priv->dino_alive.alive_command == 1 2173 if (priv->dino_alive.alive_command == 1
2159 && priv->dino_alive.ucode_valid == 1) { 2174 && priv->dino_alive.ucode_valid == 1) {
2160 rc = 0; 2175 rc = 0;
2161 IPW_DEBUG_INFO( 2176 IPW_DEBUG_INFO
2162 "Microcode OK, rev. %d (0x%x) dev. %d (0x%x) " 2177 ("Microcode OK, rev. %d (0x%x) dev. %d (0x%x) "
2163 "of %02d/%02d/%02d %02d:%02d\n", 2178 "of %02d/%02d/%02d %02d:%02d\n",
2164 priv->dino_alive.software_revision, 2179 priv->dino_alive.software_revision,
2165 priv->dino_alive.software_revision, 2180 priv->dino_alive.software_revision,
2166 priv->dino_alive.device_identifier, 2181 priv->dino_alive.device_identifier,
2167 priv->dino_alive.device_identifier, 2182 priv->dino_alive.device_identifier,
2168 priv->dino_alive.time_stamp[0], 2183 priv->dino_alive.time_stamp[0],
2169 priv->dino_alive.time_stamp[1], 2184 priv->dino_alive.time_stamp[1],
2170 priv->dino_alive.time_stamp[2], 2185 priv->dino_alive.time_stamp[2],
2171 priv->dino_alive.time_stamp[3], 2186 priv->dino_alive.time_stamp[3],
2172 priv->dino_alive.time_stamp[4]); 2187 priv->dino_alive.time_stamp[4]);
2173 } else { 2188 } else {
2174 IPW_DEBUG_INFO("Microcode is not alive\n"); 2189 IPW_DEBUG_INFO("Microcode is not alive\n");
2175 rc = -EINVAL; 2190 rc = -EINVAL;
@@ -2183,13 +2198,12 @@ static int ipw_load_ucode(struct ipw_priv *priv, u8 * data,
2183 firmware have problem getting alive resp. */ 2198 firmware have problem getting alive resp. */
2184 ipw_write_reg8(priv, CX2_BASEBAND_CONTROL_STATUS, 0); 2199 ipw_write_reg8(priv, CX2_BASEBAND_CONTROL_STATUS, 0);
2185 2200
2186// spin_unlock_irqrestore(&priv->lock, flags); 2201// spin_unlock_irqrestore(&priv->lock, flags);
2187 2202
2188 return rc; 2203 return rc;
2189} 2204}
2190 2205
2191static int ipw_load_firmware(struct ipw_priv *priv, u8 * data, 2206static int ipw_load_firmware(struct ipw_priv *priv, u8 * data, size_t len)
2192 size_t len)
2193{ 2207{
2194 int rc = -1; 2208 int rc = -1;
2195 int offset = 0; 2209 int offset = 0;
@@ -2231,7 +2245,7 @@ static int ipw_load_firmware(struct ipw_priv *priv, u8 * data,
2231 offset += chunk->length; 2245 offset += chunk->length;
2232 } while (offset < len); 2246 } while (offset < len);
2233 2247
2234 /* Run the DMA and wait for the answer*/ 2248 /* Run the DMA and wait for the answer */
2235 rc = ipw_fw_dma_kick(priv); 2249 rc = ipw_fw_dma_kick(priv);
2236 if (rc) { 2250 if (rc) {
2237 IPW_ERROR("dmaKick Failed\n"); 2251 IPW_ERROR("dmaKick Failed\n");
@@ -2243,8 +2257,8 @@ static int ipw_load_firmware(struct ipw_priv *priv, u8 * data,
2243 IPW_ERROR("dmaWaitSync Failed\n"); 2257 IPW_ERROR("dmaWaitSync Failed\n");
2244 goto out; 2258 goto out;
2245 } 2259 }
2246 out: 2260 out:
2247 pci_free_consistent( priv->pci_dev, len, shared_virt, shared_phys); 2261 pci_free_consistent(priv->pci_dev, len, shared_virt, shared_phys);
2248 return rc; 2262 return rc;
2249} 2263}
2250 2264
@@ -2253,7 +2267,7 @@ static int ipw_stop_nic(struct ipw_priv *priv)
2253{ 2267{
2254 int rc = 0; 2268 int rc = 0;
2255 2269
2256 /* stop*/ 2270 /* stop */
2257 ipw_write32(priv, CX2_RESET_REG, CX2_RESET_REG_STOP_MASTER); 2271 ipw_write32(priv, CX2_RESET_REG, CX2_RESET_REG_STOP_MASTER);
2258 2272
2259 rc = ipw_poll_bit(priv, CX2_RESET_REG, 2273 rc = ipw_poll_bit(priv, CX2_RESET_REG,
@@ -2272,14 +2286,15 @@ static void ipw_start_nic(struct ipw_priv *priv)
2272{ 2286{
2273 IPW_DEBUG_TRACE(">>\n"); 2287 IPW_DEBUG_TRACE(">>\n");
2274 2288
2275 /* prvHwStartNic release ARC*/ 2289 /* prvHwStartNic release ARC */
2276 ipw_clear_bit(priv, CX2_RESET_REG, 2290 ipw_clear_bit(priv, CX2_RESET_REG,
2277 CX2_RESET_REG_MASTER_DISABLED | 2291 CX2_RESET_REG_MASTER_DISABLED |
2278 CX2_RESET_REG_STOP_MASTER | 2292 CX2_RESET_REG_STOP_MASTER |
2279 CBD_RESET_REG_PRINCETON_RESET); 2293 CBD_RESET_REG_PRINCETON_RESET);
2280 2294
2281 /* enable power management */ 2295 /* enable power management */
2282 ipw_set_bit(priv, CX2_GP_CNTRL_RW, CX2_GP_CNTRL_BIT_HOST_ALLOWS_STANDBY); 2296 ipw_set_bit(priv, CX2_GP_CNTRL_RW,
2297 CX2_GP_CNTRL_BIT_HOST_ALLOWS_STANDBY);
2283 2298
2284 IPW_DEBUG_TRACE("<<\n"); 2299 IPW_DEBUG_TRACE("<<\n");
2285} 2300}
@@ -2295,12 +2310,13 @@ static int ipw_init_nic(struct ipw_priv *priv)
2295 ipw_set_bit(priv, CX2_GP_CNTRL_RW, CX2_GP_CNTRL_BIT_INIT_DONE); 2310 ipw_set_bit(priv, CX2_GP_CNTRL_RW, CX2_GP_CNTRL_BIT_INIT_DONE);
2296 2311
2297 /* low-level PLL activation */ 2312 /* low-level PLL activation */
2298 ipw_write32(priv, CX2_READ_INT_REGISTER, CX2_BIT_INT_HOST_SRAM_READ_INT_REGISTER); 2313 ipw_write32(priv, CX2_READ_INT_REGISTER,
2314 CX2_BIT_INT_HOST_SRAM_READ_INT_REGISTER);
2299 2315
2300 /* wait for clock stabilization */ 2316 /* wait for clock stabilization */
2301 rc = ipw_poll_bit(priv, CX2_GP_CNTRL_RW, 2317 rc = ipw_poll_bit(priv, CX2_GP_CNTRL_RW,
2302 CX2_GP_CNTRL_BIT_CLOCK_READY, 250); 2318 CX2_GP_CNTRL_BIT_CLOCK_READY, 250);
2303 if (rc < 0 ) 2319 if (rc < 0)
2304 IPW_DEBUG_INFO("FAILED wait for clock stablization\n"); 2320 IPW_DEBUG_INFO("FAILED wait for clock stablization\n");
2305 2321
2306 /* assert SW reset */ 2322 /* assert SW reset */
@@ -2315,7 +2331,6 @@ static int ipw_init_nic(struct ipw_priv *priv)
2315 return 0; 2331 return 0;
2316} 2332}
2317 2333
2318
2319/* Call this function from process context, it will sleep in request_firmware. 2334/* Call this function from process context, it will sleep in request_firmware.
2320 * Probe is an ok place to call this from. 2335 * Probe is an ok place to call this from.
2321 */ 2336 */
@@ -2383,8 +2398,7 @@ static inline void ipw_rx_queue_reset(struct ipw_priv *priv,
2383 * to an SKB, so we need to unmap and free potential storage */ 2398 * to an SKB, so we need to unmap and free potential storage */
2384 if (rxq->pool[i].skb != NULL) { 2399 if (rxq->pool[i].skb != NULL) {
2385 pci_unmap_single(priv->pci_dev, rxq->pool[i].dma_addr, 2400 pci_unmap_single(priv->pci_dev, rxq->pool[i].dma_addr,
2386 CX2_RX_BUF_SIZE, 2401 CX2_RX_BUF_SIZE, PCI_DMA_FROMDEVICE);
2387 PCI_DMA_FROMDEVICE);
2388 dev_kfree_skb(rxq->pool[i].skb); 2402 dev_kfree_skb(rxq->pool[i].skb);
2389 } 2403 }
2390 list_add_tail(&rxq->pool[i].list, &rxq->rx_used); 2404 list_add_tail(&rxq->pool[i].list, &rxq->rx_used);
@@ -2438,12 +2452,12 @@ static int ipw_load(struct ipw_priv *priv)
2438 if (rc) 2452 if (rc)
2439 goto error; 2453 goto error;
2440 2454
2441 rc = ipw_get_fw(priv, &firmware, IPW_FW_NAME("sniffer")); 2455 rc = ipw_get_fw(priv, &firmware,
2456 IPW_FW_NAME("sniffer"));
2442 break; 2457 break;
2443#endif 2458#endif
2444 case IW_MODE_INFRA: 2459 case IW_MODE_INFRA:
2445 rc = ipw_get_fw(priv, &ucode, 2460 rc = ipw_get_fw(priv, &ucode, IPW_FW_NAME("bss_ucode"));
2446 IPW_FW_NAME("bss_ucode"));
2447 if (rc) 2461 if (rc)
2448 goto error; 2462 goto error;
2449 2463
@@ -2471,7 +2485,7 @@ static int ipw_load(struct ipw_priv *priv)
2471 goto error; 2485 goto error;
2472 } 2486 }
2473 2487
2474 retry: 2488 retry:
2475 /* Ensure interrupts are disabled */ 2489 /* Ensure interrupts are disabled */
2476 ipw_write32(priv, CX2_INTA_MASK_R, ~CX2_INTA_MASK_ALL); 2490 ipw_write32(priv, CX2_INTA_MASK_R, ~CX2_INTA_MASK_ALL);
2477 priv->status &= ~STATUS_INT_ENABLED; 2491 priv->status &= ~STATUS_INT_ENABLED;
@@ -2528,7 +2542,7 @@ static int ipw_load(struct ipw_priv *priv)
2528 rc = ipw_load_firmware(priv, firmware->data + 2542 rc = ipw_load_firmware(priv, firmware->data +
2529 sizeof(struct fw_header), 2543 sizeof(struct fw_header),
2530 firmware->size - sizeof(struct fw_header)); 2544 firmware->size - sizeof(struct fw_header));
2531 if (rc < 0 ) { 2545 if (rc < 0) {
2532 IPW_ERROR("Unable to load firmware\n"); 2546 IPW_ERROR("Unable to load firmware\n");
2533 goto error; 2547 goto error;
2534 } 2548 }
@@ -2593,7 +2607,7 @@ static int ipw_load(struct ipw_priv *priv)
2593#endif 2607#endif
2594 return 0; 2608 return 0;
2595 2609
2596 error: 2610 error:
2597 if (priv->rxq) { 2611 if (priv->rxq) {
2598 ipw_rx_queue_free(priv, priv->rxq); 2612 ipw_rx_queue_free(priv, priv->rxq);
2599 priv->rxq = NULL; 2613 priv->rxq = NULL;
@@ -2671,8 +2685,7 @@ static inline int ipw_queue_inc_wrap(int index, int n_bd)
2671 * (not offset within BAR, full address) 2685 * (not offset within BAR, full address)
2672 */ 2686 */
2673static void ipw_queue_init(struct ipw_priv *priv, struct clx2_queue *q, 2687static void ipw_queue_init(struct ipw_priv *priv, struct clx2_queue *q,
2674 int count, u32 read, u32 write, 2688 int count, u32 read, u32 write, u32 base, u32 size)
2675 u32 base, u32 size)
2676{ 2689{
2677 q->n_bd = count; 2690 q->n_bd = count;
2678 2691
@@ -2698,8 +2711,7 @@ static void ipw_queue_init(struct ipw_priv *priv, struct clx2_queue *q,
2698 2711
2699static int ipw_queue_tx_init(struct ipw_priv *priv, 2712static int ipw_queue_tx_init(struct ipw_priv *priv,
2700 struct clx2_tx_queue *q, 2713 struct clx2_tx_queue *q,
2701 int count, u32 read, u32 write, 2714 int count, u32 read, u32 write, u32 base, u32 size)
2702 u32 base, u32 size)
2703{ 2715{
2704 struct pci_dev *dev = priv->pci_dev; 2716 struct pci_dev *dev = priv->pci_dev;
2705 2717
@@ -2709,10 +2721,11 @@ static int ipw_queue_tx_init(struct ipw_priv *priv,
2709 return -ENOMEM; 2721 return -ENOMEM;
2710 } 2722 }
2711 2723
2712 q->bd = pci_alloc_consistent(dev,sizeof(q->bd[0])*count, &q->q.dma_addr); 2724 q->bd =
2725 pci_alloc_consistent(dev, sizeof(q->bd[0]) * count, &q->q.dma_addr);
2713 if (!q->bd) { 2726 if (!q->bd) {
2714 IPW_ERROR("pci_alloc_consistent(%zd) failed\n", 2727 IPW_ERROR("pci_alloc_consistent(%zd) failed\n",
2715 sizeof(q->bd[0]) * count); 2728 sizeof(q->bd[0]) * count);
2716 kfree(q->txb); 2729 kfree(q->txb);
2717 q->txb = NULL; 2730 q->txb = NULL;
2718 return -ENOMEM; 2731 return -ENOMEM;
@@ -2768,8 +2781,7 @@ static void ipw_queue_tx_free_tfd(struct ipw_priv *priv,
2768 * @param dev 2781 * @param dev
2769 * @param q 2782 * @param q
2770 */ 2783 */
2771static void ipw_queue_tx_free(struct ipw_priv *priv, 2784static void ipw_queue_tx_free(struct ipw_priv *priv, struct clx2_tx_queue *txq)
2772 struct clx2_tx_queue *txq)
2773{ 2785{
2774 struct clx2_queue *q = &txq->q; 2786 struct clx2_queue *q = &txq->q;
2775 struct pci_dev *dev = priv->pci_dev; 2787 struct pci_dev *dev = priv->pci_dev;
@@ -2784,7 +2796,7 @@ static void ipw_queue_tx_free(struct ipw_priv *priv,
2784 } 2796 }
2785 2797
2786 /* free buffers belonging to queue itself */ 2798 /* free buffers belonging to queue itself */
2787 pci_free_consistent(dev, sizeof(txq->bd[0])*q->n_bd, txq->bd, 2799 pci_free_consistent(dev, sizeof(txq->bd[0]) * q->n_bd, txq->bd,
2788 q->dma_addr); 2800 q->dma_addr);
2789 kfree(txq->txb); 2801 kfree(txq->txb);
2790 2802
@@ -2792,7 +2804,6 @@ static void ipw_queue_tx_free(struct ipw_priv *priv,
2792 memset(txq, 0, sizeof(*txq)); 2804 memset(txq, 0, sizeof(*txq));
2793} 2805}
2794 2806
2795
2796/** 2807/**
2797 * Destroy all DMA queues and structures 2808 * Destroy all DMA queues and structures
2798 * 2809 *
@@ -2825,7 +2836,7 @@ static void inline __maybe_wake_tx(struct ipw_priv *priv)
2825 2836
2826} 2837}
2827 2838
2828static inline void ipw_create_bssid(struct ipw_priv *priv, u8 *bssid) 2839static inline void ipw_create_bssid(struct ipw_priv *priv, u8 * bssid)
2829{ 2840{
2830 /* First 3 bytes are manufacturer */ 2841 /* First 3 bytes are manufacturer */
2831 bssid[0] = priv->mac_addr[0]; 2842 bssid[0] = priv->mac_addr[0];
@@ -2833,13 +2844,13 @@ static inline void ipw_create_bssid(struct ipw_priv *priv, u8 *bssid)
2833 bssid[2] = priv->mac_addr[2]; 2844 bssid[2] = priv->mac_addr[2];
2834 2845
2835 /* Last bytes are random */ 2846 /* Last bytes are random */
2836 get_random_bytes(&bssid[3], ETH_ALEN-3); 2847 get_random_bytes(&bssid[3], ETH_ALEN - 3);
2837 2848
2838 bssid[0] &= 0xfe; /* clear multicast bit */ 2849 bssid[0] &= 0xfe; /* clear multicast bit */
2839 bssid[0] |= 0x02; /* set local assignment bit (IEEE802) */ 2850 bssid[0] |= 0x02; /* set local assignment bit (IEEE802) */
2840} 2851}
2841 2852
2842static inline u8 ipw_add_station(struct ipw_priv *priv, u8 *bssid) 2853static inline u8 ipw_add_station(struct ipw_priv *priv, u8 * bssid)
2843{ 2854{
2844 struct ipw_station_entry entry; 2855 struct ipw_station_entry entry;
2845 int i; 2856 int i;
@@ -2866,14 +2877,13 @@ static inline u8 ipw_add_station(struct ipw_priv *priv, u8 *bssid)
2866 memcpy(entry.mac_addr, bssid, ETH_ALEN); 2877 memcpy(entry.mac_addr, bssid, ETH_ALEN);
2867 memcpy(priv->stations[i], bssid, ETH_ALEN); 2878 memcpy(priv->stations[i], bssid, ETH_ALEN);
2868 ipw_write_direct(priv, IPW_STATION_TABLE_LOWER + i * sizeof(entry), 2879 ipw_write_direct(priv, IPW_STATION_TABLE_LOWER + i * sizeof(entry),
2869 &entry, 2880 &entry, sizeof(entry));
2870 sizeof(entry));
2871 priv->num_stations++; 2881 priv->num_stations++;
2872 2882
2873 return i; 2883 return i;
2874} 2884}
2875 2885
2876static inline u8 ipw_find_station(struct ipw_priv *priv, u8 *bssid) 2886static inline u8 ipw_find_station(struct ipw_priv *priv, u8 * bssid)
2877{ 2887{
2878 int i; 2888 int i;
2879 2889
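ipw_create_bssid() above takes the adapter's OUI, appends random tail bytes, and then forces the result to be a valid locally-administered unicast address by clearing the multicast bit and setting the local-assignment bit. The same fix-up as a self-contained program; rand() stands in for get_random_bytes() and the OUI bytes are placeholders:

/* Standalone version of the BSSID fix-up above. */
#include <stdio.h>
#include <stdlib.h>

#define ETH_ALEN 6

static void make_local_bssid(unsigned char bssid[ETH_ALEN],
			     const unsigned char oui[3])
{
	int i;

	bssid[0] = oui[0];	/* first three bytes: manufacturer OUI */
	bssid[1] = oui[1];
	bssid[2] = oui[2];
	for (i = 3; i < ETH_ALEN; i++)	/* last three bytes: random */
		bssid[i] = rand() & 0xff;

	bssid[0] &= 0xfe;	/* clear I/G bit: must be unicast */
	bssid[0] |= 0x02;	/* set U/L bit: locally administered (IEEE 802) */
}

int main(void)
{
	const unsigned char oui[3] = { 0x00, 0x11, 0x22 };	/* example only */
	unsigned char bssid[ETH_ALEN];

	make_local_bssid(bssid, oui);
	printf("%02x:%02x:%02x:%02x:%02x:%02x\n",
	       bssid[0], bssid[1], bssid[2], bssid[3], bssid[4], bssid[5]);
	return 0;
}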
@@ -2944,26 +2954,34 @@ static const struct ipw_status_code ipw_status_codes[] = {
2944 "association exists"}, 2954 "association exists"},
2945 {0x0C, "Association denied due to reason outside the scope of this " 2955 {0x0C, "Association denied due to reason outside the scope of this "
2946 "standard"}, 2956 "standard"},
2947 {0x0D, "Responding station does not support the specified authentication " 2957 {0x0D,
2958 "Responding station does not support the specified authentication "
2948 "algorithm"}, 2959 "algorithm"},
2949 {0x0E, "Received an Authentication frame with authentication sequence " 2960 {0x0E,
2961 "Received an Authentication frame with authentication sequence "
2950 "transaction sequence number out of expected sequence"}, 2962 "transaction sequence number out of expected sequence"},
2951 {0x0F, "Authentication rejected because of challenge failure"}, 2963 {0x0F, "Authentication rejected because of challenge failure"},
2952 {0x10, "Authentication rejected due to timeout waiting for next " 2964 {0x10, "Authentication rejected due to timeout waiting for next "
2953 "frame in sequence"}, 2965 "frame in sequence"},
2954 {0x11, "Association denied because AP is unable to handle additional " 2966 {0x11, "Association denied because AP is unable to handle additional "
2955 "associated stations"}, 2967 "associated stations"},
2956 {0x12, "Association denied due to requesting station not supporting all " 2968 {0x12,
2969 "Association denied due to requesting station not supporting all "
2957 "of the datarates in the BSSBasicServiceSet Parameter"}, 2970 "of the datarates in the BSSBasicServiceSet Parameter"},
2958 {0x13, "Association denied due to requesting station not supporting " 2971 {0x13,
2972 "Association denied due to requesting station not supporting "
2959 "short preamble operation"}, 2973 "short preamble operation"},
2960 {0x14, "Association denied due to requesting station not supporting " 2974 {0x14,
2975 "Association denied due to requesting station not supporting "
2961 "PBCC encoding"}, 2976 "PBCC encoding"},
2962 {0x15, "Association denied due to requesting station not supporting " 2977 {0x15,
2978 "Association denied due to requesting station not supporting "
2963 "channel agility"}, 2979 "channel agility"},
2964 {0x19, "Association denied due to requesting station not supporting " 2980 {0x19,
2981 "Association denied due to requesting station not supporting "
2965 "short slot operation"}, 2982 "short slot operation"},
2966 {0x1A, "Association denied due to requesting station not supporting " 2983 {0x1A,
2984 "Association denied due to requesting station not supporting "
2967 "DSSS-OFDM operation"}, 2985 "DSSS-OFDM operation"},
2968 {0x28, "Invalid Information Element"}, 2986 {0x28, "Invalid Information Element"},
2969 {0x29, "Group Cipher is not valid"}, 2987 {0x29, "Group Cipher is not valid"},
@@ -3043,7 +3061,6 @@ static void ipw_reset_stats(struct ipw_priv *priv)
3043 3061
3044} 3062}
3045 3063
3046
3047static inline u32 ipw_get_max_rate(struct ipw_priv *priv) 3064static inline u32 ipw_get_max_rate(struct ipw_priv *priv)
3048{ 3065{
3049 u32 i = 0x80000000; 3066 u32 i = 0x80000000;
@@ -3056,20 +3073,21 @@ static inline u32 ipw_get_max_rate(struct ipw_priv *priv)
3056 /* TODO: Verify that the rate is supported by the current rates 3073 /* TODO: Verify that the rate is supported by the current rates
3057 * list. */ 3074 * list. */
3058 3075
3059 while (i && !(mask & i)) i >>= 1; 3076 while (i && !(mask & i))
3077 i >>= 1;
3060 switch (i) { 3078 switch (i) {
3061 case IEEE80211_CCK_RATE_1MB_MASK: return 1000000; 3079 case IEEE80211_CCK_RATE_1MB_MASK: return 1000000;
3062 case IEEE80211_CCK_RATE_2MB_MASK: return 2000000; 3080 case IEEE80211_CCK_RATE_2MB_MASK: return 2000000;
3063 case IEEE80211_CCK_RATE_5MB_MASK: return 5500000; 3081 case IEEE80211_CCK_RATE_5MB_MASK: return 5500000;
3064 case IEEE80211_OFDM_RATE_6MB_MASK: return 6000000; 3082 case IEEE80211_OFDM_RATE_6MB_MASK: return 6000000;
3065 case IEEE80211_OFDM_RATE_9MB_MASK: return 9000000; 3083 case IEEE80211_OFDM_RATE_9MB_MASK: return 9000000;
3066 case IEEE80211_CCK_RATE_11MB_MASK: return 11000000; 3084 case IEEE80211_CCK_RATE_11MB_MASK: return 11000000;
3067 case IEEE80211_OFDM_RATE_12MB_MASK: return 12000000; 3085 case IEEE80211_OFDM_RATE_12MB_MASK: return 12000000;
3068 case IEEE80211_OFDM_RATE_18MB_MASK: return 18000000; 3086 case IEEE80211_OFDM_RATE_18MB_MASK: return 18000000;
3069 case IEEE80211_OFDM_RATE_24MB_MASK: return 24000000; 3087 case IEEE80211_OFDM_RATE_24MB_MASK: return 24000000;
3070 case IEEE80211_OFDM_RATE_36MB_MASK: return 36000000; 3088 case IEEE80211_OFDM_RATE_36MB_MASK: return 36000000;
3071 case IEEE80211_OFDM_RATE_48MB_MASK: return 48000000; 3089 case IEEE80211_OFDM_RATE_48MB_MASK: return 48000000;
3072 case IEEE80211_OFDM_RATE_54MB_MASK: return 54000000; 3090 case IEEE80211_OFDM_RATE_54MB_MASK: return 54000000;
3073 } 3091 }
3074 3092
3075 if (priv->ieee->mode == IEEE_B) 3093 if (priv->ieee->mode == IEEE_B)
@@ -3097,18 +3115,18 @@ static u32 ipw_get_current_rate(struct ipw_priv *priv)
3097 return ipw_get_max_rate(priv); 3115 return ipw_get_max_rate(priv);
3098 3116
3099 switch (rate) { 3117 switch (rate) {
3100 case IPW_TX_RATE_1MB: return 1000000; 3118 case IPW_TX_RATE_1MB: return 1000000;
3101 case IPW_TX_RATE_2MB: return 2000000; 3119 case IPW_TX_RATE_2MB: return 2000000;
3102 case IPW_TX_RATE_5MB: return 5500000; 3120 case IPW_TX_RATE_5MB: return 5500000;
3103 case IPW_TX_RATE_6MB: return 6000000; 3121 case IPW_TX_RATE_6MB: return 6000000;
3104 case IPW_TX_RATE_9MB: return 9000000; 3122 case IPW_TX_RATE_9MB: return 9000000;
3105 case IPW_TX_RATE_11MB: return 11000000; 3123 case IPW_TX_RATE_11MB: return 11000000;
3106 case IPW_TX_RATE_12MB: return 12000000; 3124 case IPW_TX_RATE_12MB: return 12000000;
3107 case IPW_TX_RATE_18MB: return 18000000; 3125 case IPW_TX_RATE_18MB: return 18000000;
3108 case IPW_TX_RATE_24MB: return 24000000; 3126 case IPW_TX_RATE_24MB: return 24000000;
3109 case IPW_TX_RATE_36MB: return 36000000; 3127 case IPW_TX_RATE_36MB: return 36000000;
3110 case IPW_TX_RATE_48MB: return 48000000; 3128 case IPW_TX_RATE_48MB: return 48000000;
3111 case IPW_TX_RATE_54MB: return 54000000; 3129 case IPW_TX_RATE_54MB: return 54000000;
3112 } 3130 }
3113 3131
3114 return 0; 3132 return 0;
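ipw_get_max_rate() above scans the rate bitmask from the top bit downward and maps the highest set bit to a bit rate in bps, much like the per-packet switch in ipw_get_current_rate(). The scan in isolation, with an abbreviated table of invented RATE_* masks rather than the kernel's IEEE80211_*_MASK constants:

/* Self-contained illustration of the highest-set-rate-bit scan above.
 * The RATE_* masks and the abbreviated table are invented. */
#include <stdio.h>
#include <stdint.h>

#define RATE_1MB_MASK	(1u << 0)
#define RATE_2MB_MASK	(1u << 1)
#define RATE_5MB_MASK	(1u << 2)
#define RATE_11MB_MASK	(1u << 3)
#define RATE_54MB_MASK	(1u << 11)

static uint32_t max_rate_bps(uint32_t mask)
{
	uint32_t i = 0x80000000u;

	while (i && !(mask & i))	/* walk down to the highest set bit */
		i >>= 1;

	switch (i) {
	case RATE_1MB_MASK:	return 1000000;
	case RATE_2MB_MASK:	return 2000000;
	case RATE_5MB_MASK:	return 5500000;
	case RATE_11MB_MASK:	return 11000000;
	case RATE_54MB_MASK:	return 54000000;
	}
	return 0;	/* unknown or empty mask */
}

int main(void)
{
	/* 1, 2 and 5.5 Mb supported -> the best is 5.5 Mb */
	printf("%u\n",
	       (unsigned)max_rate_bps(RATE_1MB_MASK | RATE_2MB_MASK |
				      RATE_5MB_MASK));
	return 0;
}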
@@ -3126,7 +3144,7 @@ static void ipw_gather_stats(struct ipw_priv *priv)
3126 u32 len = sizeof(u32); 3144 u32 len = sizeof(u32);
3127 s16 rssi; 3145 s16 rssi;
3128 u32 beacon_quality, signal_quality, tx_quality, rx_quality, 3146 u32 beacon_quality, signal_quality, tx_quality, rx_quality,
3129 rate_quality; 3147 rate_quality;
3130 3148
3131 if (!(priv->status & STATUS_ASSOCIATED)) { 3149 if (!(priv->status & STATUS_ASSOCIATED)) {
3132 priv->quality = 0; 3150 priv->quality = 0;
@@ -3136,13 +3154,12 @@ static void ipw_gather_stats(struct ipw_priv *priv)
3136 /* Update the statistics */ 3154 /* Update the statistics */
3137 ipw_get_ordinal(priv, IPW_ORD_STAT_MISSED_BEACONS, 3155 ipw_get_ordinal(priv, IPW_ORD_STAT_MISSED_BEACONS,
3138 &priv->missed_beacons, &len); 3156 &priv->missed_beacons, &len);
3139 missed_beacons_delta = priv->missed_beacons - 3157 missed_beacons_delta = priv->missed_beacons - priv->last_missed_beacons;
3140 priv->last_missed_beacons;
3141 priv->last_missed_beacons = priv->missed_beacons; 3158 priv->last_missed_beacons = priv->missed_beacons;
3142 if (priv->assoc_request.beacon_interval) { 3159 if (priv->assoc_request.beacon_interval) {
3143 missed_beacons_percent = missed_beacons_delta * 3160 missed_beacons_percent = missed_beacons_delta *
3144 (HZ * priv->assoc_request.beacon_interval) / 3161 (HZ * priv->assoc_request.beacon_interval) /
3145 (IPW_STATS_INTERVAL * 10); 3162 (IPW_STATS_INTERVAL * 10);
3146 } else { 3163 } else {
3147 missed_beacons_percent = 0; 3164 missed_beacons_percent = 0;
3148 } 3165 }
@@ -3179,28 +3196,26 @@ static void ipw_gather_stats(struct ipw_priv *priv)
3179 beacon_quality = 0; 3196 beacon_quality = 0;
3180 else 3197 else
3181 beacon_quality = (beacon_quality - BEACON_THRESHOLD) * 100 / 3198 beacon_quality = (beacon_quality - BEACON_THRESHOLD) * 100 /
3182 (100 - BEACON_THRESHOLD); 3199 (100 - BEACON_THRESHOLD);
3183 IPW_DEBUG_STATS("Missed beacon: %3d%% (%d%%)\n", 3200 IPW_DEBUG_STATS("Missed beacon: %3d%% (%d%%)\n",
3184 beacon_quality, missed_beacons_percent); 3201 beacon_quality, missed_beacons_percent);
3185 3202
3186 priv->last_rate = ipw_get_current_rate(priv); 3203 priv->last_rate = ipw_get_current_rate(priv);
3187 rate_quality = priv->last_rate * 40 / priv->last_rate + 60; 3204 rate_quality = priv->last_rate * 40 / priv->last_rate + 60;
3188 IPW_DEBUG_STATS("Rate quality : %3d%% (%dMbs)\n", 3205 IPW_DEBUG_STATS("Rate quality : %3d%% (%dMbs)\n",
3189 rate_quality, priv->last_rate / 1000000); 3206 rate_quality, priv->last_rate / 1000000);
3190 3207
3191 if (rx_packets_delta > 100 && 3208 if (rx_packets_delta > 100 && rx_packets_delta + rx_err_delta)
3192 rx_packets_delta + rx_err_delta)
3193 rx_quality = 100 - (rx_err_delta * 100) / 3209 rx_quality = 100 - (rx_err_delta * 100) /
3194 (rx_packets_delta + rx_err_delta); 3210 (rx_packets_delta + rx_err_delta);
3195 else 3211 else
3196 rx_quality = 100; 3212 rx_quality = 100;
3197 IPW_DEBUG_STATS("Rx quality : %3d%% (%u errors, %u packets)\n", 3213 IPW_DEBUG_STATS("Rx quality : %3d%% (%u errors, %u packets)\n",
3198 rx_quality, rx_err_delta, rx_packets_delta); 3214 rx_quality, rx_err_delta, rx_packets_delta);
3199 3215
3200 if (tx_packets_delta > 100 && 3216 if (tx_packets_delta > 100 && tx_packets_delta + tx_failures_delta)
3201 tx_packets_delta + tx_failures_delta)
3202 tx_quality = 100 - (tx_failures_delta * 100) / 3217 tx_quality = 100 - (tx_failures_delta * 100) /
3203 (tx_packets_delta + tx_failures_delta); 3218 (tx_packets_delta + tx_failures_delta);
3204 else 3219 else
3205 tx_quality = 100; 3220 tx_quality = 100;
3206 IPW_DEBUG_STATS("Tx quality : %3d%% (%u errors, %u packets)\n", 3221 IPW_DEBUG_STATS("Tx quality : %3d%% (%u errors, %u packets)\n",
@@ -3213,7 +3228,7 @@ static void ipw_gather_stats(struct ipw_priv *priv)
3213 signal_quality = 0; 3228 signal_quality = 0;
3214 else 3229 else
3215 signal_quality = (rssi - WORST_RSSI) * 100 / 3230 signal_quality = (rssi - WORST_RSSI) * 100 /
3216 (PERFECT_RSSI - WORST_RSSI); 3231 (PERFECT_RSSI - WORST_RSSI);
3217 IPW_DEBUG_STATS("Signal level : %3d%% (%d dBm)\n", 3232 IPW_DEBUG_STATS("Signal level : %3d%% (%d dBm)\n",
3218 signal_quality, rssi); 3233 signal_quality, rssi);
3219 3234
@@ -3221,25 +3236,20 @@ static void ipw_gather_stats(struct ipw_priv *priv)
3221 min(rate_quality, 3236 min(rate_quality,
3222 min(tx_quality, min(rx_quality, signal_quality)))); 3237 min(tx_quality, min(rx_quality, signal_quality))));
3223 if (quality == beacon_quality) 3238 if (quality == beacon_quality)
3224 IPW_DEBUG_STATS( 3239 IPW_DEBUG_STATS("Quality (%d%%): Clamped to missed beacons.\n",
3225 "Quality (%d%%): Clamped to missed beacons.\n", 3240 quality);
3226 quality);
3227 if (quality == rate_quality) 3241 if (quality == rate_quality)
3228 IPW_DEBUG_STATS( 3242 IPW_DEBUG_STATS("Quality (%d%%): Clamped to rate quality.\n",
3229 "Quality (%d%%): Clamped to rate quality.\n", 3243 quality);
3230 quality);
3231 if (quality == tx_quality) 3244 if (quality == tx_quality)
3232 IPW_DEBUG_STATS( 3245 IPW_DEBUG_STATS("Quality (%d%%): Clamped to Tx quality.\n",
3233 "Quality (%d%%): Clamped to Tx quality.\n", 3246 quality);
3234 quality);
3235 if (quality == rx_quality) 3247 if (quality == rx_quality)
3236 IPW_DEBUG_STATS( 3248 IPW_DEBUG_STATS("Quality (%d%%): Clamped to Rx quality.\n",
3237 "Quality (%d%%): Clamped to Rx quality.\n", 3249 quality);
3238 quality);
3239 if (quality == signal_quality) 3250 if (quality == signal_quality)
3240 IPW_DEBUG_STATS( 3251 IPW_DEBUG_STATS("Quality (%d%%): Clamped to signal quality.\n",
3241 "Quality (%d%%): Clamped to signal quality.\n", 3252 quality);
3242 quality);
3243 3253
3244 priv->quality = quality; 3254 priv->quality = quality;
3245 3255
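The statistics hunk above scales each factor (missed beacons, rate, Tx errors, Rx errors, signal level) to a 0-100 figure, clamping anything below a threshold to zero, and reports the worst of them as the link quality. A reduced illustration of that clamping and min-selection; the threshold and sample inputs are made up:

/* Reduced illustration of the clamping and min-selection above. */
#include <stdio.h>

static unsigned int scale_above_threshold(unsigned int value,
					  unsigned int threshold)
{
	/* at or below the threshold scores 0; above it, the remaining
	 * headroom is rescaled to 0..100 */
	if (value <= threshold)
		return 0;
	return (value - threshold) * 100 / (100 - threshold);
}

static unsigned int umin(unsigned int a, unsigned int b)
{
	return a < b ? a : b;
}

int main(void)
{
	unsigned int beacon_q = scale_above_threshold(80, 30);	/* = 71 */
	unsigned int rate_q = 90, tx_q = 100, rx_q = 95, signal_q = 60;
	unsigned int quality = umin(beacon_q,
				    umin(rate_q,
					 umin(tx_q, umin(rx_q, signal_q))));

	printf("quality = %u%%\n", quality);	/* clamped to the worst factor: 60 */
	return 0;
}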
@@ -3251,402 +3261,454 @@ static void ipw_gather_stats(struct ipw_priv *priv)
3251 * Handle host notification packet. 3261 * Handle host notification packet.
3252 * Called from interrupt routine 3262 * Called from interrupt routine
3253 */ 3263 */
3254static inline void ipw_rx_notification(struct ipw_priv* priv, 3264static inline void ipw_rx_notification(struct ipw_priv *priv,
3255 struct ipw_rx_notification *notif) 3265 struct ipw_rx_notification *notif)
3256{ 3266{
3257 IPW_DEBUG_NOTIF("type = %i (%d bytes)\n", 3267 IPW_DEBUG_NOTIF("type = %i (%d bytes)\n", notif->subtype, notif->size);
3258 notif->subtype, notif->size);
3259 3268
3260 switch (notif->subtype) { 3269 switch (notif->subtype) {
3261 case HOST_NOTIFICATION_STATUS_ASSOCIATED: { 3270 case HOST_NOTIFICATION_STATUS_ASSOCIATED:{
3262 struct notif_association *assoc = &notif->u.assoc; 3271 struct notif_association *assoc = &notif->u.assoc;
3263 3272
3264 switch (assoc->state) { 3273 switch (assoc->state) {
3265 case CMAS_ASSOCIATED: { 3274 case CMAS_ASSOCIATED:{
3266 IPW_DEBUG(IPW_DL_NOTIF | IPW_DL_STATE | IPW_DL_ASSOC, 3275 IPW_DEBUG(IPW_DL_NOTIF | IPW_DL_STATE |
3267 "associated: '%s' " MAC_FMT " \n", 3276 IPW_DL_ASSOC,
3268 escape_essid(priv->essid, priv->essid_len), 3277 "associated: '%s' " MAC_FMT
3269 MAC_ARG(priv->bssid)); 3278 " \n",
3270 3279 escape_essid(priv->essid,
3271 switch (priv->ieee->iw_mode) { 3280 priv->essid_len),
3272 case IW_MODE_INFRA: 3281 MAC_ARG(priv->bssid));
3273 memcpy(priv->ieee->bssid, priv->bssid, 3282
3274 ETH_ALEN); 3283 switch (priv->ieee->iw_mode) {
3275 break; 3284 case IW_MODE_INFRA:
3276 3285 memcpy(priv->ieee->bssid,
3277 case IW_MODE_ADHOC: 3286 priv->bssid, ETH_ALEN);
3278 memcpy(priv->ieee->bssid, priv->bssid, 3287 break;
3279 ETH_ALEN); 3288
3280 3289 case IW_MODE_ADHOC:
3281 /* clear out the station table */ 3290 memcpy(priv->ieee->bssid,
3282 priv->num_stations = 0; 3291 priv->bssid, ETH_ALEN);
3283 3292
3284 IPW_DEBUG_ASSOC("queueing adhoc check\n"); 3293 /* clear out the station table */
3285 queue_delayed_work(priv->workqueue, 3294 priv->num_stations = 0;
3286 &priv->adhoc_check, 3295
3287 priv->assoc_request.beacon_interval); 3296 IPW_DEBUG_ASSOC
3288 break; 3297 ("queueing adhoc check\n");
3289 } 3298 queue_delayed_work(priv->
3290 3299 workqueue,
3291 priv->status &= ~STATUS_ASSOCIATING; 3300 &priv->
3292 priv->status |= STATUS_ASSOCIATED; 3301 adhoc_check,
3293 3302 priv->
3294 netif_carrier_on(priv->net_dev); 3303 assoc_request.
3295 if (netif_queue_stopped(priv->net_dev)) { 3304 beacon_interval);
3296 IPW_DEBUG_NOTIF("waking queue\n"); 3305 break;
3297 netif_wake_queue(priv->net_dev); 3306 }
3298 } else { 3307
3299 IPW_DEBUG_NOTIF("starting queue\n"); 3308 priv->status &= ~STATUS_ASSOCIATING;
3300 netif_start_queue(priv->net_dev); 3309 priv->status |= STATUS_ASSOCIATED;
3301 } 3310
3302 3311 netif_carrier_on(priv->net_dev);
3303 ipw_reset_stats(priv); 3312 if (netif_queue_stopped(priv->net_dev)) {
3304 /* Ensure the rate is updated immediately */ 3313 IPW_DEBUG_NOTIF
3305 priv->last_rate = ipw_get_current_rate(priv); 3314 ("waking queue\n");
3306 schedule_work(&priv->gather_stats); 3315 netif_wake_queue(priv->net_dev);
3307 notify_wx_assoc_event(priv); 3316 } else {
3317 IPW_DEBUG_NOTIF
3318 ("starting queue\n");
3319 netif_start_queue(priv->
3320 net_dev);
3321 }
3322
3323 ipw_reset_stats(priv);
3324 /* Ensure the rate is updated immediately */
3325 priv->last_rate =
3326 ipw_get_current_rate(priv);
3327 schedule_work(&priv->gather_stats);
3328 notify_wx_assoc_event(priv);
3308 3329
3309/* queue_delayed_work(priv->workqueue, 3330/* queue_delayed_work(priv->workqueue,
3310 &priv->request_scan, 3331 &priv->request_scan,
3311 SCAN_ASSOCIATED_INTERVAL); 3332 SCAN_ASSOCIATED_INTERVAL);
3312*/ 3333*/
3313 break; 3334 break;
3314 } 3335 }
3315 3336
3316 case CMAS_AUTHENTICATED: { 3337 case CMAS_AUTHENTICATED:{
3317 if (priv->status & (STATUS_ASSOCIATED | STATUS_AUTH)) { 3338 if (priv->
3339 status & (STATUS_ASSOCIATED |
3340 STATUS_AUTH)) {
3318#ifdef CONFIG_IPW_DEBUG 3341#ifdef CONFIG_IPW_DEBUG
3319 struct notif_authenticate *auth = &notif->u.auth; 3342 struct notif_authenticate *auth
3320 IPW_DEBUG(IPW_DL_NOTIF | IPW_DL_STATE | IPW_DL_ASSOC, 3343 = &notif->u.auth;
3321 "deauthenticated: '%s' " MAC_FMT ": (0x%04X) - %s \n", 3344 IPW_DEBUG(IPW_DL_NOTIF |
3322 escape_essid(priv->essid, priv->essid_len), 3345 IPW_DL_STATE |
3323 MAC_ARG(priv->bssid), 3346 IPW_DL_ASSOC,
3324 ntohs(auth->status), 3347 "deauthenticated: '%s' "
3325 ipw_get_status_code(ntohs(auth->status))); 3348 MAC_FMT
3349 ": (0x%04X) - %s \n",
3350 escape_essid(priv->
3351 essid,
3352 priv->
3353 essid_len),
3354 MAC_ARG(priv->bssid),
3355 ntohs(auth->status),
3356 ipw_get_status_code
3357 (ntohs
3358 (auth->status)));
3326#endif 3359#endif
3327 3360
3328 priv->status &= ~(STATUS_ASSOCIATING | 3361 priv->status &=
3329 STATUS_AUTH | 3362 ~(STATUS_ASSOCIATING |
3330 STATUS_ASSOCIATED); 3363 STATUS_AUTH |
3364 STATUS_ASSOCIATED);
3365
3366 netif_carrier_off(priv->
3367 net_dev);
3368 netif_stop_queue(priv->net_dev);
3369 queue_work(priv->workqueue,
3370 &priv->request_scan);
3371 notify_wx_assoc_event(priv);
3372 break;
3373 }
3374
3375 IPW_DEBUG(IPW_DL_NOTIF | IPW_DL_STATE |
3376 IPW_DL_ASSOC,
3377 "authenticated: '%s' " MAC_FMT
3378 "\n",
3379 escape_essid(priv->essid,
3380 priv->essid_len),
3381 MAC_ARG(priv->bssid));
3382 break;
3383 }
3384
3385 case CMAS_INIT:{
3386 IPW_DEBUG(IPW_DL_NOTIF | IPW_DL_STATE |
3387 IPW_DL_ASSOC,
3388 "disassociated: '%s' " MAC_FMT
3389 " \n",
3390 escape_essid(priv->essid,
3391 priv->essid_len),
3392 MAC_ARG(priv->bssid));
3393
3394 priv->status &=
3395 ~(STATUS_DISASSOCIATING |
3396 STATUS_ASSOCIATING |
3397 STATUS_ASSOCIATED | STATUS_AUTH);
3398
3399 netif_stop_queue(priv->net_dev);
3400 if (!(priv->status & STATUS_ROAMING)) {
3401 netif_carrier_off(priv->
3402 net_dev);
3403 notify_wx_assoc_event(priv);
3404
3405 /* Cancel any queued work ... */
3406 cancel_delayed_work(&priv->
3407 request_scan);
3408 cancel_delayed_work(&priv->
3409 adhoc_check);
3410
3411 /* Queue up another scan... */
3412 queue_work(priv->workqueue,
3413 &priv->request_scan);
3414
3415 cancel_delayed_work(&priv->
3416 gather_stats);
3417 } else {
3418 priv->status |= STATUS_ROAMING;
3419 queue_work(priv->workqueue,
3420 &priv->request_scan);
3421 }
3422
3423 ipw_reset_stats(priv);
3424 break;
3425 }
3331 3426
3332 netif_carrier_off(priv->net_dev); 3427 default:
3333 netif_stop_queue(priv->net_dev); 3428 IPW_ERROR("assoc: unknown (%d)\n",
3334 queue_work(priv->workqueue, &priv->request_scan); 3429 assoc->state);
3335 notify_wx_assoc_event(priv);
3336 break; 3430 break;
3337 } 3431 }
3338 3432
3339 IPW_DEBUG(IPW_DL_NOTIF | IPW_DL_STATE | IPW_DL_ASSOC,
3340 "authenticated: '%s' " MAC_FMT "\n",
3341 escape_essid(priv->essid, priv->essid_len),
3342 MAC_ARG(priv->bssid));
3343 break; 3433 break;
3344 } 3434 }
3345 3435
3346 case CMAS_INIT: { 3436 case HOST_NOTIFICATION_STATUS_AUTHENTICATE:{
3347 IPW_DEBUG(IPW_DL_NOTIF | IPW_DL_STATE | IPW_DL_ASSOC, 3437 struct notif_authenticate *auth = &notif->u.auth;
3348 "disassociated: '%s' " MAC_FMT " \n", 3438 switch (auth->state) {
3349 escape_essid(priv->essid, priv->essid_len), 3439 case CMAS_AUTHENTICATED:
3350 MAC_ARG(priv->bssid)); 3440 IPW_DEBUG(IPW_DL_NOTIF | IPW_DL_STATE,
3441 "authenticated: '%s' " MAC_FMT " \n",
3442 escape_essid(priv->essid,
3443 priv->essid_len),
3444 MAC_ARG(priv->bssid));
3445 priv->status |= STATUS_AUTH;
3446 break;
3351 3447
3352 priv->status &= ~( 3448 case CMAS_INIT:
3353 STATUS_DISASSOCIATING | 3449 if (priv->status & STATUS_AUTH) {
3354 STATUS_ASSOCIATING | 3450 IPW_DEBUG(IPW_DL_NOTIF | IPW_DL_STATE |
3355 STATUS_ASSOCIATED | 3451 IPW_DL_ASSOC,
3356 STATUS_AUTH); 3452 "authentication failed (0x%04X): %s\n",
3453 ntohs(auth->status),
3454 ipw_get_status_code(ntohs
3455 (auth->
3456 status)));
3457 }
3458 IPW_DEBUG(IPW_DL_NOTIF | IPW_DL_STATE |
3459 IPW_DL_ASSOC,
3460 "deauthenticated: '%s' " MAC_FMT "\n",
3461 escape_essid(priv->essid,
3462 priv->essid_len),
3463 MAC_ARG(priv->bssid));
3357 3464
3358 netif_stop_queue(priv->net_dev); 3465 priv->status &= ~(STATUS_ASSOCIATING |
3359 if (!(priv->status & STATUS_ROAMING)) { 3466 STATUS_AUTH |
3360 netif_carrier_off(priv->net_dev); 3467 STATUS_ASSOCIATED);
3361 notify_wx_assoc_event(priv);
3362
3363 /* Cancel any queued work ... */
3364 cancel_delayed_work(&priv->request_scan);
3365 cancel_delayed_work(&priv->adhoc_check);
3366 3468
3367 /* Queue up another scan... */ 3469 netif_carrier_off(priv->net_dev);
3470 netif_stop_queue(priv->net_dev);
3368 queue_work(priv->workqueue, 3471 queue_work(priv->workqueue,
3369 &priv->request_scan); 3472 &priv->request_scan);
3473 notify_wx_assoc_event(priv);
3474 break;
3370 3475
3371 cancel_delayed_work(&priv->gather_stats); 3476 case CMAS_TX_AUTH_SEQ_1:
3372 } else { 3477 IPW_DEBUG(IPW_DL_NOTIF | IPW_DL_STATE |
3373 priv->status |= STATUS_ROAMING; 3478 IPW_DL_ASSOC, "AUTH_SEQ_1\n");
3374 queue_work(priv->workqueue, 3479 break;
3375 &priv->request_scan); 3480 case CMAS_RX_AUTH_SEQ_2:
3481 IPW_DEBUG(IPW_DL_NOTIF | IPW_DL_STATE |
3482 IPW_DL_ASSOC, "AUTH_SEQ_2\n");
3483 break;
3484 case CMAS_AUTH_SEQ_1_PASS:
3485 IPW_DEBUG(IPW_DL_NOTIF | IPW_DL_STATE |
3486 IPW_DL_ASSOC, "AUTH_SEQ_1_PASS\n");
3487 break;
3488 case CMAS_AUTH_SEQ_1_FAIL:
3489 IPW_DEBUG(IPW_DL_NOTIF | IPW_DL_STATE |
3490 IPW_DL_ASSOC, "AUTH_SEQ_1_FAIL\n");
3491 break;
3492 case CMAS_TX_AUTH_SEQ_3:
3493 IPW_DEBUG(IPW_DL_NOTIF | IPW_DL_STATE |
3494 IPW_DL_ASSOC, "AUTH_SEQ_3\n");
3495 break;
3496 case CMAS_RX_AUTH_SEQ_4:
3497 IPW_DEBUG(IPW_DL_NOTIF | IPW_DL_STATE |
3498 IPW_DL_ASSOC, "RX_AUTH_SEQ_4\n");
3499 break;
3500 case CMAS_AUTH_SEQ_2_PASS:
3501 IPW_DEBUG(IPW_DL_NOTIF | IPW_DL_STATE |
3502 IPW_DL_ASSOC, "AUTH_SEQ_2_PASS\n");
3503 break;
3504 case CMAS_AUTH_SEQ_2_FAIL:
3505 IPW_DEBUG(IPW_DL_NOTIF | IPW_DL_STATE |
3506 IPW_DL_ASSOC, "AUT_SEQ_2_FAIL\n");
3507 break;
3508 case CMAS_TX_ASSOC:
3509 IPW_DEBUG(IPW_DL_NOTIF | IPW_DL_STATE |
3510 IPW_DL_ASSOC, "TX_ASSOC\n");
3511 break;
3512 case CMAS_RX_ASSOC_RESP:
3513 IPW_DEBUG(IPW_DL_NOTIF | IPW_DL_STATE |
3514 IPW_DL_ASSOC, "RX_ASSOC_RESP\n");
3515 break;
3516 case CMAS_ASSOCIATED:
3517 IPW_DEBUG(IPW_DL_NOTIF | IPW_DL_STATE |
3518 IPW_DL_ASSOC, "ASSOCIATED\n");
3519 break;
3520 default:
3521 IPW_DEBUG_NOTIF("auth: failure - %d\n",
3522 auth->state);
3523 break;
3376 } 3524 }
3377
3378 ipw_reset_stats(priv);
3379 break;
3380 }
3381
3382 default:
3383 IPW_ERROR("assoc: unknown (%d)\n",
3384 assoc->state);
3385 break; 3525 break;
3386 } 3526 }
3387 3527
3388 break; 3528 case HOST_NOTIFICATION_STATUS_SCAN_CHANNEL_RESULT:{
3389 } 3529 struct notif_channel_result *x =
3530 &notif->u.channel_result;
3390 3531
3391 case HOST_NOTIFICATION_STATUS_AUTHENTICATE: { 3532 if (notif->size == sizeof(*x)) {
3392 struct notif_authenticate *auth = &notif->u.auth; 3533 IPW_DEBUG_SCAN("Scan result for channel %d\n",
3393 switch (auth->state) { 3534 x->channel_num);
3394 case CMAS_AUTHENTICATED: 3535 } else {
3395 IPW_DEBUG(IPW_DL_NOTIF | IPW_DL_STATE, 3536 IPW_DEBUG_SCAN("Scan result of wrong size %d "
3396 "authenticated: '%s' " MAC_FMT " \n", 3537 "(should be %zd)\n",
3397 escape_essid(priv->essid, priv->essid_len), 3538 notif->size, sizeof(*x));
3398 MAC_ARG(priv->bssid));
3399 priv->status |= STATUS_AUTH;
3400 break;
3401
3402 case CMAS_INIT:
3403 if (priv->status & STATUS_AUTH) {
3404 IPW_DEBUG(IPW_DL_NOTIF | IPW_DL_STATE | IPW_DL_ASSOC,
3405 "authentication failed (0x%04X): %s\n",
3406 ntohs(auth->status),
3407 ipw_get_status_code(ntohs(auth->status)));
3408 } 3539 }
3409 IPW_DEBUG(IPW_DL_NOTIF | IPW_DL_STATE | IPW_DL_ASSOC,
3410 "deauthenticated: '%s' " MAC_FMT "\n",
3411 escape_essid(priv->essid, priv->essid_len),
3412 MAC_ARG(priv->bssid));
3413
3414 priv->status &= ~(STATUS_ASSOCIATING |
3415 STATUS_AUTH |
3416 STATUS_ASSOCIATED);
3417
3418 netif_carrier_off(priv->net_dev);
3419 netif_stop_queue(priv->net_dev);
3420 queue_work(priv->workqueue, &priv->request_scan);
3421 notify_wx_assoc_event(priv);
3422 break;
3423
3424 case CMAS_TX_AUTH_SEQ_1:
3425 IPW_DEBUG(IPW_DL_NOTIF | IPW_DL_STATE | IPW_DL_ASSOC,
3426 "AUTH_SEQ_1\n");
3427 break;
3428 case CMAS_RX_AUTH_SEQ_2:
3429 IPW_DEBUG(IPW_DL_NOTIF | IPW_DL_STATE | IPW_DL_ASSOC,
3430 "AUTH_SEQ_2\n");
3431 break;
3432 case CMAS_AUTH_SEQ_1_PASS:
3433 IPW_DEBUG(IPW_DL_NOTIF | IPW_DL_STATE | IPW_DL_ASSOC,
3434 "AUTH_SEQ_1_PASS\n");
3435 break;
3436 case CMAS_AUTH_SEQ_1_FAIL:
3437 IPW_DEBUG(IPW_DL_NOTIF | IPW_DL_STATE | IPW_DL_ASSOC,
3438 "AUTH_SEQ_1_FAIL\n");
3439 break;
3440 case CMAS_TX_AUTH_SEQ_3:
3441 IPW_DEBUG(IPW_DL_NOTIF | IPW_DL_STATE | IPW_DL_ASSOC,
3442 "AUTH_SEQ_3\n");
3443 break;
3444 case CMAS_RX_AUTH_SEQ_4:
3445 IPW_DEBUG(IPW_DL_NOTIF | IPW_DL_STATE | IPW_DL_ASSOC,
3446 "RX_AUTH_SEQ_4\n");
3447 break;
3448 case CMAS_AUTH_SEQ_2_PASS:
3449 IPW_DEBUG(IPW_DL_NOTIF | IPW_DL_STATE | IPW_DL_ASSOC,
3450 "AUTH_SEQ_2_PASS\n");
3451 break;
3452 case CMAS_AUTH_SEQ_2_FAIL:
3453 IPW_DEBUG(IPW_DL_NOTIF | IPW_DL_STATE | IPW_DL_ASSOC,
3454 "AUT_SEQ_2_FAIL\n");
3455 break;
3456 case CMAS_TX_ASSOC:
3457 IPW_DEBUG(IPW_DL_NOTIF | IPW_DL_STATE | IPW_DL_ASSOC,
3458 "TX_ASSOC\n");
3459 break;
3460 case CMAS_RX_ASSOC_RESP:
3461 IPW_DEBUG(IPW_DL_NOTIF | IPW_DL_STATE | IPW_DL_ASSOC,
3462 "RX_ASSOC_RESP\n");
3463 break;
3464 case CMAS_ASSOCIATED:
3465 IPW_DEBUG(IPW_DL_NOTIF | IPW_DL_STATE | IPW_DL_ASSOC,
3466 "ASSOCIATED\n");
3467 break;
3468 default:
3469 IPW_DEBUG_NOTIF("auth: failure - %d\n", auth->state);
3470 break; 3540 break;
3471 } 3541 }
3472 break;
3473 }
3474
3475 case HOST_NOTIFICATION_STATUS_SCAN_CHANNEL_RESULT: {
3476 struct notif_channel_result *x = &notif->u.channel_result;
3477
3478 if (notif->size == sizeof(*x)) {
3479 IPW_DEBUG_SCAN("Scan result for channel %d\n",
3480 x->channel_num);
3481 } else {
3482 IPW_DEBUG_SCAN("Scan result of wrong size %d "
3483 "(should be %zd)\n",
3484 notif->size, sizeof(*x));
3485 }
3486 break;
3487 }
3488 3542
3489 case HOST_NOTIFICATION_STATUS_SCAN_COMPLETED: { 3543 case HOST_NOTIFICATION_STATUS_SCAN_COMPLETED:{
3490 struct notif_scan_complete* x = &notif->u.scan_complete; 3544 struct notif_scan_complete *x = &notif->u.scan_complete;
3491 if (notif->size == sizeof(*x)) { 3545 if (notif->size == sizeof(*x)) {
3492 IPW_DEBUG_SCAN("Scan completed: type %d, %d channels, " 3546 IPW_DEBUG_SCAN
3493 "%d status\n", 3547 ("Scan completed: type %d, %d channels, "
3494 x->scan_type, 3548 "%d status\n", x->scan_type,
3495 x->num_channels, 3549 x->num_channels, x->status);
3496 x->status); 3550 } else {
3497 } else { 3551 IPW_ERROR("Scan completed of wrong size %d "
3498 IPW_ERROR("Scan completed of wrong size %d " 3552 "(should be %zd)\n",
3499 "(should be %zd)\n", 3553 notif->size, sizeof(*x));
3500 notif->size, sizeof(*x)); 3554 }
3501 }
3502
3503 priv->status &= ~(STATUS_SCANNING | STATUS_SCAN_ABORTING);
3504
3505 cancel_delayed_work(&priv->scan_check);
3506
3507 if (!(priv->status & (STATUS_ASSOCIATED |
3508 STATUS_ASSOCIATING |
3509 STATUS_ROAMING |
3510 STATUS_DISASSOCIATING)))
3511 queue_work(priv->workqueue, &priv->associate);
3512 else if (priv->status & STATUS_ROAMING) {
3513 /* If a scan completed and we are in roam mode, then
3514 * the scan that completed was the one requested as a
3515 * result of entering roam... so, schedule the
3516 * roam work */
3517 queue_work(priv->workqueue, &priv->roam);
3518 } else if (priv->status & STATUS_SCAN_PENDING)
3519 queue_work(priv->workqueue, &priv->request_scan);
3520
3521 priv->ieee->scans++;
3522 break;
3523 }
3524 3555
3525 case HOST_NOTIFICATION_STATUS_FRAG_LENGTH: { 3556 priv->status &=
3526 struct notif_frag_length *x = &notif->u.frag_len; 3557 ~(STATUS_SCANNING | STATUS_SCAN_ABORTING);
3558
3559 cancel_delayed_work(&priv->scan_check);
3560
3561 if (!(priv->status & (STATUS_ASSOCIATED |
3562 STATUS_ASSOCIATING |
3563 STATUS_ROAMING |
3564 STATUS_DISASSOCIATING)))
3565 queue_work(priv->workqueue, &priv->associate);
3566 else if (priv->status & STATUS_ROAMING) {
3567 /* If a scan completed and we are in roam mode, then
3568 * the scan that completed was the one requested as a
3569 * result of entering roam... so, schedule the
3570 * roam work */
3571 queue_work(priv->workqueue, &priv->roam);
3572 } else if (priv->status & STATUS_SCAN_PENDING)
3573 queue_work(priv->workqueue,
3574 &priv->request_scan);
3527 3575
3528 if (notif->size == sizeof(*x)) { 3576 priv->ieee->scans++;
3529 IPW_ERROR("Frag length: %d\n", x->frag_length); 3577 break;
3530 } else {
3531 IPW_ERROR("Frag length of wrong size %d "
3532 "(should be %zd)\n",
3533 notif->size, sizeof(*x));
3534 } 3578 }
3535 break;
3536 }
3537 3579
3538 case HOST_NOTIFICATION_STATUS_LINK_DETERIORATION: { 3580 case HOST_NOTIFICATION_STATUS_FRAG_LENGTH:{
3539 struct notif_link_deterioration *x = 3581 struct notif_frag_length *x = &notif->u.frag_len;
3540 &notif->u.link_deterioration;
3541 if (notif->size==sizeof(*x)) {
3542 IPW_DEBUG(IPW_DL_NOTIF | IPW_DL_STATE,
3543 "link deterioration: '%s' " MAC_FMT " \n",
3544 escape_essid(priv->essid, priv->essid_len),
3545 MAC_ARG(priv->bssid));
3546 memcpy(&priv->last_link_deterioration, x, sizeof(*x));
3547 } else {
3548 IPW_ERROR("Link Deterioration of wrong size %d "
3549 "(should be %zd)\n",
3550 notif->size, sizeof(*x));
3551 }
3552 break;
3553 }
3554 3582
3555 case HOST_NOTIFICATION_DINO_CONFIG_RESPONSE: { 3583 if (notif->size == sizeof(*x)) {
3556 IPW_ERROR("Dino config\n"); 3584 IPW_ERROR("Frag length: %d\n", x->frag_length);
3557 if (priv->hcmd && priv->hcmd->cmd == HOST_CMD_DINO_CONFIG) { 3585 } else {
3558 /* TODO: Do anything special? */ 3586 IPW_ERROR("Frag length of wrong size %d "
3559 } else { 3587 "(should be %zd)\n",
3560 IPW_ERROR("Unexpected DINO_CONFIG_RESPONSE\n"); 3588 notif->size, sizeof(*x));
3589 }
3590 break;
3561 } 3591 }
3562 break;
3563 }
3564 3592
3565 case HOST_NOTIFICATION_STATUS_BEACON_STATE: { 3593 case HOST_NOTIFICATION_STATUS_LINK_DETERIORATION:{
3566 struct notif_beacon_state *x = &notif->u.beacon_state; 3594 struct notif_link_deterioration *x =
3567 if (notif->size != sizeof(*x)) { 3595 &notif->u.link_deterioration;
3568 IPW_ERROR("Beacon state of wrong size %d (should " 3596 if (notif->size == sizeof(*x)) {
3569 "be %zd)\n", notif->size, sizeof(*x)); 3597 IPW_DEBUG(IPW_DL_NOTIF | IPW_DL_STATE,
3598 "link deterioration: '%s' " MAC_FMT
3599 " \n", escape_essid(priv->essid,
3600 priv->essid_len),
3601 MAC_ARG(priv->bssid));
3602 memcpy(&priv->last_link_deterioration, x,
3603 sizeof(*x));
3604 } else {
3605 IPW_ERROR("Link Deterioration of wrong size %d "
3606 "(should be %zd)\n",
3607 notif->size, sizeof(*x));
3608 }
3570 break; 3609 break;
3571 } 3610 }
3572 3611
3573 if (x->state == HOST_NOTIFICATION_STATUS_BEACON_MISSING) { 3612 case HOST_NOTIFICATION_DINO_CONFIG_RESPONSE:{
3574 if (priv->status & STATUS_SCANNING) { 3613 IPW_ERROR("Dino config\n");
3575 /* Stop scan to keep fw from getting 3614 if (priv->hcmd
3576 * stuck... */ 3615 && priv->hcmd->cmd == HOST_CMD_DINO_CONFIG) {
3577 queue_work(priv->workqueue, 3616 /* TODO: Do anything special? */
3578 &priv->abort_scan); 3617 } else {
3618 IPW_ERROR("Unexpected DINO_CONFIG_RESPONSE\n");
3579 } 3619 }
3620 break;
3621 }
3580 3622
3581 if (x->number > priv->missed_beacon_threshold && 3623 case HOST_NOTIFICATION_STATUS_BEACON_STATE:{
3582 priv->status & STATUS_ASSOCIATED) { 3624 struct notif_beacon_state *x = &notif->u.beacon_state;
3583 IPW_DEBUG(IPW_DL_INFO | IPW_DL_NOTIF | 3625 if (notif->size != sizeof(*x)) {
3584 IPW_DL_STATE, 3626 IPW_ERROR
3585 "Missed beacon: %d - disassociate\n", 3627 ("Beacon state of wrong size %d (should "
3586 x->number); 3628 "be %zd)\n", notif->size, sizeof(*x));
3587 queue_work(priv->workqueue, 3629 break;
3588 &priv->disassociate);
3589 } else if (x->number > priv->roaming_threshold) {
3590 IPW_DEBUG(IPW_DL_NOTIF | IPW_DL_STATE,
3591 "Missed beacon: %d - initiate "
3592 "roaming\n",
3593 x->number);
3594 queue_work(priv->workqueue,
3595 &priv->roam);
3596 } else {
3597 IPW_DEBUG_NOTIF("Missed beacon: %d\n",
3598 x->number);
3599 } 3630 }
3600 3631
3601 priv->notif_missed_beacons = x->number; 3632 if (x->state == HOST_NOTIFICATION_STATUS_BEACON_MISSING) {
3633 if (priv->status & STATUS_SCANNING) {
3634 /* Stop scan to keep fw from getting
3635 * stuck... */
3636 queue_work(priv->workqueue,
3637 &priv->abort_scan);
3638 }
3639
3640 if (x->number > priv->missed_beacon_threshold &&
3641 priv->status & STATUS_ASSOCIATED) {
3642 IPW_DEBUG(IPW_DL_INFO | IPW_DL_NOTIF |
3643 IPW_DL_STATE,
3644 "Missed beacon: %d - disassociate\n",
3645 x->number);
3646 queue_work(priv->workqueue,
3647 &priv->disassociate);
3648 } else if (x->number > priv->roaming_threshold) {
3649 IPW_DEBUG(IPW_DL_NOTIF | IPW_DL_STATE,
3650 "Missed beacon: %d - initiate "
3651 "roaming\n", x->number);
3652 queue_work(priv->workqueue,
3653 &priv->roam);
3654 } else {
3655 IPW_DEBUG_NOTIF("Missed beacon: %d\n",
3656 x->number);
3657 }
3658
3659 priv->notif_missed_beacons = x->number;
3602 3660
3603 } 3661 }
3604 3662
3663 break;
3664 }
3605 3665
3606 break; 3666 case HOST_NOTIFICATION_STATUS_TGI_TX_KEY:{
3607 } 3667 struct notif_tgi_tx_key *x = &notif->u.tgi_tx_key;
3668 if (notif->size == sizeof(*x)) {
3669 IPW_ERROR("TGi Tx Key: state 0x%02x sec type "
3670 "0x%02x station %d\n",
3671 x->key_state, x->security_type,
3672 x->station_index);
3673 break;
3674 }
3608 3675
3609 case HOST_NOTIFICATION_STATUS_TGI_TX_KEY: { 3676 IPW_ERROR
3610 struct notif_tgi_tx_key *x = &notif->u.tgi_tx_key; 3677 ("TGi Tx Key of wrong size %d (should be %zd)\n",
3611 if (notif->size==sizeof(*x)) { 3678 notif->size, sizeof(*x));
3612 IPW_ERROR("TGi Tx Key: state 0x%02x sec type "
3613 "0x%02x station %d\n",
3614 x->key_state,x->security_type,
3615 x->station_index);
3616 break; 3679 break;
3617 } 3680 }
3618 3681
3619 IPW_ERROR("TGi Tx Key of wrong size %d (should be %zd)\n", 3682 case HOST_NOTIFICATION_CALIB_KEEP_RESULTS:{
3620 notif->size, sizeof(*x)); 3683 struct notif_calibration *x = &notif->u.calibration;
3621 break;
3622 }
3623 3684
3624 case HOST_NOTIFICATION_CALIB_KEEP_RESULTS: { 3685 if (notif->size == sizeof(*x)) {
3625 struct notif_calibration *x = &notif->u.calibration; 3686 memcpy(&priv->calib, x, sizeof(*x));
3687 IPW_DEBUG_INFO("TODO: Calibration\n");
3688 break;
3689 }
3626 3690
3627 if (notif->size == sizeof(*x)) { 3691 IPW_ERROR
3628 memcpy(&priv->calib, x, sizeof(*x)); 3692 ("Calibration of wrong size %d (should be %zd)\n",
3629 IPW_DEBUG_INFO("TODO: Calibration\n"); 3693 notif->size, sizeof(*x));
3630 break; 3694 break;
3631 } 3695 }
3632 3696
3633 IPW_ERROR("Calibration of wrong size %d (should be %zd)\n", 3697 case HOST_NOTIFICATION_NOISE_STATS:{
3634 notif->size, sizeof(*x)); 3698 if (notif->size == sizeof(u32)) {
3635 break; 3699 priv->last_noise =
3636 } 3700 (u8) (notif->u.noise.value & 0xff);
3701 average_add(&priv->average_noise,
3702 priv->last_noise);
3703 break;
3704 }
3637 3705
3638 case HOST_NOTIFICATION_NOISE_STATS: { 3706 IPW_ERROR
3639 if (notif->size == sizeof(u32)) { 3707 ("Noise stat is wrong size %d (should be %zd)\n",
3640 priv->last_noise = (u8)(notif->u.noise.value & 0xff); 3708 notif->size, sizeof(u32));
3641 average_add(&priv->average_noise, priv->last_noise);
3642 break; 3709 break;
3643 } 3710 }
3644 3711
3645 IPW_ERROR("Noise stat is wrong size %d (should be %zd)\n",
3646 notif->size, sizeof(u32));
3647 break;
3648 }
3649
3650 default: 3712 default:
3651 IPW_ERROR("Unknown notification: " 3713 IPW_ERROR("Unknown notification: "
3652 "subtype=%d,flags=0x%2x,size=%d\n", 3714 "subtype=%d,flags=0x%2x,size=%d\n",
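Every case in the hunk above applies the same defensive check: the notification payload is only used when notif->size matches the expected structure, and the beacon-state case then walks a threshold ladder (disassociate when associated and past missed_beacon_threshold, roam when past roaming_threshold, otherwise just log). A minimal standalone sketch of that ladder, with hypothetical types and names that only mirror the fields read above:

#include <stdio.h>
#include <string.h>

/* Hypothetical stand-ins for the notification fields used above. */
struct beacon_state { int state; int number; };
enum { BEACON_MISSING = 1 };

static void handle_beacon_state(const void *payload, size_t size, int associated,
				int missed_beacon_threshold, int roaming_threshold)
{
	struct beacon_state x;

	/* Only trust the payload if its size matches the expected struct. */
	if (size != sizeof(x)) {
		fprintf(stderr, "beacon state of wrong size %zu (should be %zu)\n",
			size, sizeof(x));
		return;
	}
	memcpy(&x, payload, sizeof(x));

	if (x.state != BEACON_MISSING)
		return;

	/* Threshold ladder: hardest response first, mildest last. */
	if (associated && x.number > missed_beacon_threshold)
		puts("disassociate");
	else if (x.number > roaming_threshold)
		puts("roam");
	else
		printf("missed %d beacons, keep waiting\n", x.number);
}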
@@ -3680,8 +3742,7 @@ static int ipw_queue_reset(struct ipw_priv *priv)
3680 rc = ipw_queue_tx_init(priv, &priv->txq[0], nTx, 3742 rc = ipw_queue_tx_init(priv, &priv->txq[0], nTx,
3681 CX2_TX_QUEUE_0_READ_INDEX, 3743 CX2_TX_QUEUE_0_READ_INDEX,
3682 CX2_TX_QUEUE_0_WRITE_INDEX, 3744 CX2_TX_QUEUE_0_WRITE_INDEX,
3683 CX2_TX_QUEUE_0_BD_BASE, 3745 CX2_TX_QUEUE_0_BD_BASE, CX2_TX_QUEUE_0_BD_SIZE);
3684 CX2_TX_QUEUE_0_BD_SIZE);
3685 if (rc) { 3746 if (rc) {
3686 IPW_ERROR("Tx 0 queue init failed\n"); 3747 IPW_ERROR("Tx 0 queue init failed\n");
3687 goto error; 3748 goto error;
@@ -3689,8 +3750,7 @@ static int ipw_queue_reset(struct ipw_priv *priv)
3689 rc = ipw_queue_tx_init(priv, &priv->txq[1], nTx, 3750 rc = ipw_queue_tx_init(priv, &priv->txq[1], nTx,
3690 CX2_TX_QUEUE_1_READ_INDEX, 3751 CX2_TX_QUEUE_1_READ_INDEX,
3691 CX2_TX_QUEUE_1_WRITE_INDEX, 3752 CX2_TX_QUEUE_1_WRITE_INDEX,
3692 CX2_TX_QUEUE_1_BD_BASE, 3753 CX2_TX_QUEUE_1_BD_BASE, CX2_TX_QUEUE_1_BD_SIZE);
3693 CX2_TX_QUEUE_1_BD_SIZE);
3694 if (rc) { 3754 if (rc) {
3695 IPW_ERROR("Tx 1 queue init failed\n"); 3755 IPW_ERROR("Tx 1 queue init failed\n");
3696 goto error; 3756 goto error;
@@ -3698,8 +3758,7 @@ static int ipw_queue_reset(struct ipw_priv *priv)
3698 rc = ipw_queue_tx_init(priv, &priv->txq[2], nTx, 3758 rc = ipw_queue_tx_init(priv, &priv->txq[2], nTx,
3699 CX2_TX_QUEUE_2_READ_INDEX, 3759 CX2_TX_QUEUE_2_READ_INDEX,
3700 CX2_TX_QUEUE_2_WRITE_INDEX, 3760 CX2_TX_QUEUE_2_WRITE_INDEX,
3701 CX2_TX_QUEUE_2_BD_BASE, 3761 CX2_TX_QUEUE_2_BD_BASE, CX2_TX_QUEUE_2_BD_SIZE);
3702 CX2_TX_QUEUE_2_BD_SIZE);
3703 if (rc) { 3762 if (rc) {
3704 IPW_ERROR("Tx 2 queue init failed\n"); 3763 IPW_ERROR("Tx 2 queue init failed\n");
3705 goto error; 3764 goto error;
@@ -3707,8 +3766,7 @@ static int ipw_queue_reset(struct ipw_priv *priv)
3707 rc = ipw_queue_tx_init(priv, &priv->txq[3], nTx, 3766 rc = ipw_queue_tx_init(priv, &priv->txq[3], nTx,
3708 CX2_TX_QUEUE_3_READ_INDEX, 3767 CX2_TX_QUEUE_3_READ_INDEX,
3709 CX2_TX_QUEUE_3_WRITE_INDEX, 3768 CX2_TX_QUEUE_3_WRITE_INDEX,
3710 CX2_TX_QUEUE_3_BD_BASE, 3769 CX2_TX_QUEUE_3_BD_BASE, CX2_TX_QUEUE_3_BD_SIZE);
3711 CX2_TX_QUEUE_3_BD_SIZE);
3712 if (rc) { 3770 if (rc) {
3713 IPW_ERROR("Tx 3 queue init failed\n"); 3771 IPW_ERROR("Tx 3 queue init failed\n");
3714 goto error; 3772 goto error;
@@ -3718,7 +3776,7 @@ static int ipw_queue_reset(struct ipw_priv *priv)
3718 priv->rx_pend_max = 0; 3776 priv->rx_pend_max = 0;
3719 return rc; 3777 return rc;
3720 3778
3721 error: 3779 error:
3722 ipw_tx_queue_free(priv); 3780 ipw_tx_queue_free(priv);
3723 return rc; 3781 return rc;
3724} 3782}
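The four ipw_queue_tx_init() calls above differ only in their per-queue register constants, and every failure funnels into one error label that tears down whatever was already initialized. A hypothetical table-driven sketch of that init-or-unwind shape (the names are illustrative, not the driver's):

struct queue_regs { unsigned read, write, bd_base, bd_size; };

/* Illustrative per-queue register tuples; real values depend on the hardware. */
static const struct queue_regs tx_regs[4] = { {0} };

int init_tx_queues(int n_bd,
		   int (*init_one)(int queue, int n_bd, const struct queue_regs *regs),
		   void (*free_all)(void))
{
	int i, rc;

	for (i = 0; i < 4; i++) {
		rc = init_one(i, n_bd, &tx_regs[i]);
		if (rc)
			goto error;	/* unwind whatever was already set up */
	}
	return 0;

error:
	free_all();
	return rc;
}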
@@ -3746,8 +3804,8 @@ static int ipw_queue_tx_reclaim(struct ipw_priv *priv,
3746 hw_tail = ipw_read32(priv, q->reg_r); 3804 hw_tail = ipw_read32(priv, q->reg_r);
3747 if (hw_tail >= q->n_bd) { 3805 if (hw_tail >= q->n_bd) {
3748 IPW_ERROR 3806 IPW_ERROR
3749 ("Read index for DMA queue (%d) is out of range [0-%d)\n", 3807 ("Read index for DMA queue (%d) is out of range [0-%d)\n",
3750 hw_tail, q->n_bd); 3808 hw_tail, q->n_bd);
3751 goto done; 3809 goto done;
3752 } 3810 }
3753 for (; q->last_used != hw_tail; 3811 for (; q->last_used != hw_tail;
@@ -3755,7 +3813,7 @@ static int ipw_queue_tx_reclaim(struct ipw_priv *priv,
3755 ipw_queue_tx_free_tfd(priv, txq); 3813 ipw_queue_tx_free_tfd(priv, txq);
3756 priv->tx_packets++; 3814 priv->tx_packets++;
3757 } 3815 }
3758 done: 3816 done:
3759 if (ipw_queue_space(q) > q->low_mark && qindex >= 0) { 3817 if (ipw_queue_space(q) > q->low_mark && qindex >= 0) {
3760 __maybe_wake_tx(priv); 3818 __maybe_wake_tx(priv);
3761 } 3819 }
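ipw_queue_tx_reclaim() above first rejects a hardware read index that falls outside the ring, then advances last_used toward it with wraparound, freeing one descriptor per step. A small sketch of that circular walk, assuming a hypothetical ring type:

struct ring { int n_bd; int last_used; };

/* Advance an index by one slot, wrapping at the ring size. */
static int ring_inc(int idx, int n_bd)
{
	return (idx + 1) % n_bd;
}

/* Returns the number of descriptors reclaimed, or -1 if hw_tail is bogus. */
static int reclaim(struct ring *q, int hw_tail, void (*free_slot)(int idx))
{
	int freed = 0;

	if (hw_tail >= q->n_bd)
		return -1;	/* read index out of range [0, n_bd) */

	for (; q->last_used != hw_tail;
	     q->last_used = ring_inc(q->last_used, q->n_bd)) {
		free_slot(q->last_used);
		freed++;
	}
	return freed;
}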
@@ -3795,8 +3853,6 @@ static int ipw_queue_tx_hcmd(struct ipw_priv *priv, int hcmd, void *buf,
3795 return 0; 3853 return 0;
3796} 3854}
3797 3855
3798
3799
3800/* 3856/*
3801 * Rx theory of operation 3857 * Rx theory of operation
3802 * 3858 *
@@ -3933,9 +3989,9 @@ static void ipw_rx_queue_replenish(void *data)
3933 list_del(element); 3989 list_del(element);
3934 3990
3935 rxb->rxb = (struct ipw_rx_buffer *)rxb->skb->data; 3991 rxb->rxb = (struct ipw_rx_buffer *)rxb->skb->data;
3936 rxb->dma_addr = pci_map_single( 3992 rxb->dma_addr =
3937 priv->pci_dev, rxb->skb->data, CX2_RX_BUF_SIZE, 3993 pci_map_single(priv->pci_dev, rxb->skb->data,
3938 PCI_DMA_FROMDEVICE); 3994 CX2_RX_BUF_SIZE, PCI_DMA_FROMDEVICE);
3939 3995
3940 list_add_tail(&rxb->list, &rxq->rx_free); 3996 list_add_tail(&rxb->list, &rxq->rx_free);
3941 rxq->free_count++; 3997 rxq->free_count++;
@@ -3950,8 +4006,7 @@ static void ipw_rx_queue_replenish(void *data)
3950 * This free routine walks the list of POOL entries and if SKB is set to 4006 * This free routine walks the list of POOL entries and if SKB is set to
3951 * non NULL it is unmapped and freed 4007 * non NULL it is unmapped and freed
3952 */ 4008 */
3953static void ipw_rx_queue_free(struct ipw_priv *priv, 4009static void ipw_rx_queue_free(struct ipw_priv *priv, struct ipw_rx_queue *rxq)
3954 struct ipw_rx_queue *rxq)
3955{ 4010{
3956 int i; 4011 int i;
3957 4012
@@ -3961,8 +4016,7 @@ static void ipw_rx_queue_free(struct ipw_priv *priv,
3961 for (i = 0; i < RX_QUEUE_SIZE + RX_FREE_BUFFERS; i++) { 4016 for (i = 0; i < RX_QUEUE_SIZE + RX_FREE_BUFFERS; i++) {
3962 if (rxq->pool[i].skb != NULL) { 4017 if (rxq->pool[i].skb != NULL) {
3963 pci_unmap_single(priv->pci_dev, rxq->pool[i].dma_addr, 4018 pci_unmap_single(priv->pci_dev, rxq->pool[i].dma_addr,
3964 CX2_RX_BUF_SIZE, 4019 CX2_RX_BUF_SIZE, PCI_DMA_FROMDEVICE);
3965 PCI_DMA_FROMDEVICE);
3966 dev_kfree_skb(rxq->pool[i].skb); 4020 dev_kfree_skb(rxq->pool[i].skb);
3967 } 4021 }
3968 } 4022 }
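The replenish and free hunks above are the two halves of one lifecycle: each newly allocated skb is DMA-mapped with pci_map_single() before it joins rx_free, and the free routine later unmaps and releases exactly those buffers. A compressed sketch of the pairing, using the same old-style PCI DMA calls on a hypothetical buffer struct:

#include <linux/pci.h>
#include <linux/skbuff.h>
#include <linux/errno.h>

#define RX_BUF_SIZE 3000	/* illustrative; the driver uses CX2_RX_BUF_SIZE */

struct rx_buf { struct sk_buff *skb; dma_addr_t dma_addr; };

/* Replenish half: allocate, map for device-to-CPU DMA, ready for the free list. */
static int rx_buf_fill(struct pci_dev *pdev, struct rx_buf *rxb)
{
	rxb->skb = dev_alloc_skb(RX_BUF_SIZE);
	if (!rxb->skb)
		return -ENOMEM;
	rxb->dma_addr = pci_map_single(pdev, rxb->skb->data,
				       RX_BUF_SIZE, PCI_DMA_FROMDEVICE);
	return 0;
}

/* Free half: every mapped skb must be unmapped before it is released. */
static void rx_buf_release(struct pci_dev *pdev, struct rx_buf *rxb)
{
	if (!rxb->skb)
		return;
	pci_unmap_single(pdev, rxb->dma_addr, RX_BUF_SIZE, PCI_DMA_FROMDEVICE);
	dev_kfree_skb(rxb->skb);
	rxb->skb = NULL;
}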
@@ -4001,28 +4055,28 @@ static int ipw_is_rate_in_mask(struct ipw_priv *priv, int ieee_mode, u8 rate)
4001 switch (rate) { 4055 switch (rate) {
4002 case IEEE80211_OFDM_RATE_6MB: 4056 case IEEE80211_OFDM_RATE_6MB:
4003 return priv->rates_mask & IEEE80211_OFDM_RATE_6MB_MASK ? 4057 return priv->rates_mask & IEEE80211_OFDM_RATE_6MB_MASK ?
4004 1 : 0; 4058 1 : 0;
4005 case IEEE80211_OFDM_RATE_9MB: 4059 case IEEE80211_OFDM_RATE_9MB:
4006 return priv->rates_mask & IEEE80211_OFDM_RATE_9MB_MASK ? 4060 return priv->rates_mask & IEEE80211_OFDM_RATE_9MB_MASK ?
4007 1 : 0; 4061 1 : 0;
4008 case IEEE80211_OFDM_RATE_12MB: 4062 case IEEE80211_OFDM_RATE_12MB:
4009 return priv->rates_mask & IEEE80211_OFDM_RATE_12MB_MASK ? 4063 return priv->
4010 1 : 0; 4064 rates_mask & IEEE80211_OFDM_RATE_12MB_MASK ? 1 : 0;
4011 case IEEE80211_OFDM_RATE_18MB: 4065 case IEEE80211_OFDM_RATE_18MB:
4012 return priv->rates_mask & IEEE80211_OFDM_RATE_18MB_MASK ? 4066 return priv->
4013 1 : 0; 4067 rates_mask & IEEE80211_OFDM_RATE_18MB_MASK ? 1 : 0;
4014 case IEEE80211_OFDM_RATE_24MB: 4068 case IEEE80211_OFDM_RATE_24MB:
4015 return priv->rates_mask & IEEE80211_OFDM_RATE_24MB_MASK ? 4069 return priv->
4016 1 : 0; 4070 rates_mask & IEEE80211_OFDM_RATE_24MB_MASK ? 1 : 0;
4017 case IEEE80211_OFDM_RATE_36MB: 4071 case IEEE80211_OFDM_RATE_36MB:
4018 return priv->rates_mask & IEEE80211_OFDM_RATE_36MB_MASK ? 4072 return priv->
4019 1 : 0; 4073 rates_mask & IEEE80211_OFDM_RATE_36MB_MASK ? 1 : 0;
4020 case IEEE80211_OFDM_RATE_48MB: 4074 case IEEE80211_OFDM_RATE_48MB:
4021 return priv->rates_mask & IEEE80211_OFDM_RATE_48MB_MASK ? 4075 return priv->
4022 1 : 0; 4076 rates_mask & IEEE80211_OFDM_RATE_48MB_MASK ? 1 : 0;
4023 case IEEE80211_OFDM_RATE_54MB: 4077 case IEEE80211_OFDM_RATE_54MB:
4024 return priv->rates_mask & IEEE80211_OFDM_RATE_54MB_MASK ? 4078 return priv->
4025 1 : 0; 4079 rates_mask & IEEE80211_OFDM_RATE_54MB_MASK ? 1 : 0;
4026 default: 4080 default:
4027 return 0; 4081 return 0;
4028 } 4082 }
@@ -4074,10 +4128,11 @@ static int ipw_compatible_rates(struct ipw_priv *priv,
4074 int num_rates, i; 4128 int num_rates, i;
4075 4129
4076 memset(rates, 0, sizeof(*rates)); 4130 memset(rates, 0, sizeof(*rates));
4077 num_rates = min(network->rates_len, (u8)IPW_MAX_RATES); 4131 num_rates = min(network->rates_len, (u8) IPW_MAX_RATES);
4078 rates->num_rates = 0; 4132 rates->num_rates = 0;
4079 for (i = 0; i < num_rates; i++) { 4133 for (i = 0; i < num_rates; i++) {
4080 if (!ipw_is_rate_in_mask(priv, network->mode, network->rates[i])) { 4134 if (!ipw_is_rate_in_mask
4135 (priv, network->mode, network->rates[i])) {
4081 IPW_DEBUG_SCAN("Rate %02X masked : 0x%08X\n", 4136 IPW_DEBUG_SCAN("Rate %02X masked : 0x%08X\n",
4082 network->rates[i], priv->rates_mask); 4137 network->rates[i], priv->rates_mask);
4083 continue; 4138 continue;
@@ -4086,15 +4141,18 @@ static int ipw_compatible_rates(struct ipw_priv *priv,
4086 rates->supported_rates[rates->num_rates++] = network->rates[i]; 4141 rates->supported_rates[rates->num_rates++] = network->rates[i];
4087 } 4142 }
4088 4143
4089 num_rates = min(network->rates_ex_len, (u8)(IPW_MAX_RATES - num_rates)); 4144 num_rates =
4145 min(network->rates_ex_len, (u8) (IPW_MAX_RATES - num_rates));
4090 for (i = 0; i < num_rates; i++) { 4146 for (i = 0; i < num_rates; i++) {
4091 if (!ipw_is_rate_in_mask(priv, network->mode, network->rates_ex[i])) { 4147 if (!ipw_is_rate_in_mask
4148 (priv, network->mode, network->rates_ex[i])) {
4092 IPW_DEBUG_SCAN("Rate %02X masked : 0x%08X\n", 4149 IPW_DEBUG_SCAN("Rate %02X masked : 0x%08X\n",
4093 network->rates_ex[i], priv->rates_mask); 4150 network->rates_ex[i], priv->rates_mask);
4094 continue; 4151 continue;
4095 } 4152 }
4096 4153
4097 rates->supported_rates[rates->num_rates++] = network->rates_ex[i]; 4154 rates->supported_rates[rates->num_rates++] =
4155 network->rates_ex[i];
4098 } 4156 }
4099 4157
4100 return rates->num_rates; 4158 return rates->num_rates;
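ipw_compatible_rates() above simply intersects the rates a network advertises with the local rates_mask, first for the base rate array and then for the extended one, capping the result at IPW_MAX_RATES. The same filter reduced to a hypothetical helper:

#include <stddef.h>

typedef unsigned char u8;

/* Copy the rates accepted by is_allowed() into out[], up to max entries. */
static size_t filter_rates(const u8 *rates, size_t len,
			   int (*is_allowed)(u8 rate), u8 *out, size_t max)
{
	size_t i, n = 0;

	for (i = 0; i < len && n < max; i++) {
		if (!is_allowed(rates[i]))
			continue;	/* masked out, skip it */
		out[n++] = rates[i];
	}
	return n;
}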
@@ -4113,65 +4171,65 @@ static inline void ipw_copy_rates(struct ipw_supported_rates *dest,
4113 * mask should ever be used -- right now all callers to add the scan rates are 4171 * mask should ever be used -- right now all callers to add the scan rates are
4114 * set with the modulation = CCK, so BASIC_RATE_MASK is never set... */ 4172 * set with the modulation = CCK, so BASIC_RATE_MASK is never set... */
4115static void ipw_add_cck_scan_rates(struct ipw_supported_rates *rates, 4173static void ipw_add_cck_scan_rates(struct ipw_supported_rates *rates,
4116 u8 modulation, u32 rate_mask) 4174 u8 modulation, u32 rate_mask)
4117{ 4175{
4118 u8 basic_mask = (IEEE80211_OFDM_MODULATION == modulation) ? 4176 u8 basic_mask = (IEEE80211_OFDM_MODULATION == modulation) ?
4119 IEEE80211_BASIC_RATE_MASK : 0; 4177 IEEE80211_BASIC_RATE_MASK : 0;
4120 4178
4121 if (rate_mask & IEEE80211_CCK_RATE_1MB_MASK) 4179 if (rate_mask & IEEE80211_CCK_RATE_1MB_MASK)
4122 rates->supported_rates[rates->num_rates++] = 4180 rates->supported_rates[rates->num_rates++] =
4123 IEEE80211_BASIC_RATE_MASK | IEEE80211_CCK_RATE_1MB; 4181 IEEE80211_BASIC_RATE_MASK | IEEE80211_CCK_RATE_1MB;
4124 4182
4125 if (rate_mask & IEEE80211_CCK_RATE_2MB_MASK) 4183 if (rate_mask & IEEE80211_CCK_RATE_2MB_MASK)
4126 rates->supported_rates[rates->num_rates++] = 4184 rates->supported_rates[rates->num_rates++] =
4127 IEEE80211_BASIC_RATE_MASK | IEEE80211_CCK_RATE_2MB; 4185 IEEE80211_BASIC_RATE_MASK | IEEE80211_CCK_RATE_2MB;
4128 4186
4129 if (rate_mask & IEEE80211_CCK_RATE_5MB_MASK) 4187 if (rate_mask & IEEE80211_CCK_RATE_5MB_MASK)
4130 rates->supported_rates[rates->num_rates++] = basic_mask | 4188 rates->supported_rates[rates->num_rates++] = basic_mask |
4131 IEEE80211_CCK_RATE_5MB; 4189 IEEE80211_CCK_RATE_5MB;
4132 4190
4133 if (rate_mask & IEEE80211_CCK_RATE_11MB_MASK) 4191 if (rate_mask & IEEE80211_CCK_RATE_11MB_MASK)
4134 rates->supported_rates[rates->num_rates++] = basic_mask | 4192 rates->supported_rates[rates->num_rates++] = basic_mask |
4135 IEEE80211_CCK_RATE_11MB; 4193 IEEE80211_CCK_RATE_11MB;
4136} 4194}
4137 4195
4138static void ipw_add_ofdm_scan_rates(struct ipw_supported_rates *rates, 4196static void ipw_add_ofdm_scan_rates(struct ipw_supported_rates *rates,
4139 u8 modulation, u32 rate_mask) 4197 u8 modulation, u32 rate_mask)
4140{ 4198{
4141 u8 basic_mask = (IEEE80211_OFDM_MODULATION == modulation) ? 4199 u8 basic_mask = (IEEE80211_OFDM_MODULATION == modulation) ?
4142 IEEE80211_BASIC_RATE_MASK : 0; 4200 IEEE80211_BASIC_RATE_MASK : 0;
4143 4201
4144 if (rate_mask & IEEE80211_OFDM_RATE_6MB_MASK) 4202 if (rate_mask & IEEE80211_OFDM_RATE_6MB_MASK)
4145 rates->supported_rates[rates->num_rates++] = basic_mask | 4203 rates->supported_rates[rates->num_rates++] = basic_mask |
4146 IEEE80211_OFDM_RATE_6MB; 4204 IEEE80211_OFDM_RATE_6MB;
4147 4205
4148 if (rate_mask & IEEE80211_OFDM_RATE_9MB_MASK) 4206 if (rate_mask & IEEE80211_OFDM_RATE_9MB_MASK)
4149 rates->supported_rates[rates->num_rates++] = 4207 rates->supported_rates[rates->num_rates++] =
4150 IEEE80211_OFDM_RATE_9MB; 4208 IEEE80211_OFDM_RATE_9MB;
4151 4209
4152 if (rate_mask & IEEE80211_OFDM_RATE_12MB_MASK) 4210 if (rate_mask & IEEE80211_OFDM_RATE_12MB_MASK)
4153 rates->supported_rates[rates->num_rates++] = basic_mask | 4211 rates->supported_rates[rates->num_rates++] = basic_mask |
4154 IEEE80211_OFDM_RATE_12MB; 4212 IEEE80211_OFDM_RATE_12MB;
4155 4213
4156 if (rate_mask & IEEE80211_OFDM_RATE_18MB_MASK) 4214 if (rate_mask & IEEE80211_OFDM_RATE_18MB_MASK)
4157 rates->supported_rates[rates->num_rates++] = 4215 rates->supported_rates[rates->num_rates++] =
4158 IEEE80211_OFDM_RATE_18MB; 4216 IEEE80211_OFDM_RATE_18MB;
4159 4217
4160 if (rate_mask & IEEE80211_OFDM_RATE_24MB_MASK) 4218 if (rate_mask & IEEE80211_OFDM_RATE_24MB_MASK)
4161 rates->supported_rates[rates->num_rates++] = basic_mask | 4219 rates->supported_rates[rates->num_rates++] = basic_mask |
4162 IEEE80211_OFDM_RATE_24MB; 4220 IEEE80211_OFDM_RATE_24MB;
4163 4221
4164 if (rate_mask & IEEE80211_OFDM_RATE_36MB_MASK) 4222 if (rate_mask & IEEE80211_OFDM_RATE_36MB_MASK)
4165 rates->supported_rates[rates->num_rates++] = 4223 rates->supported_rates[rates->num_rates++] =
4166 IEEE80211_OFDM_RATE_36MB; 4224 IEEE80211_OFDM_RATE_36MB;
4167 4225
4168 if (rate_mask & IEEE80211_OFDM_RATE_48MB_MASK) 4226 if (rate_mask & IEEE80211_OFDM_RATE_48MB_MASK)
4169 rates->supported_rates[rates->num_rates++] = 4227 rates->supported_rates[rates->num_rates++] =
4170 IEEE80211_OFDM_RATE_48MB; 4228 IEEE80211_OFDM_RATE_48MB;
4171 4229
4172 if (rate_mask & IEEE80211_OFDM_RATE_54MB_MASK) 4230 if (rate_mask & IEEE80211_OFDM_RATE_54MB_MASK)
4173 rates->supported_rates[rates->num_rates++] = 4231 rates->supported_rates[rates->num_rates++] =
4174 IEEE80211_OFDM_RATE_54MB; 4232 IEEE80211_OFDM_RATE_54MB;
4175} 4233}
4176 4234
4177struct ipw_network_match { 4235struct ipw_network_match {
@@ -4179,11 +4237,9 @@ struct ipw_network_match {
4179 struct ipw_supported_rates rates; 4237 struct ipw_supported_rates rates;
4180}; 4238};
4181 4239
4182static int ipw_best_network( 4240static int ipw_best_network(struct ipw_priv *priv,
4183 struct ipw_priv *priv, 4241 struct ipw_network_match *match,
4184 struct ipw_network_match *match, 4242 struct ieee80211_network *network, int roaming)
4185 struct ieee80211_network *network,
4186 int roaming)
4187{ 4243{
4188 struct ipw_supported_rates rates; 4244 struct ipw_supported_rates rates;
4189 4245
@@ -4231,21 +4287,21 @@ static int ipw_best_network(
4231 memcmp(network->ssid, priv->essid, 4287 memcmp(network->ssid, priv->essid,
4232 min(network->ssid_len, priv->essid_len)))) { 4288 min(network->ssid_len, priv->essid_len)))) {
4233 char escaped[IW_ESSID_MAX_SIZE * 2 + 1]; 4289 char escaped[IW_ESSID_MAX_SIZE * 2 + 1];
4234 strncpy(escaped, escape_essid( 4290 strncpy(escaped,
4235 network->ssid, network->ssid_len), 4291 escape_essid(network->ssid, network->ssid_len),
4236 sizeof(escaped)); 4292 sizeof(escaped));
4237 IPW_DEBUG_ASSOC("Network '%s (" MAC_FMT ")' excluded " 4293 IPW_DEBUG_ASSOC("Network '%s (" MAC_FMT ")' excluded "
4238 "because of ESSID mismatch: '%s'.\n", 4294 "because of ESSID mismatch: '%s'.\n",
4239 escaped, MAC_ARG(network->bssid), 4295 escaped, MAC_ARG(network->bssid),
4240 escape_essid(priv->essid, priv->essid_len)); 4296 escape_essid(priv->essid,
4297 priv->essid_len));
4241 return 0; 4298 return 0;
4242 } 4299 }
4243 } 4300 }
4244 4301
4245 /* If the old network rate is better than this one, don't bother 4302 /* If the old network rate is better than this one, don't bother
4246 * testing everything else. */ 4303 * testing everything else. */
4247 if (match->network && match->network->stats.rssi > 4304 if (match->network && match->network->stats.rssi > network->stats.rssi) {
4248 network->stats.rssi) {
4249 char escaped[IW_ESSID_MAX_SIZE * 2 + 1]; 4305 char escaped[IW_ESSID_MAX_SIZE * 2 + 1];
4250 strncpy(escaped, 4306 strncpy(escaped,
4251 escape_essid(network->ssid, network->ssid_len), 4307 escape_essid(network->ssid, network->ssid_len),
@@ -4303,7 +4359,7 @@ static int ipw_best_network(
4303 priv->capability & CAP_PRIVACY_ON ? "on" : 4359 priv->capability & CAP_PRIVACY_ON ? "on" :
4304 "off", 4360 "off",
4305 network->capability & 4361 network->capability &
4306 WLAN_CAPABILITY_PRIVACY ?"on" : "off"); 4362 WLAN_CAPABILITY_PRIVACY ? "on" : "off");
4307 return 0; 4363 return 0;
4308 } 4364 }
4309 4365
@@ -4312,8 +4368,7 @@ static int ipw_best_network(
4312 IPW_DEBUG_ASSOC("Network '%s (" MAC_FMT ")' excluded " 4368 IPW_DEBUG_ASSOC("Network '%s (" MAC_FMT ")' excluded "
4313 "because of BSSID mismatch: " MAC_FMT ".\n", 4369 "because of BSSID mismatch: " MAC_FMT ".\n",
4314 escape_essid(network->ssid, network->ssid_len), 4370 escape_essid(network->ssid, network->ssid_len),
4315 MAC_ARG(network->bssid), 4371 MAC_ARG(network->bssid), MAC_ARG(priv->bssid));
4316 MAC_ARG(priv->bssid));
4317 return 0; 4372 return 0;
4318 } 4373 }
4319 4374
@@ -4351,9 +4406,8 @@ static int ipw_best_network(
4351 return 1; 4406 return 1;
4352} 4407}
4353 4408
4354
4355static void ipw_adhoc_create(struct ipw_priv *priv, 4409static void ipw_adhoc_create(struct ipw_priv *priv,
4356 struct ieee80211_network *network) 4410 struct ieee80211_network *network)
4357{ 4411{
4358 /* 4412 /*
4359 * For the purposes of scanning, we can set our wireless mode 4413 * For the purposes of scanning, we can set our wireless mode
@@ -4393,8 +4447,7 @@ static void ipw_adhoc_create(struct ipw_priv *priv,
4393 if (priv->capability & CAP_PRIVACY_ON) 4447 if (priv->capability & CAP_PRIVACY_ON)
4394 network->capability |= WLAN_CAPABILITY_PRIVACY; 4448 network->capability |= WLAN_CAPABILITY_PRIVACY;
4395 network->rates_len = min(priv->rates.num_rates, MAX_RATES_LENGTH); 4449 network->rates_len = min(priv->rates.num_rates, MAX_RATES_LENGTH);
4396 memcpy(network->rates, priv->rates.supported_rates, 4450 memcpy(network->rates, priv->rates.supported_rates, network->rates_len);
4397 network->rates_len);
4398 network->rates_ex_len = priv->rates.num_rates - network->rates_len; 4451 network->rates_ex_len = priv->rates.num_rates - network->rates_len;
4399 memcpy(network->rates_ex, 4452 memcpy(network->rates_ex,
4400 &priv->rates.supported_rates[network->rates_len], 4453 &priv->rates.supported_rates[network->rates_len],
@@ -4404,13 +4457,13 @@ static void ipw_adhoc_create(struct ipw_priv *priv,
4404 network->last_associate = 0; 4457 network->last_associate = 0;
4405 network->time_stamp[0] = 0; 4458 network->time_stamp[0] = 0;
4406 network->time_stamp[1] = 0; 4459 network->time_stamp[1] = 0;
4407 network->beacon_interval = 100; /* Default */ 4460 network->beacon_interval = 100; /* Default */
4408 network->listen_interval = 10; /* Default */ 4461 network->listen_interval = 10; /* Default */
4409 network->atim_window = 0; /* Default */ 4462 network->atim_window = 0; /* Default */
4410#ifdef CONFIG_IEEE80211_WPA 4463#ifdef CONFIG_IEEE80211_WPA
4411 network->wpa_ie_len = 0; 4464 network->wpa_ie_len = 0;
4412 network->rsn_ie_len = 0; 4465 network->rsn_ie_len = 0;
4413#endif /* CONFIG_IEEE80211_WPA */ 4466#endif /* CONFIG_IEEE80211_WPA */
4414} 4467}
4415 4468
4416static void ipw_send_wep_keys(struct ipw_priv *priv) 4469static void ipw_send_wep_keys(struct ipw_priv *priv)
@@ -4464,14 +4517,12 @@ static void ipw_debug_config(struct ipw_priv *priv)
4464 IPW_DEBUG_INFO("Scan completed, no valid APs matched " 4517 IPW_DEBUG_INFO("Scan completed, no valid APs matched "
4465 "[CFG 0x%08X]\n", priv->config); 4518 "[CFG 0x%08X]\n", priv->config);
4466 if (priv->config & CFG_STATIC_CHANNEL) 4519 if (priv->config & CFG_STATIC_CHANNEL)
4467 IPW_DEBUG_INFO("Channel locked to %d\n", 4520 IPW_DEBUG_INFO("Channel locked to %d\n", priv->channel);
4468 priv->channel);
4469 else 4521 else
4470 IPW_DEBUG_INFO("Channel unlocked.\n"); 4522 IPW_DEBUG_INFO("Channel unlocked.\n");
4471 if (priv->config & CFG_STATIC_ESSID) 4523 if (priv->config & CFG_STATIC_ESSID)
4472 IPW_DEBUG_INFO("ESSID locked to '%s'\n", 4524 IPW_DEBUG_INFO("ESSID locked to '%s'\n",
4473 escape_essid(priv->essid, 4525 escape_essid(priv->essid, priv->essid_len));
4474 priv->essid_len));
4475 else 4526 else
4476 IPW_DEBUG_INFO("ESSID unlocked.\n"); 4527 IPW_DEBUG_INFO("ESSID unlocked.\n");
4477 if (priv->config & CFG_STATIC_BSSID) 4528 if (priv->config & CFG_STATIC_BSSID)
@@ -4502,7 +4553,7 @@ static inline void ipw_set_fixed_rate(struct ipw_priv *priv,
4502 * Tx rates */ 4553 * Tx rates */
4503 4554
4504 switch (priv->ieee->freq_band) { 4555 switch (priv->ieee->freq_band) {
4505 case IEEE80211_52GHZ_BAND: /* A only */ 4556 case IEEE80211_52GHZ_BAND: /* A only */
4506 /* IEEE_A */ 4557 /* IEEE_A */
4507 if (priv->rates_mask & ~IEEE80211_OFDM_RATES_MASK) { 4558 if (priv->rates_mask & ~IEEE80211_OFDM_RATES_MASK) {
4508 /* Invalid fixed rate mask */ 4559 /* Invalid fixed rate mask */
@@ -4513,7 +4564,7 @@ static inline void ipw_set_fixed_rate(struct ipw_priv *priv,
4513 fr.tx_rates >>= IEEE80211_OFDM_SHIFT_MASK_A; 4564 fr.tx_rates >>= IEEE80211_OFDM_SHIFT_MASK_A;
4514 break; 4565 break;
4515 4566
4516 default: /* 2.4Ghz or Mixed */ 4567 default: /* 2.4Ghz or Mixed */
4517 /* IEEE_B */ 4568 /* IEEE_B */
4518 if (network->mode == IEEE_B) { 4569 if (network->mode == IEEE_B) {
4519 if (fr.tx_rates & ~IEEE80211_CCK_RATES_MASK) { 4570 if (fr.tx_rates & ~IEEE80211_CCK_RATES_MASK) {
@@ -4551,13 +4602,12 @@ static inline void ipw_set_fixed_rate(struct ipw_priv *priv,
4551 } 4602 }
4552 4603
4553 reg = ipw_read32(priv, IPW_MEM_FIXED_OVERRIDE); 4604 reg = ipw_read32(priv, IPW_MEM_FIXED_OVERRIDE);
4554 ipw_write_reg32(priv, reg, *(u32*)&fr); 4605 ipw_write_reg32(priv, reg, *(u32 *) & fr);
4555} 4606}
4556 4607
4557static int ipw_associate_network(struct ipw_priv *priv, 4608static int ipw_associate_network(struct ipw_priv *priv,
4558 struct ieee80211_network *network, 4609 struct ieee80211_network *network,
4559 struct ipw_supported_rates *rates, 4610 struct ipw_supported_rates *rates, int roaming)
4560 int roaming)
4561{ 4611{
4562 int err; 4612 int err;
4563 4613
@@ -4566,7 +4616,7 @@ static int ipw_associate_network(struct ipw_priv *priv,
4566 4616
4567 if (!(priv->config & CFG_STATIC_ESSID)) { 4617 if (!(priv->config & CFG_STATIC_ESSID)) {
4568 priv->essid_len = min(network->ssid_len, 4618 priv->essid_len = min(network->ssid_len,
4569 (u8)IW_ESSID_MAX_SIZE); 4619 (u8) IW_ESSID_MAX_SIZE);
4570 memcpy(priv->essid, network->ssid, priv->essid_len); 4620 memcpy(priv->essid, network->ssid, priv->essid_len);
4571 } 4621 }
4572 4622
@@ -4612,13 +4662,11 @@ static int ipw_associate_network(struct ipw_priv *priv,
4612 priv->capability & CAP_PRIVACY_ON ? " key=" : "", 4662 priv->capability & CAP_PRIVACY_ON ? " key=" : "",
4613 priv->capability & CAP_PRIVACY_ON ? 4663 priv->capability & CAP_PRIVACY_ON ?
4614 '1' + priv->sec.active_key : '.', 4664 '1' + priv->sec.active_key : '.',
4615 priv->capability & CAP_PRIVACY_ON ? 4665 priv->capability & CAP_PRIVACY_ON ? '.' : ' ');
4616 '.' : ' ');
4617 4666
4618 priv->assoc_request.beacon_interval = network->beacon_interval; 4667 priv->assoc_request.beacon_interval = network->beacon_interval;
4619 if ((priv->ieee->iw_mode == IW_MODE_ADHOC) && 4668 if ((priv->ieee->iw_mode == IW_MODE_ADHOC) &&
4620 (network->time_stamp[0] == 0) && 4669 (network->time_stamp[0] == 0) && (network->time_stamp[1] == 0)) {
4621 (network->time_stamp[1] == 0)) {
4622 priv->assoc_request.assoc_type = HC_IBSS_START; 4670 priv->assoc_request.assoc_type = HC_IBSS_START;
4623 priv->assoc_request.assoc_tsf_msw = 0; 4671 priv->assoc_request.assoc_tsf_msw = 0;
4624 priv->assoc_request.assoc_tsf_lsw = 0; 4672 priv->assoc_request.assoc_tsf_lsw = 0;
@@ -4637,8 +4685,7 @@ static int ipw_associate_network(struct ipw_priv *priv,
4637 memset(&priv->assoc_request.dest, 0xFF, ETH_ALEN); 4685 memset(&priv->assoc_request.dest, 0xFF, ETH_ALEN);
4638 priv->assoc_request.atim_window = network->atim_window; 4686 priv->assoc_request.atim_window = network->atim_window;
4639 } else { 4687 } else {
4640 memcpy(&priv->assoc_request.dest, network->bssid, 4688 memcpy(&priv->assoc_request.dest, network->bssid, ETH_ALEN);
4641 ETH_ALEN);
4642 priv->assoc_request.atim_window = 0; 4689 priv->assoc_request.atim_window = 0;
4643 } 4690 }
4644 4691
@@ -4772,14 +4819,13 @@ static void ipw_associate(void *data)
4772 4819
4773 if (!(priv->config & CFG_ASSOCIATE) && 4820 if (!(priv->config & CFG_ASSOCIATE) &&
4774 !(priv->config & (CFG_STATIC_ESSID | 4821 !(priv->config & (CFG_STATIC_ESSID |
4775 CFG_STATIC_CHANNEL | 4822 CFG_STATIC_CHANNEL | CFG_STATIC_BSSID))) {
4776 CFG_STATIC_BSSID))) {
4777 IPW_DEBUG_ASSOC("Not attempting association (associate=0)\n"); 4823 IPW_DEBUG_ASSOC("Not attempting association (associate=0)\n");
4778 return; 4824 return;
4779 } 4825 }
4780 4826
4781 list_for_each_entry(network, &priv->ieee->network_list, list) 4827 list_for_each_entry(network, &priv->ieee->network_list, list)
4782 ipw_best_network(priv, &match, network, 0); 4828 ipw_best_network(priv, &match, network, 0);
4783 4829
4784 network = match.network; 4830 network = match.network;
4785 rates = &match.rates; 4831 rates = &match.rates;
@@ -4790,8 +4836,7 @@ static void ipw_associate(void *data)
4790 priv->config & CFG_STATIC_ESSID && 4836 priv->config & CFG_STATIC_ESSID &&
4791 !list_empty(&priv->ieee->network_free_list)) { 4837 !list_empty(&priv->ieee->network_free_list)) {
4792 element = priv->ieee->network_free_list.next; 4838 element = priv->ieee->network_free_list.next;
4793 network = list_entry(element, struct ieee80211_network, 4839 network = list_entry(element, struct ieee80211_network, list);
4794 list);
4795 ipw_adhoc_create(priv, network); 4840 ipw_adhoc_create(priv, network);
4796 rates = &priv->rates; 4841 rates = &priv->rates;
4797 list_del(element); 4842 list_del(element);
@@ -4813,8 +4858,8 @@ static void ipw_associate(void *data)
4813} 4858}
4814 4859
4815static inline void ipw_handle_data_packet(struct ipw_priv *priv, 4860static inline void ipw_handle_data_packet(struct ipw_priv *priv,
4816 struct ipw_rx_mem_buffer *rxb, 4861 struct ipw_rx_mem_buffer *rxb,
4817 struct ieee80211_rx_stats *stats) 4862 struct ieee80211_rx_stats *stats)
4818{ 4863{
4819 struct ipw_rx_packet *pkt = (struct ipw_rx_packet *)rxb->skb->data; 4864 struct ipw_rx_packet *pkt = (struct ipw_rx_packet *)rxb->skb->data;
4820 4865
@@ -4846,11 +4891,10 @@ static inline void ipw_handle_data_packet(struct ipw_priv *priv,
4846 4891
4847 if (!ieee80211_rx(priv->ieee, rxb->skb, stats)) 4892 if (!ieee80211_rx(priv->ieee, rxb->skb, stats))
4848 priv->ieee->stats.rx_errors++; 4893 priv->ieee->stats.rx_errors++;
4849 else /* ieee80211_rx succeeded, so it now owns the SKB */ 4894 else /* ieee80211_rx succeeded, so it now owns the SKB */
4850 rxb->skb = NULL; 4895 rxb->skb = NULL;
4851} 4896}
4852 4897
4853
4854/* 4898/*
4855  * Main entry function for receiving a packet with 80211 headers. This 4899  * Main entry function for receiving a packet with 80211 headers. This
4856  * should be called whenever the FW has notified us that there is a new 4900  * should be called whenever the FW has notified us that there is a new
@@ -4885,125 +4929,152 @@ static void ipw_rx(struct ipw_priv *priv)
4929 	pkt = (struct ipw_rx_packet *)rxb->skb->data;
4930 	IPW_DEBUG_RX("Packet: type=%02X seq=%02X bits=%02X\n",
4931 		     pkt->header.message_type,
4932 		     pkt->header.rx_seq_num, pkt->header.control_bits);
4933 
4934 	switch (pkt->header.message_type) {
4935 	case RX_FRAME_TYPE:	/* 802.11 frame */  {
4936 			struct ieee80211_rx_stats stats = {
4937 				.rssi = pkt->u.frame.rssi_dbm -
4938 				    IPW_RSSI_TO_DBM,
4939 				.signal = pkt->u.frame.signal,
4940 				.rate = pkt->u.frame.rate,
4941 				.mac_time = jiffies,
4942 				.received_channel =
4943 				    pkt->u.frame.received_channel,
4944 				.freq =
4945 				    (pkt->u.frame.
4946 				     control & (1 << 0)) ?
4947 				    IEEE80211_24GHZ_BAND :
4948 				    IEEE80211_52GHZ_BAND,
4949 				.len = pkt->u.frame.length,
4950 			};
4951 
4952 			if (stats.rssi != 0)
4953 				stats.mask |= IEEE80211_STATMASK_RSSI;
4954 			if (stats.signal != 0)
4955 				stats.mask |= IEEE80211_STATMASK_SIGNAL;
4956 			if (stats.rate != 0)
4957 				stats.mask |= IEEE80211_STATMASK_RATE;
4958 
4959 			priv->rx_packets++;
4960 
4961 #ifdef CONFIG_IPW_PROMISC
4962 			if (priv->ieee->iw_mode == IW_MODE_MONITOR) {
4963 				ipw_handle_data_packet(priv, rxb,
4964 						       &stats);
4965 				break;
4966 			}
4967 #endif
4968 
4969 			header =
4970 			    (struct ieee80211_hdr *)(rxb->skb->data +
4971 						     IPW_RX_FRAME_SIZE);
4972 			/* TODO: Check Ad-Hoc dest/source and make sure
4973 			 * that we are actually parsing these packets
4974 			 * correctly -- we should probably use the
4975 			 * frame control of the packet and disregard
4976 			 * the current iw_mode */
4977 			switch (priv->ieee->iw_mode) {
4978 			case IW_MODE_ADHOC:
4979 				network_packet =
4980 				    !memcmp(header->addr1,
4981 					    priv->net_dev->dev_addr,
4982 					    ETH_ALEN) ||
4983 				    !memcmp(header->addr3,
4984 					    priv->bssid, ETH_ALEN) ||
4985 				    is_broadcast_ether_addr(header->
4986 							    addr1)
4987 				    || is_multicast_ether_addr(header->
4988 							       addr1);
4989 				break;
4990 
4991 			case IW_MODE_INFRA:
4992 			default:
4993 				network_packet =
4994 				    !memcmp(header->addr3,
4995 					    priv->bssid, ETH_ALEN) ||
4996 				    !memcmp(header->addr1,
4997 					    priv->net_dev->dev_addr,
4998 					    ETH_ALEN) ||
4999 				    is_broadcast_ether_addr(header->
5000 							    addr1)
5001 				    || is_multicast_ether_addr(header->
5002 							       addr1);
5003 				break;
5004 			}
5005 
5006 			if (network_packet && priv->assoc_network) {
5007 				priv->assoc_network->stats.rssi =
5008 				    stats.rssi;
5009 				average_add(&priv->average_rssi,
5010 					    stats.rssi);
5011 				priv->last_rx_rssi = stats.rssi;
5012 			}
5013 
5014 			IPW_DEBUG_RX("Frame: len=%u\n",
5015 				     pkt->u.frame.length);
5016 
5017 			if (pkt->u.frame.length < frame_hdr_len(header)) {
5018 				IPW_DEBUG_DROP
5019 				    ("Received packet is too small. "
5020 				     "Dropping.\n");
5021 				priv->ieee->stats.rx_errors++;
5022 				priv->wstats.discard.misc++;
5023 				break;
5024 			}
5025 
5026 			switch (WLAN_FC_GET_TYPE(header->frame_ctl)) {
5027 			case IEEE80211_FTYPE_MGMT:
5028 				ieee80211_rx_mgt(priv->ieee, header,
5029 						 &stats);
5030 				if (priv->ieee->iw_mode == IW_MODE_ADHOC
5031 				    &&
5032 				    ((WLAN_FC_GET_STYPE
5033 				      (header->frame_ctl) ==
5034 				      IEEE80211_STYPE_PROBE_RESP)
5035 				     ||
5036 				     (WLAN_FC_GET_STYPE
5037 				      (header->frame_ctl) ==
5038 				      IEEE80211_STYPE_BEACON))
5039 				    && !memcmp(header->addr3,
5040 					       priv->bssid, ETH_ALEN))
5041 					ipw_add_station(priv,
5042 							header->addr2);
5043 				break;
5044 
5045 			case IEEE80211_FTYPE_CTL:
5046 				break;
5047 
5048 			case IEEE80211_FTYPE_DATA:
5049 				if (network_packet)
5050 					ipw_handle_data_packet(priv,
5051 							       rxb,
5052 							       &stats);
5053 				else
5054 					IPW_DEBUG_DROP("Dropping: "
5055 						       MAC_FMT ", "
5056 						       MAC_FMT ", "
5057 						       MAC_FMT "\n",
5058 						       MAC_ARG(header->
5059 							       addr1),
5060 						       MAC_ARG(header->
5061 							       addr2),
5062 						       MAC_ARG(header->
5063 							       addr3));
5064 				break;
5065 			}
5066 			break;
5067 		}
5068 
5069 	case RX_HOST_NOTIFICATION_TYPE:{
5070 			IPW_DEBUG_RX
5071 			    ("Notification: subtype=%02X flags=%02X size=%d\n",
5072 			     pkt->u.notification.subtype,
5073 			     pkt->u.notification.flags,
5074 			     pkt->u.notification.size);
5075 			ipw_rx_notification(priv, &pkt->u.notification);
5076 			break;
5077 		}
5007 5078
5008 default: 5079 default:
5009 IPW_DEBUG_RX("Bad Rx packet of type %d\n", 5080 IPW_DEBUG_RX("Bad Rx packet of type %d\n",
@@ -5088,10 +5159,10 @@ static int ipw_request_scan(struct ipw_priv *priv)
5088 /* If we are roaming, then make this a directed scan for the current 5159 /* If we are roaming, then make this a directed scan for the current
5089 * network. Otherwise, ensure that every other scan is a fast 5160 * network. Otherwise, ensure that every other scan is a fast
5090 * channel hop scan */ 5161 * channel hop scan */
5091 if ((priv->status & STATUS_ROAMING) || ( 5162 if ((priv->status & STATUS_ROAMING)
5092 !(priv->status & STATUS_ASSOCIATED) && 5163 || (!(priv->status & STATUS_ASSOCIATED)
5093 (priv->config & CFG_STATIC_ESSID) && 5164 && (priv->config & CFG_STATIC_ESSID)
5094 (scan.full_scan_index % 2))) { 5165 && (scan.full_scan_index % 2))) {
5095 err = ipw_send_ssid(priv, priv->essid, priv->essid_len); 5166 err = ipw_send_ssid(priv, priv->essid, priv->essid_len);
5096 if (err) { 5167 if (err) {
5097 IPW_DEBUG_HC("Attempt to send SSID command failed.\n"); 5168 IPW_DEBUG_HC("Attempt to send SSID command failed.\n");
@@ -5103,7 +5174,7 @@ static int ipw_request_scan(struct ipw_priv *priv)
5103 scan_type = IPW_SCAN_ACTIVE_BROADCAST_SCAN; 5174 scan_type = IPW_SCAN_ACTIVE_BROADCAST_SCAN;
5104 } 5175 }
5105 5176
5106 if (priv->ieee->freq_band & IEEE80211_52GHZ_BAND) { 5177 if (priv->ieee->freq_band & IEEE80211_52GHZ_BAND) {
5107 int start = channel_index; 5178 int start = channel_index;
5108 for (i = 0; i < MAX_A_CHANNELS; i++) { 5179 for (i = 0; i < MAX_A_CHANNELS; i++) {
5109 if (band_a_active_channel[i] == 0) 5180 if (band_a_active_channel[i] == 0)
@@ -5113,18 +5184,18 @@ static int ipw_request_scan(struct ipw_priv *priv)
5113 continue; 5184 continue;
5114 channel_index++; 5185 channel_index++;
5115 scan.channels_list[channel_index] = 5186 scan.channels_list[channel_index] =
5116 band_a_active_channel[i]; 5187 band_a_active_channel[i];
5117 ipw_set_scan_type(&scan, channel_index, scan_type); 5188 ipw_set_scan_type(&scan, channel_index, scan_type);
5118 } 5189 }
5119 5190
5120 if (start != channel_index) { 5191 if (start != channel_index) {
5121 scan.channels_list[start] = (u8)(IPW_A_MODE << 6) | 5192 scan.channels_list[start] = (u8) (IPW_A_MODE << 6) |
5122 (channel_index - start); 5193 (channel_index - start);
5123 channel_index++; 5194 channel_index++;
5124 } 5195 }
5125 } 5196 }
5126 5197
5127 if (priv->ieee->freq_band & IEEE80211_24GHZ_BAND) { 5198 if (priv->ieee->freq_band & IEEE80211_24GHZ_BAND) {
5128 int start = channel_index; 5199 int start = channel_index;
5129 for (i = 0; i < MAX_B_CHANNELS; i++) { 5200 for (i = 0; i < MAX_B_CHANNELS; i++) {
5130 if (band_b_active_channel[i] == 0) 5201 if (band_b_active_channel[i] == 0)
@@ -5134,20 +5205,19 @@ static int ipw_request_scan(struct ipw_priv *priv)
5134 continue; 5205 continue;
5135 channel_index++; 5206 channel_index++;
5136 scan.channels_list[channel_index] = 5207 scan.channels_list[channel_index] =
5137 band_b_active_channel[i]; 5208 band_b_active_channel[i];
5138 ipw_set_scan_type(&scan, channel_index, scan_type); 5209 ipw_set_scan_type(&scan, channel_index, scan_type);
5139 } 5210 }
5140 5211
5141 if (start != channel_index) { 5212 if (start != channel_index) {
5142 scan.channels_list[start] = (u8)(IPW_B_MODE << 6) | 5213 scan.channels_list[start] = (u8) (IPW_B_MODE << 6) |
5143 (channel_index - start); 5214 (channel_index - start);
5144 } 5215 }
5145 } 5216 }
5146 5217
5147 err = ipw_send_scan_request_ext(priv, &scan); 5218 err = ipw_send_scan_request_ext(priv, &scan);
5148 if (err) { 5219 if (err) {
5149 IPW_DEBUG_HC("Sending scan command failed: %08X\n", 5220 IPW_DEBUG_HC("Sending scan command failed: %08X\n", err);
5150 err);
5151 return -EIO; 5221 return -EIO;
5152 } 5222 }
5153 5223
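The channel-list construction above reserves one slot at the start of each band's run, appends the active channels behind it, and then rewrites the reserved slot with a header byte that packs the band mode into the top bits and the channel count into the low bits, e.g. (IPW_A_MODE << 6) | count. A sketch of that encoding with a hypothetical helper:

typedef unsigned char u8;

/* Append one band's channels and prefix them with "(mode << 6) | count". */
static int add_band(u8 *list, int index, u8 mode,
		    const u8 *channels, int n_channels)
{
	int start = index++;	/* slot reserved for the header byte */
	int i;

	for (i = 0; i < n_channels; i++) {
		if (channels[i] == 0)
			continue;	/* unused table entry */
		list[index++] = channels[i];
	}

	if (index - start == 1)
		return start;	/* no channels in this band: drop the reserved slot */

	list[start] = (u8)((mode << 6) | (index - start - 1));
	return index;
}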
@@ -5199,9 +5269,8 @@ static int ipw_set_channel(struct ipw_priv *priv, u8 channel)
5199 priv->config |= CFG_STATIC_CHANNEL; 5269 priv->config |= CFG_STATIC_CHANNEL;
5200 5270
5201 if (priv->channel == channel) { 5271 if (priv->channel == channel) {
5202 IPW_DEBUG_INFO( 5272 IPW_DEBUG_INFO("Request to set channel to current value (%d)\n",
5203 "Request to set channel to current value (%d)\n", 5273 channel);
5204 channel);
5205 return 0; 5274 return 0;
5206 } 5275 }
5207 5276
@@ -5229,8 +5298,7 @@ static int ipw_wx_set_freq(struct net_device *dev,
5229 5298
5230 /* if setting by freq convert to channel */ 5299 /* if setting by freq convert to channel */
5231 if (fwrq->e == 1) { 5300 if (fwrq->e == 1) {
5232 if ((fwrq->m >= (int) 2.412e8 && 5301 if ((fwrq->m >= (int)2.412e8 && fwrq->m <= (int)2.487e8)) {
5233 fwrq->m <= (int) 2.487e8)) {
5234 int f = fwrq->m / 100000; 5302 int f = fwrq->m / 100000;
5235 int c = 0; 5303 int c = 0;
5236 5304
@@ -5248,12 +5316,11 @@ static int ipw_wx_set_freq(struct net_device *dev,
5248 return -EOPNOTSUPP; 5316 return -EOPNOTSUPP;
5249 5317
5250 IPW_DEBUG_WX("SET Freq/Channel -> %d \n", fwrq->m); 5318 IPW_DEBUG_WX("SET Freq/Channel -> %d \n", fwrq->m);
5251 return ipw_set_channel(priv, (u8)fwrq->m); 5319 return ipw_set_channel(priv, (u8) fwrq->m);
5252 5320
5253 return 0; 5321 return 0;
5254} 5322}
5255 5323
5256
5257static int ipw_wx_get_freq(struct net_device *dev, 5324static int ipw_wx_get_freq(struct net_device *dev,
5258 struct iw_request_info *info, 5325 struct iw_request_info *info,
5259 union iwreq_data *wrqu, char *extra) 5326 union iwreq_data *wrqu, char *extra)
@@ -5306,7 +5373,7 @@ static int ipw_wx_set_mode(struct net_device *dev,
5306 5373
5307 if (wrqu->mode == IW_MODE_MONITOR) 5374 if (wrqu->mode == IW_MODE_MONITOR)
5308 priv->net_dev->type = ARPHRD_IEEE80211; 5375 priv->net_dev->type = ARPHRD_IEEE80211;
5309#endif /* CONFIG_IPW_PROMISC */ 5376#endif /* CONFIG_IPW_PROMISC */
5310 5377
5311#ifdef CONFIG_PM 5378#ifdef CONFIG_PM
5312 /* Free the existing firmware and reset the fw_loaded 5379 /* Free the existing firmware and reset the fw_loaded
@@ -5324,12 +5391,12 @@ static int ipw_wx_set_mode(struct net_device *dev,
5324 priv->ieee->iw_mode = wrqu->mode; 5391 priv->ieee->iw_mode = wrqu->mode;
5325 ipw_adapter_restart(priv); 5392 ipw_adapter_restart(priv);
5326 5393
5327 return err; 5394 return err;
5328} 5395}
5329 5396
5330static int ipw_wx_get_mode(struct net_device *dev, 5397static int ipw_wx_get_mode(struct net_device *dev,
5331 struct iw_request_info *info, 5398 struct iw_request_info *info,
5332 union iwreq_data *wrqu, char *extra) 5399 union iwreq_data *wrqu, char *extra)
5333{ 5400{
5334 struct ipw_priv *priv = ieee80211_priv(dev); 5401 struct ipw_priv *priv = ieee80211_priv(dev);
5335 5402
@@ -5339,7 +5406,6 @@ static int ipw_wx_get_mode(struct net_device *dev,
5339 return 0; 5406 return 0;
5340} 5407}
5341 5408
5342
5343#define DEFAULT_RTS_THRESHOLD 2304U 5409#define DEFAULT_RTS_THRESHOLD 2304U
5344#define MIN_RTS_THRESHOLD 1U 5410#define MIN_RTS_THRESHOLD 1U
5345#define MAX_RTS_THRESHOLD 2304U 5411#define MAX_RTS_THRESHOLD 2304U
@@ -5383,19 +5449,19 @@ static int ipw_wx_get_range(struct net_device *dev,
5383 /* TODO: Find real max RSSI and stick here */ 5449 /* TODO: Find real max RSSI and stick here */
5384 range->max_qual.level = 0; 5450 range->max_qual.level = 0;
5385 range->max_qual.noise = 0; 5451 range->max_qual.noise = 0;
5386 range->max_qual.updated = 7; /* Updated all three */ 5452 range->max_qual.updated = 7; /* Updated all three */
5387 5453
5388 range->avg_qual.qual = 70; 5454 range->avg_qual.qual = 70;
5389 	/* TODO: Find real 'good' to 'bad' threshold value for RSSI */ 5455 	/* TODO: Find real 'good' to 'bad' threshold value for RSSI */
5390 range->avg_qual.level = 0; /* FIXME to real average level */ 5456 range->avg_qual.level = 0; /* FIXME to real average level */
5391 range->avg_qual.noise = 0; 5457 range->avg_qual.noise = 0;
5392 range->avg_qual.updated = 7; /* Updated all three */ 5458 range->avg_qual.updated = 7; /* Updated all three */
5393 5459
5394 range->num_bitrates = min(priv->rates.num_rates, (u8)IW_MAX_BITRATES); 5460 range->num_bitrates = min(priv->rates.num_rates, (u8) IW_MAX_BITRATES);
5395 5461
5396 for (i = 0; i < range->num_bitrates; i++) 5462 for (i = 0; i < range->num_bitrates; i++)
5397 range->bitrate[i] = (priv->rates.supported_rates[i] & 0x7F) * 5463 range->bitrate[i] = (priv->rates.supported_rates[i] & 0x7F) *
5398 500000; 5464 500000;
5399 5465
5400 range->max_rts = DEFAULT_RTS_THRESHOLD; 5466 range->max_rts = DEFAULT_RTS_THRESHOLD;
5401 range->min_frag = MIN_FRAG_THRESHOLD; 5467 range->min_frag = MIN_FRAG_THRESHOLD;
@@ -5410,7 +5476,7 @@ static int ipw_wx_get_range(struct net_device *dev,
5410 range->we_version_compiled = WIRELESS_EXT; 5476 range->we_version_compiled = WIRELESS_EXT;
5411 range->we_version_source = 16; 5477 range->we_version_source = 16;
5412 5478
5413 range->num_channels = FREQ_COUNT; 5479 range->num_channels = FREQ_COUNT;
5414 5480
5415 val = 0; 5481 val = 0;
5416 for (i = 0; i < FREQ_COUNT; i++) { 5482 for (i = 0; i < FREQ_COUNT; i++) {
@@ -5506,7 +5572,7 @@ static int ipw_wx_set_essid(struct net_device *dev,
5506 union iwreq_data *wrqu, char *extra) 5572 union iwreq_data *wrqu, char *extra)
5507{ 5573{
5508 struct ipw_priv *priv = ieee80211_priv(dev); 5574 struct ipw_priv *priv = ieee80211_priv(dev);
5509 char *essid = ""; /* ANY */ 5575 char *essid = ""; /* ANY */
5510 int length = 0; 5576 int length = 0;
5511 5577
5512 if (wrqu->essid.flags && wrqu->essid.length) { 5578 if (wrqu->essid.flags && wrqu->essid.length) {
@@ -5567,11 +5633,11 @@ static int ipw_wx_get_essid(struct net_device *dev,
5567 escape_essid(priv->essid, priv->essid_len)); 5633 escape_essid(priv->essid, priv->essid_len));
5568 memcpy(extra, priv->essid, priv->essid_len); 5634 memcpy(extra, priv->essid, priv->essid_len);
5569 wrqu->essid.length = priv->essid_len; 5635 wrqu->essid.length = priv->essid_len;
5570 wrqu->essid.flags = 1; /* active */ 5636 wrqu->essid.flags = 1; /* active */
5571 } else { 5637 } else {
5572 IPW_DEBUG_WX("Getting essid: ANY\n"); 5638 IPW_DEBUG_WX("Getting essid: ANY\n");
5573 wrqu->essid.length = 0; 5639 wrqu->essid.length = 0;
5574 wrqu->essid.flags = 0; /* active */ 5640 wrqu->essid.flags = 0; /* active */
5575 } 5641 }
5576 5642
5577 return 0; 5643 return 0;
@@ -5587,15 +5653,14 @@ static int ipw_wx_set_nick(struct net_device *dev,
5587 if (wrqu->data.length > IW_ESSID_MAX_SIZE) 5653 if (wrqu->data.length > IW_ESSID_MAX_SIZE)
5588 return -E2BIG; 5654 return -E2BIG;
5589 5655
5590 wrqu->data.length = min((size_t)wrqu->data.length, sizeof(priv->nick)); 5656 wrqu->data.length = min((size_t) wrqu->data.length, sizeof(priv->nick));
5591 memset(priv->nick, 0, sizeof(priv->nick)); 5657 memset(priv->nick, 0, sizeof(priv->nick));
5592 memcpy(priv->nick, extra, wrqu->data.length); 5658 memcpy(priv->nick, extra, wrqu->data.length);
5593 IPW_DEBUG_TRACE("<<\n"); 5659 IPW_DEBUG_TRACE("<<\n");
5594 return 0; 5660 return 0;
5595 5661
5596} 5662}
5597 5663
5598
5599static int ipw_wx_get_nick(struct net_device *dev, 5664static int ipw_wx_get_nick(struct net_device *dev,
5600 struct iw_request_info *info, 5665 struct iw_request_info *info,
5601 union iwreq_data *wrqu, char *extra) 5666 union iwreq_data *wrqu, char *extra)
@@ -5604,11 +5669,10 @@ static int ipw_wx_get_nick(struct net_device *dev,
5604 IPW_DEBUG_WX("Getting nick\n"); 5669 IPW_DEBUG_WX("Getting nick\n");
5605 wrqu->data.length = strlen(priv->nick) + 1; 5670 wrqu->data.length = strlen(priv->nick) + 1;
5606 memcpy(extra, priv->nick, wrqu->data.length); 5671 memcpy(extra, priv->nick, wrqu->data.length);
5607 wrqu->data.flags = 1; /* active */ 5672 wrqu->data.flags = 1; /* active */
5608 return 0; 5673 return 0;
5609} 5674}
5610 5675
5611
5612static int ipw_wx_set_rate(struct net_device *dev, 5676static int ipw_wx_set_rate(struct net_device *dev,
5613 struct iw_request_info *info, 5677 struct iw_request_info *info,
5614 union iwreq_data *wrqu, char *extra) 5678 union iwreq_data *wrqu, char *extra)
@@ -5621,14 +5685,13 @@ static int ipw_wx_get_rate(struct net_device *dev,
5621 struct iw_request_info *info, 5685 struct iw_request_info *info,
5622 union iwreq_data *wrqu, char *extra) 5686 union iwreq_data *wrqu, char *extra)
5623{ 5687{
5624 struct ipw_priv * priv = ieee80211_priv(dev); 5688 struct ipw_priv *priv = ieee80211_priv(dev);
5625 wrqu->bitrate.value = priv->last_rate; 5689 wrqu->bitrate.value = priv->last_rate;
5626 5690
5627 IPW_DEBUG_WX("GET Rate -> %d \n", wrqu->bitrate.value); 5691 IPW_DEBUG_WX("GET Rate -> %d \n", wrqu->bitrate.value);
5628 return 0; 5692 return 0;
5629} 5693}
5630 5694
5631
5632static int ipw_wx_set_rts(struct net_device *dev, 5695static int ipw_wx_set_rts(struct net_device *dev,
5633 struct iw_request_info *info, 5696 struct iw_request_info *info,
5634 union iwreq_data *wrqu, char *extra) 5697 union iwreq_data *wrqu, char *extra)
@@ -5657,14 +5720,12 @@ static int ipw_wx_get_rts(struct net_device *dev,
5657 struct ipw_priv *priv = ieee80211_priv(dev); 5720 struct ipw_priv *priv = ieee80211_priv(dev);
5658 wrqu->rts.value = priv->rts_threshold; 5721 wrqu->rts.value = priv->rts_threshold;
5659 wrqu->rts.fixed = 0; /* no auto select */ 5722 wrqu->rts.fixed = 0; /* no auto select */
5660 wrqu->rts.disabled = 5723 wrqu->rts.disabled = (wrqu->rts.value == DEFAULT_RTS_THRESHOLD);
5661 (wrqu->rts.value == DEFAULT_RTS_THRESHOLD);
5662 5724
5663 IPW_DEBUG_WX("GET RTS Threshold -> %d \n", wrqu->rts.value); 5725 IPW_DEBUG_WX("GET RTS Threshold -> %d \n", wrqu->rts.value);
5664 return 0; 5726 return 0;
5665} 5727}
5666 5728
5667
5668static int ipw_wx_set_txpow(struct net_device *dev, 5729static int ipw_wx_set_txpow(struct net_device *dev,
5669 struct iw_request_info *info, 5730 struct iw_request_info *info,
5670 union iwreq_data *wrqu, char *extra) 5731 union iwreq_data *wrqu, char *extra)
@@ -5679,8 +5740,7 @@ static int ipw_wx_set_txpow(struct net_device *dev,
5679 if (wrqu->power.flags != IW_TXPOW_DBM) 5740 if (wrqu->power.flags != IW_TXPOW_DBM)
5680 return -EINVAL; 5741 return -EINVAL;
5681 5742
5682 if ((wrqu->power.value > 20) || 5743 if ((wrqu->power.value > 20) || (wrqu->power.value < -12))
5683 (wrqu->power.value < -12))
5684 return -EINVAL; 5744 return -EINVAL;
5685 5745
5686 priv->tx_power = wrqu->power.value; 5746 priv->tx_power = wrqu->power.value;
@@ -5704,11 +5764,10 @@ static int ipw_wx_set_txpow(struct net_device *dev,
5704 5764
5705 return 0; 5765 return 0;
5706 5766
5707 error: 5767 error:
5708 return -EIO; 5768 return -EIO;
5709} 5769}
5710 5770
5711
5712static int ipw_wx_get_txpow(struct net_device *dev, 5771static int ipw_wx_get_txpow(struct net_device *dev,
5713 struct iw_request_info *info, 5772 struct iw_request_info *info,
5714 union iwreq_data *wrqu, char *extra) 5773 union iwreq_data *wrqu, char *extra)
@@ -5721,15 +5780,14 @@ static int ipw_wx_get_txpow(struct net_device *dev,
5721 wrqu->power.disabled = (priv->status & STATUS_RF_KILL_MASK) ? 1 : 0; 5780 wrqu->power.disabled = (priv->status & STATUS_RF_KILL_MASK) ? 1 : 0;
5722 5781
5723 IPW_DEBUG_WX("GET TX Power -> %s %d \n", 5782 IPW_DEBUG_WX("GET TX Power -> %s %d \n",
5724 wrqu->power.disabled ? "ON" : "OFF", 5783 wrqu->power.disabled ? "ON" : "OFF", wrqu->power.value);
5725 wrqu->power.value);
5726 5784
5727 return 0; 5785 return 0;
5728} 5786}
5729 5787
5730static int ipw_wx_set_frag(struct net_device *dev, 5788static int ipw_wx_set_frag(struct net_device *dev,
5731 struct iw_request_info *info, 5789 struct iw_request_info *info,
5732 union iwreq_data *wrqu, char *extra) 5790 union iwreq_data *wrqu, char *extra)
5733{ 5791{
5734 struct ipw_priv *priv = ieee80211_priv(dev); 5792 struct ipw_priv *priv = ieee80211_priv(dev);
5735 5793
@@ -5749,14 +5807,13 @@ static int ipw_wx_set_frag(struct net_device *dev,
5749} 5807}
5750 5808
5751static int ipw_wx_get_frag(struct net_device *dev, 5809static int ipw_wx_get_frag(struct net_device *dev,
5752 struct iw_request_info *info, 5810 struct iw_request_info *info,
5753 union iwreq_data *wrqu, char *extra) 5811 union iwreq_data *wrqu, char *extra)
5754{ 5812{
5755 struct ipw_priv *priv = ieee80211_priv(dev); 5813 struct ipw_priv *priv = ieee80211_priv(dev);
5756 wrqu->frag.value = priv->ieee->fts; 5814 wrqu->frag.value = priv->ieee->fts;
5757 wrqu->frag.fixed = 0; /* no auto select */ 5815 wrqu->frag.fixed = 0; /* no auto select */
5758 wrqu->frag.disabled = 5816 wrqu->frag.disabled = (wrqu->frag.value == DEFAULT_FTS);
5759 (wrqu->frag.value == DEFAULT_FTS);
5760 5817
5761 IPW_DEBUG_WX("GET Frag Threshold -> %d \n", wrqu->frag.value); 5818 IPW_DEBUG_WX("GET Frag Threshold -> %d \n", wrqu->frag.value);
5762 5819
@@ -5771,7 +5828,6 @@ static int ipw_wx_set_retry(struct net_device *dev,
5771 return -EOPNOTSUPP; 5828 return -EOPNOTSUPP;
5772} 5829}
5773 5830
5774
5775static int ipw_wx_get_retry(struct net_device *dev, 5831static int ipw_wx_get_retry(struct net_device *dev,
5776 struct iw_request_info *info, 5832 struct iw_request_info *info,
5777 union iwreq_data *wrqu, char *extra) 5833 union iwreq_data *wrqu, char *extra)
@@ -5780,7 +5836,6 @@ static int ipw_wx_get_retry(struct net_device *dev,
5780 return -EOPNOTSUPP; 5836 return -EOPNOTSUPP;
5781} 5837}
5782 5838
5783
5784static int ipw_wx_set_scan(struct net_device *dev, 5839static int ipw_wx_set_scan(struct net_device *dev,
5785 struct iw_request_info *info, 5840 struct iw_request_info *info,
5786 union iwreq_data *wrqu, char *extra) 5841 union iwreq_data *wrqu, char *extra)
@@ -5801,24 +5856,24 @@ static int ipw_wx_get_scan(struct net_device *dev,
5801} 5856}
5802 5857
5803static int ipw_wx_set_encode(struct net_device *dev, 5858static int ipw_wx_set_encode(struct net_device *dev,
5804 struct iw_request_info *info, 5859 struct iw_request_info *info,
5805 union iwreq_data *wrqu, char *key) 5860 union iwreq_data *wrqu, char *key)
5806{ 5861{
5807 struct ipw_priv *priv = ieee80211_priv(dev); 5862 struct ipw_priv *priv = ieee80211_priv(dev);
5808 return ieee80211_wx_set_encode(priv->ieee, info, wrqu, key); 5863 return ieee80211_wx_set_encode(priv->ieee, info, wrqu, key);
5809} 5864}
5810 5865
5811static int ipw_wx_get_encode(struct net_device *dev, 5866static int ipw_wx_get_encode(struct net_device *dev,
5812 struct iw_request_info *info, 5867 struct iw_request_info *info,
5813 union iwreq_data *wrqu, char *key) 5868 union iwreq_data *wrqu, char *key)
5814{ 5869{
5815 struct ipw_priv *priv = ieee80211_priv(dev); 5870 struct ipw_priv *priv = ieee80211_priv(dev);
5816 return ieee80211_wx_get_encode(priv->ieee, info, wrqu, key); 5871 return ieee80211_wx_get_encode(priv->ieee, info, wrqu, key);
5817} 5872}
5818 5873
5819static int ipw_wx_set_power(struct net_device *dev, 5874static int ipw_wx_set_power(struct net_device *dev,
5820 struct iw_request_info *info, 5875 struct iw_request_info *info,
5821 union iwreq_data *wrqu, char *extra) 5876 union iwreq_data *wrqu, char *extra)
5822{ 5877{
5823 struct ipw_priv *priv = ieee80211_priv(dev); 5878 struct ipw_priv *priv = ieee80211_priv(dev);
5824 int err; 5879 int err;
@@ -5837,11 +5892,11 @@ static int ipw_wx_set_power(struct net_device *dev,
5837 } 5892 }
5838 5893
5839 switch (wrqu->power.flags & IW_POWER_MODE) { 5894 switch (wrqu->power.flags & IW_POWER_MODE) {
5840 case IW_POWER_ON: /* If not specified */ 5895 case IW_POWER_ON: /* If not specified */
5841 case IW_POWER_MODE: /* If set all mask */ 5896 case IW_POWER_MODE: /* If set all mask */
5842 	case IW_POWER_ALL_R:	/* If explicitly state all */ 5897 	case IW_POWER_ALL_R:	/* If explicitly state all */
5843 break; 5898 break;
5844 default: /* Otherwise we don't support it */ 5899 default: /* Otherwise we don't support it */
5845 IPW_DEBUG_WX("SET PM Mode: %X not supported.\n", 5900 IPW_DEBUG_WX("SET PM Mode: %X not supported.\n",
5846 wrqu->power.flags); 5901 wrqu->power.flags);
5847 return -EOPNOTSUPP; 5902 return -EOPNOTSUPP;
@@ -5849,7 +5904,7 @@ static int ipw_wx_set_power(struct net_device *dev,
5849 5904
5850 /* If the user hasn't specified a power management mode yet, default 5905 /* If the user hasn't specified a power management mode yet, default
5851 * to BATTERY */ 5906 * to BATTERY */
5852 if (IPW_POWER_LEVEL(priv->power_mode) == IPW_POWER_AC) 5907 if (IPW_POWER_LEVEL(priv->power_mode) == IPW_POWER_AC)
5853 priv->power_mode = IPW_POWER_ENABLED | IPW_POWER_BATTERY; 5908 priv->power_mode = IPW_POWER_ENABLED | IPW_POWER_BATTERY;
5854 else 5909 else
5855 priv->power_mode = IPW_POWER_ENABLED | priv->power_mode; 5910 priv->power_mode = IPW_POWER_ENABLED | priv->power_mode;
@@ -5859,15 +5914,14 @@ static int ipw_wx_set_power(struct net_device *dev,
5859 return err; 5914 return err;
5860 } 5915 }
5861 5916
5862 IPW_DEBUG_WX("SET Power Management Mode -> 0x%02X\n", 5917 IPW_DEBUG_WX("SET Power Management Mode -> 0x%02X\n", priv->power_mode);
5863 priv->power_mode);
5864 5918
5865 return 0; 5919 return 0;
5866} 5920}
5867 5921
5868static int ipw_wx_get_power(struct net_device *dev, 5922static int ipw_wx_get_power(struct net_device *dev,
5869 struct iw_request_info *info, 5923 struct iw_request_info *info,
5870 union iwreq_data *wrqu, char *extra) 5924 union iwreq_data *wrqu, char *extra)
5871{ 5925{
5872 struct ipw_priv *priv = ieee80211_priv(dev); 5926 struct ipw_priv *priv = ieee80211_priv(dev);
5873 5927
@@ -5883,8 +5937,8 @@ static int ipw_wx_get_power(struct net_device *dev,
5883} 5937}
5884 5938
5885static int ipw_wx_set_powermode(struct net_device *dev, 5939static int ipw_wx_set_powermode(struct net_device *dev,
5886 struct iw_request_info *info, 5940 struct iw_request_info *info,
5887 union iwreq_data *wrqu, char *extra) 5941 union iwreq_data *wrqu, char *extra)
5888{ 5942{
5889 struct ipw_priv *priv = ieee80211_priv(dev); 5943 struct ipw_priv *priv = ieee80211_priv(dev);
5890 int mode = *(int *)extra; 5944 int mode = *(int *)extra;
@@ -5911,8 +5965,8 @@ static int ipw_wx_set_powermode(struct net_device *dev,
5911 5965
5912#define MAX_WX_STRING 80 5966#define MAX_WX_STRING 80
5913static int ipw_wx_get_powermode(struct net_device *dev, 5967static int ipw_wx_get_powermode(struct net_device *dev,
5914 struct iw_request_info *info, 5968 struct iw_request_info *info,
5915 union iwreq_data *wrqu, char *extra) 5969 union iwreq_data *wrqu, char *extra)
5916{ 5970{
5917 struct ipw_priv *priv = ieee80211_priv(dev); 5971 struct ipw_priv *priv = ieee80211_priv(dev);
5918 int level = IPW_POWER_LEVEL(priv->power_mode); 5972 int level = IPW_POWER_LEVEL(priv->power_mode);
@@ -5935,7 +5989,7 @@ static int ipw_wx_get_powermode(struct net_device *dev,
5935 } 5989 }
5936 5990
5937 if (!(priv->power_mode & IPW_POWER_ENABLED)) 5991 if (!(priv->power_mode & IPW_POWER_ENABLED))
5938 p += snprintf(p, MAX_WX_STRING - (p - extra)," OFF"); 5992 p += snprintf(p, MAX_WX_STRING - (p - extra), " OFF");
5939 5993
5940 wrqu->data.length = p - extra + 1; 5994 wrqu->data.length = p - extra + 1;
5941 5995
@@ -5943,16 +5997,15 @@ static int ipw_wx_get_powermode(struct net_device *dev,
5943} 5997}
5944 5998
5945static int ipw_wx_set_wireless_mode(struct net_device *dev, 5999static int ipw_wx_set_wireless_mode(struct net_device *dev,
5946 struct iw_request_info *info, 6000 struct iw_request_info *info,
5947 union iwreq_data *wrqu, char *extra) 6001 union iwreq_data *wrqu, char *extra)
5948{ 6002{
5949 struct ipw_priv *priv = ieee80211_priv(dev); 6003 struct ipw_priv *priv = ieee80211_priv(dev);
5950 int mode = *(int *)extra; 6004 int mode = *(int *)extra;
5951 u8 band = 0, modulation = 0; 6005 u8 band = 0, modulation = 0;
5952 6006
5953 if (mode == 0 || mode & ~IEEE_MODE_MASK) { 6007 if (mode == 0 || mode & ~IEEE_MODE_MASK) {
5954 IPW_WARNING("Attempt to set invalid wireless mode: %d\n", 6008 IPW_WARNING("Attempt to set invalid wireless mode: %d\n", mode);
5955 mode);
5956 return -EINVAL; 6009 return -EINVAL;
5957 } 6010 }
5958 6011
@@ -5988,31 +6041,30 @@ static int ipw_wx_set_wireless_mode(struct net_device *dev,
5988 priv->ieee->mode = mode; 6041 priv->ieee->mode = mode;
5989 priv->ieee->freq_band = band; 6042 priv->ieee->freq_band = band;
5990 priv->ieee->modulation = modulation; 6043 priv->ieee->modulation = modulation;
5991 init_supported_rates(priv, &priv->rates); 6044 init_supported_rates(priv, &priv->rates);
5992 6045
5993 /* If we are currently associated, or trying to associate 6046 /* If we are currently associated, or trying to associate
5994 * then see if this is a new configuration (causing us to 6047 * then see if this is a new configuration (causing us to
5995 * disassociate) */ 6048 * disassociate) */
5996 if (priv->status & (STATUS_ASSOCIATED | STATUS_ASSOCIATING)) { 6049 if (priv->status & (STATUS_ASSOCIATED | STATUS_ASSOCIATING)) {
5997 /* The resulting association will trigger 6050 /* The resulting association will trigger
5998 * the new rates to be sent to the device */ 6051 * the new rates to be sent to the device */
5999 IPW_DEBUG_ASSOC("Disassociating due to mode change.\n"); 6052 IPW_DEBUG_ASSOC("Disassociating due to mode change.\n");
6000 ipw_disassociate(priv); 6053 ipw_disassociate(priv);
6001 } else 6054 } else
6002 ipw_send_supported_rates(priv, &priv->rates); 6055 ipw_send_supported_rates(priv, &priv->rates);
6003 6056
6004 IPW_DEBUG_WX("PRIV SET MODE: %c%c%c\n", 6057 IPW_DEBUG_WX("PRIV SET MODE: %c%c%c\n",
6005 mode & IEEE_A ? 'a' : '.', 6058 mode & IEEE_A ? 'a' : '.',
6006 mode & IEEE_B ? 'b' : '.', 6059 mode & IEEE_B ? 'b' : '.', mode & IEEE_G ? 'g' : '.');
6007 mode & IEEE_G ? 'g' : '.');
6008 return 0; 6060 return 0;
6009} 6061}
6010 6062
6011static int ipw_wx_get_wireless_mode(struct net_device *dev, 6063static int ipw_wx_get_wireless_mode(struct net_device *dev,
6012 struct iw_request_info *info, 6064 struct iw_request_info *info,
6013 union iwreq_data *wrqu, char *extra) 6065 union iwreq_data *wrqu, char *extra)
6014{ 6066{
6015 struct ipw_priv *priv = ieee80211_priv(dev); 6067 struct ipw_priv *priv = ieee80211_priv(dev);
6016 6068
6017 switch (priv->ieee->freq_band) { 6069 switch (priv->ieee->freq_band) {
6018 case IEEE80211_24GHZ_BAND: 6070 case IEEE80211_24GHZ_BAND:
@@ -6033,7 +6085,7 @@ static int ipw_wx_get_wireless_mode(struct net_device *dev,
6033 strncpy(extra, "802.11a (1)", MAX_WX_STRING); 6085 strncpy(extra, "802.11a (1)", MAX_WX_STRING);
6034 break; 6086 break;
6035 6087
6036 default: /* Mixed Band */ 6088 default: /* Mixed Band */
6037 switch (priv->ieee->modulation) { 6089 switch (priv->ieee->modulation) {
6038 case IEEE80211_CCK_MODULATION: 6090 case IEEE80211_CCK_MODULATION:
6039 strncpy(extra, "802.11ab (3)", MAX_WX_STRING); 6091 strncpy(extra, "802.11ab (3)", MAX_WX_STRING);
@@ -6050,9 +6102,9 @@ static int ipw_wx_get_wireless_mode(struct net_device *dev,
6050 6102
6051 IPW_DEBUG_WX("PRIV GET MODE: %s\n", extra); 6103 IPW_DEBUG_WX("PRIV GET MODE: %s\n", extra);
6052 6104
6053 wrqu->data.length = strlen(extra) + 1; 6105 wrqu->data.length = strlen(extra) + 1;
6054 6106
6055 return 0; 6107 return 0;
6056} 6108}
6057 6109
6058#ifdef CONFIG_IPW_PROMISC 6110#ifdef CONFIG_IPW_PROMISC
@@ -6081,7 +6133,6 @@ static int ipw_wx_set_promisc(struct net_device *dev,
6081 return 0; 6133 return 0;
6082} 6134}
6083 6135
6084
6085static int ipw_wx_reset(struct net_device *dev, 6136static int ipw_wx_reset(struct net_device *dev,
6086 struct iw_request_info *info, 6137 struct iw_request_info *info,
6087 union iwreq_data *wrqu, char *extra) 6138 union iwreq_data *wrqu, char *extra)
@@ -6091,40 +6142,39 @@ static int ipw_wx_reset(struct net_device *dev,
6091 ipw_adapter_restart(priv); 6142 ipw_adapter_restart(priv);
6092 return 0; 6143 return 0;
6093} 6144}
6094#endif // CONFIG_IPW_PROMISC 6145#endif // CONFIG_IPW_PROMISC
6095 6146
6096/* Rebase the WE IOCTLs to zero for the handler array */ 6147/* Rebase the WE IOCTLs to zero for the handler array */
6097#define IW_IOCTL(x) [(x)-SIOCSIWCOMMIT] 6148#define IW_IOCTL(x) [(x)-SIOCSIWCOMMIT]
6098static iw_handler ipw_wx_handlers[] = 6149static iw_handler ipw_wx_handlers[] = {
6099{ 6150 IW_IOCTL(SIOCGIWNAME) = ipw_wx_get_name,
6100 IW_IOCTL(SIOCGIWNAME) = ipw_wx_get_name, 6151 IW_IOCTL(SIOCSIWFREQ) = ipw_wx_set_freq,
6101 IW_IOCTL(SIOCSIWFREQ) = ipw_wx_set_freq, 6152 IW_IOCTL(SIOCGIWFREQ) = ipw_wx_get_freq,
6102 IW_IOCTL(SIOCGIWFREQ) = ipw_wx_get_freq, 6153 IW_IOCTL(SIOCSIWMODE) = ipw_wx_set_mode,
6103 IW_IOCTL(SIOCSIWMODE) = ipw_wx_set_mode, 6154 IW_IOCTL(SIOCGIWMODE) = ipw_wx_get_mode,
6104 IW_IOCTL(SIOCGIWMODE) = ipw_wx_get_mode, 6155 IW_IOCTL(SIOCGIWRANGE) = ipw_wx_get_range,
6105 IW_IOCTL(SIOCGIWRANGE) = ipw_wx_get_range, 6156 IW_IOCTL(SIOCSIWAP) = ipw_wx_set_wap,
6106 IW_IOCTL(SIOCSIWAP) = ipw_wx_set_wap, 6157 IW_IOCTL(SIOCGIWAP) = ipw_wx_get_wap,
6107 IW_IOCTL(SIOCGIWAP) = ipw_wx_get_wap, 6158 IW_IOCTL(SIOCSIWSCAN) = ipw_wx_set_scan,
6108 IW_IOCTL(SIOCSIWSCAN) = ipw_wx_set_scan, 6159 IW_IOCTL(SIOCGIWSCAN) = ipw_wx_get_scan,
6109 IW_IOCTL(SIOCGIWSCAN) = ipw_wx_get_scan, 6160 IW_IOCTL(SIOCSIWESSID) = ipw_wx_set_essid,
6110 IW_IOCTL(SIOCSIWESSID) = ipw_wx_set_essid, 6161 IW_IOCTL(SIOCGIWESSID) = ipw_wx_get_essid,
6111 IW_IOCTL(SIOCGIWESSID) = ipw_wx_get_essid, 6162 IW_IOCTL(SIOCSIWNICKN) = ipw_wx_set_nick,
6112 IW_IOCTL(SIOCSIWNICKN) = ipw_wx_set_nick, 6163 IW_IOCTL(SIOCGIWNICKN) = ipw_wx_get_nick,
6113 IW_IOCTL(SIOCGIWNICKN) = ipw_wx_get_nick, 6164 IW_IOCTL(SIOCSIWRATE) = ipw_wx_set_rate,
6114 IW_IOCTL(SIOCSIWRATE) = ipw_wx_set_rate, 6165 IW_IOCTL(SIOCGIWRATE) = ipw_wx_get_rate,
6115 IW_IOCTL(SIOCGIWRATE) = ipw_wx_get_rate, 6166 IW_IOCTL(SIOCSIWRTS) = ipw_wx_set_rts,
6116 IW_IOCTL(SIOCSIWRTS) = ipw_wx_set_rts, 6167 IW_IOCTL(SIOCGIWRTS) = ipw_wx_get_rts,
6117 IW_IOCTL(SIOCGIWRTS) = ipw_wx_get_rts, 6168 IW_IOCTL(SIOCSIWFRAG) = ipw_wx_set_frag,
6118 IW_IOCTL(SIOCSIWFRAG) = ipw_wx_set_frag, 6169 IW_IOCTL(SIOCGIWFRAG) = ipw_wx_get_frag,
6119 IW_IOCTL(SIOCGIWFRAG) = ipw_wx_get_frag, 6170 IW_IOCTL(SIOCSIWTXPOW) = ipw_wx_set_txpow,
6120 IW_IOCTL(SIOCSIWTXPOW) = ipw_wx_set_txpow, 6171 IW_IOCTL(SIOCGIWTXPOW) = ipw_wx_get_txpow,
6121 IW_IOCTL(SIOCGIWTXPOW) = ipw_wx_get_txpow, 6172 IW_IOCTL(SIOCSIWRETRY) = ipw_wx_set_retry,
6122 IW_IOCTL(SIOCSIWRETRY) = ipw_wx_set_retry, 6173 IW_IOCTL(SIOCGIWRETRY) = ipw_wx_get_retry,
6123 IW_IOCTL(SIOCGIWRETRY) = ipw_wx_get_retry, 6174 IW_IOCTL(SIOCSIWENCODE) = ipw_wx_set_encode,
6124 IW_IOCTL(SIOCSIWENCODE) = ipw_wx_set_encode, 6175 IW_IOCTL(SIOCGIWENCODE) = ipw_wx_get_encode,
6125 IW_IOCTL(SIOCGIWENCODE) = ipw_wx_get_encode, 6176 IW_IOCTL(SIOCSIWPOWER) = ipw_wx_set_power,
6126 IW_IOCTL(SIOCSIWPOWER) = ipw_wx_set_power, 6177 IW_IOCTL(SIOCGIWPOWER) = ipw_wx_get_power,
6127 IW_IOCTL(SIOCGIWPOWER) = ipw_wx_get_power,
6128}; 6178};
6129 6179
6130#define IPW_PRIV_SET_POWER SIOCIWFIRSTPRIV 6180#define IPW_PRIV_SET_POWER SIOCIWFIRSTPRIV
@@ -6134,38 +6184,31 @@ static iw_handler ipw_wx_handlers[] =
6134#define IPW_PRIV_SET_PROMISC SIOCIWFIRSTPRIV+4 6184#define IPW_PRIV_SET_PROMISC SIOCIWFIRSTPRIV+4
6135#define IPW_PRIV_RESET SIOCIWFIRSTPRIV+5 6185#define IPW_PRIV_RESET SIOCIWFIRSTPRIV+5
6136 6186
6137
6138static struct iw_priv_args ipw_priv_args[] = { 6187static struct iw_priv_args ipw_priv_args[] = {
6139 { 6188 {
6140 .cmd = IPW_PRIV_SET_POWER, 6189 .cmd = IPW_PRIV_SET_POWER,
6141 .set_args = IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 1, 6190 .set_args = IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 1,
6142 .name = "set_power" 6191 .name = "set_power"},
6143 },
6144 { 6192 {
6145 .cmd = IPW_PRIV_GET_POWER, 6193 .cmd = IPW_PRIV_GET_POWER,
6146 .get_args = IW_PRIV_TYPE_CHAR | IW_PRIV_SIZE_FIXED | MAX_WX_STRING, 6194 .get_args = IW_PRIV_TYPE_CHAR | IW_PRIV_SIZE_FIXED | MAX_WX_STRING,
6147 .name = "get_power" 6195 .name = "get_power"},
6148 },
6149 { 6196 {
6150 .cmd = IPW_PRIV_SET_MODE, 6197 .cmd = IPW_PRIV_SET_MODE,
6151 .set_args = IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 1, 6198 .set_args = IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 1,
6152 .name = "set_mode" 6199 .name = "set_mode"},
6153 },
6154 { 6200 {
6155 .cmd = IPW_PRIV_GET_MODE, 6201 .cmd = IPW_PRIV_GET_MODE,
6156 .get_args = IW_PRIV_TYPE_CHAR | IW_PRIV_SIZE_FIXED | MAX_WX_STRING, 6202 .get_args = IW_PRIV_TYPE_CHAR | IW_PRIV_SIZE_FIXED | MAX_WX_STRING,
6157 .name = "get_mode" 6203 .name = "get_mode"},
6158 },
6159#ifdef CONFIG_IPW_PROMISC 6204#ifdef CONFIG_IPW_PROMISC
6160 { 6205 {
6161 IPW_PRIV_SET_PROMISC, 6206 IPW_PRIV_SET_PROMISC,
6162 IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 2, 0, "monitor" 6207 IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 2, 0, "monitor"},
6163 },
6164 { 6208 {
6165 IPW_PRIV_RESET, 6209 IPW_PRIV_RESET,
6166 IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 0, 0, "reset" 6210 IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 0, 0, "reset"},
6167 }, 6211#endif /* CONFIG_IPW_PROMISC */
6168#endif /* CONFIG_IPW_PROMISC */
6169}; 6212};
6170 6213
6171static iw_handler ipw_priv_handler[] = { 6214static iw_handler ipw_priv_handler[] = {
@@ -6179,25 +6222,21 @@ static iw_handler ipw_priv_handler[] = {
6179#endif 6222#endif
6180}; 6223};
6181 6224
6182static struct iw_handler_def ipw_wx_handler_def = 6225static struct iw_handler_def ipw_wx_handler_def = {
6183{ 6226 .standard = ipw_wx_handlers,
6184 .standard = ipw_wx_handlers, 6227 .num_standard = ARRAY_SIZE(ipw_wx_handlers),
6185 .num_standard = ARRAY_SIZE(ipw_wx_handlers), 6228 .num_private = ARRAY_SIZE(ipw_priv_handler),
6186 .num_private = ARRAY_SIZE(ipw_priv_handler), 6229 .num_private_args = ARRAY_SIZE(ipw_priv_args),
6187 .num_private_args = ARRAY_SIZE(ipw_priv_args), 6230 .private = ipw_priv_handler,
6188 .private = ipw_priv_handler, 6231 .private_args = ipw_priv_args,
6189 .private_args = ipw_priv_args,
6190}; 6232};
6191 6233
6192
6193
6194
6195/* 6234/*
6196 * Get wireless statistics. 6235 * Get wireless statistics.
6197 * Called by /proc/net/wireless 6236 * Called by /proc/net/wireless
6198 * Also called by SIOCGIWSTATS 6237 * Also called by SIOCGIWSTATS
6199 */ 6238 */
6200static struct iw_statistics *ipw_get_wireless_stats(struct net_device * dev) 6239static struct iw_statistics *ipw_get_wireless_stats(struct net_device *dev)
6201{ 6240{
6202 struct ipw_priv *priv = ieee80211_priv(dev); 6241 struct ipw_priv *priv = ieee80211_priv(dev);
6203 struct iw_statistics *wstats; 6242 struct iw_statistics *wstats;
@@ -6217,7 +6256,7 @@ static struct iw_statistics *ipw_get_wireless_stats(struct net_device * dev)
6217 wstats->qual.noise = 0; 6256 wstats->qual.noise = 0;
6218 wstats->qual.updated = 7; 6257 wstats->qual.updated = 7;
6219 wstats->qual.updated |= IW_QUAL_NOISE_INVALID | 6258 wstats->qual.updated |= IW_QUAL_NOISE_INVALID |
6220 IW_QUAL_QUAL_INVALID | IW_QUAL_LEVEL_INVALID; 6259 IW_QUAL_QUAL_INVALID | IW_QUAL_LEVEL_INVALID;
6221 return wstats; 6260 return wstats;
6222 } 6261 }
6223 6262
@@ -6225,7 +6264,7 @@ static struct iw_statistics *ipw_get_wireless_stats(struct net_device * dev)
6225 wstats->qual.level = average_value(&priv->average_rssi); 6264 wstats->qual.level = average_value(&priv->average_rssi);
6226 wstats->qual.noise = average_value(&priv->average_noise); 6265 wstats->qual.noise = average_value(&priv->average_noise);
6227 wstats->qual.updated = IW_QUAL_QUAL_UPDATED | IW_QUAL_LEVEL_UPDATED | 6266 wstats->qual.updated = IW_QUAL_QUAL_UPDATED | IW_QUAL_LEVEL_UPDATED |
6228 IW_QUAL_NOISE_UPDATED; 6267 IW_QUAL_NOISE_UPDATED;
6229 6268
6230 wstats->miss.beacon = average_value(&priv->average_missed_beacons); 6269 wstats->miss.beacon = average_value(&priv->average_missed_beacons);
6231 wstats->discard.retries = priv->last_tx_failures; 6270 wstats->discard.retries = priv->last_tx_failures;
@@ -6238,13 +6277,12 @@ static struct iw_statistics *ipw_get_wireless_stats(struct net_device * dev)
6238 return wstats; 6277 return wstats;
6239} 6278}
6240 6279
6241
6242/* net device stuff */ 6280/* net device stuff */
6243 6281
6244static inline void init_sys_config(struct ipw_sys_config *sys_config) 6282static inline void init_sys_config(struct ipw_sys_config *sys_config)
6245{ 6283{
6246 memset(sys_config, 0, sizeof(struct ipw_sys_config)); 6284 memset(sys_config, 0, sizeof(struct ipw_sys_config));
6247 sys_config->bt_coexistence = 1; /* We may need to look into prvStaBtConfig */ 6285 sys_config->bt_coexistence = 1; /* We may need to look into prvStaBtConfig */
6248 sys_config->answer_broadcast_ssid_probe = 0; 6286 sys_config->answer_broadcast_ssid_probe = 0;
6249 sys_config->accept_all_data_frames = 0; 6287 sys_config->accept_all_data_frames = 0;
6250 sys_config->accept_non_directed_frames = 1; 6288 sys_config->accept_non_directed_frames = 1;
@@ -6253,7 +6291,7 @@ static inline void init_sys_config(struct ipw_sys_config *sys_config)
6253 sys_config->exclude_multicast_unencrypted = 0; 6291 sys_config->exclude_multicast_unencrypted = 0;
6254 sys_config->disable_multicast_decryption = 1; 6292 sys_config->disable_multicast_decryption = 1;
6255 sys_config->antenna_diversity = CFG_SYS_ANTENNA_BOTH; 6293 sys_config->antenna_diversity = CFG_SYS_ANTENNA_BOTH;
6256 sys_config->pass_crc_to_host = 0; /* TODO: See if 1 gives us FCS */ 6294 sys_config->pass_crc_to_host = 0; /* TODO: See if 1 gives us FCS */
6257 sys_config->dot11g_auto_detection = 0; 6295 sys_config->dot11g_auto_detection = 0;
6258 sys_config->enable_cts_to_self = 0; 6296 sys_config->enable_cts_to_self = 0;
6259 sys_config->bt_coexist_collision_thr = 0; 6297 sys_config->bt_coexist_collision_thr = 0;
@@ -6288,7 +6326,7 @@ we need to heavily modify the ieee80211_skb_to_txb.
6288static inline void ipw_tx_skb(struct ipw_priv *priv, struct ieee80211_txb *txb) 6326static inline void ipw_tx_skb(struct ipw_priv *priv, struct ieee80211_txb *txb)
6289{ 6327{
6290 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *) 6328 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)
6291 txb->fragments[0]->data; 6329 txb->fragments[0]->data;
6292 int i = 0; 6330 int i = 0;
6293 struct tfd_frame *tfd; 6331 struct tfd_frame *tfd;
6294 struct clx2_tx_queue *txq = &priv->txq[0]; 6332 struct clx2_tx_queue *txq = &priv->txq[0];
@@ -6300,7 +6338,7 @@ static inline void ipw_tx_skb(struct ipw_priv *priv, struct ieee80211_txb *txb)
6300 case IW_MODE_ADHOC: 6338 case IW_MODE_ADHOC:
6301 hdr_len = IEEE80211_3ADDR_LEN; 6339 hdr_len = IEEE80211_3ADDR_LEN;
6302 unicast = !is_broadcast_ether_addr(hdr->addr1) && 6340 unicast = !is_broadcast_ether_addr(hdr->addr1) &&
6303 !is_multicast_ether_addr(hdr->addr1); 6341 !is_multicast_ether_addr(hdr->addr1);
6304 id = ipw_find_station(priv, hdr->addr1); 6342 id = ipw_find_station(priv, hdr->addr1);
6305 if (id == IPW_INVALID_STATION) { 6343 if (id == IPW_INVALID_STATION) {
6306 id = ipw_add_station(priv, hdr->addr1); 6344 id = ipw_add_station(priv, hdr->addr1);
@@ -6316,7 +6354,7 @@ static inline void ipw_tx_skb(struct ipw_priv *priv, struct ieee80211_txb *txb)
6316 case IW_MODE_INFRA: 6354 case IW_MODE_INFRA:
6317 default: 6355 default:
6318 unicast = !is_broadcast_ether_addr(hdr->addr3) && 6356 unicast = !is_broadcast_ether_addr(hdr->addr3) &&
6319 !is_multicast_ether_addr(hdr->addr3); 6357 !is_multicast_ether_addr(hdr->addr3);
6320 hdr_len = IEEE80211_3ADDR_LEN; 6358 hdr_len = IEEE80211_3ADDR_LEN;
6321 id = 0; 6359 id = 0;
6322 break; 6360 break;
@@ -6349,7 +6387,7 @@ static inline void ipw_tx_skb(struct ipw_priv *priv, struct ieee80211_txb *txb)
6349 memcpy(&tfd->u.data.tfd.tfd_24.mchdr, hdr, hdr_len); 6387 memcpy(&tfd->u.data.tfd.tfd_24.mchdr, hdr, hdr_len);
6350 6388
6351 /* payload */ 6389 /* payload */
6352 tfd->u.data.num_chunks = min((u8)(NUM_TFD_CHUNKS - 2), txb->nr_frags); 6390 tfd->u.data.num_chunks = min((u8) (NUM_TFD_CHUNKS - 2), txb->nr_frags);
6353 for (i = 0; i < tfd->u.data.num_chunks; i++) { 6391 for (i = 0; i < tfd->u.data.num_chunks; i++) {
6354 IPW_DEBUG_TX("Dumping TX packet frag %i of %i (%d bytes):\n", 6392 IPW_DEBUG_TX("Dumping TX packet frag %i of %i (%d bytes):\n",
6355 i, tfd->u.data.num_chunks, 6393 i, tfd->u.data.num_chunks,
@@ -6357,9 +6395,11 @@ static inline void ipw_tx_skb(struct ipw_priv *priv, struct ieee80211_txb *txb)
6357 printk_buf(IPW_DL_TX, txb->fragments[i]->data + hdr_len, 6395 printk_buf(IPW_DL_TX, txb->fragments[i]->data + hdr_len,
6358 txb->fragments[i]->len - hdr_len); 6396 txb->fragments[i]->len - hdr_len);
6359 6397
6360 tfd->u.data.chunk_ptr[i] = pci_map_single( 6398 tfd->u.data.chunk_ptr[i] =
6361 priv->pci_dev, txb->fragments[i]->data + hdr_len, 6399 pci_map_single(priv->pci_dev,
6362 txb->fragments[i]->len - hdr_len, PCI_DMA_TODEVICE); 6400 txb->fragments[i]->data + hdr_len,
6401 txb->fragments[i]->len - hdr_len,
6402 PCI_DMA_TODEVICE);
6363 tfd->u.data.chunk_len[i] = txb->fragments[i]->len - hdr_len; 6403 tfd->u.data.chunk_len[i] = txb->fragments[i]->len - hdr_len;
6364 } 6404 }
6365 6405
@@ -6379,16 +6419,16 @@ static inline void ipw_tx_skb(struct ipw_priv *priv, struct ieee80211_txb *txb)
6379 for (j = i; j < txb->nr_frags; j++) { 6419 for (j = i; j < txb->nr_frags; j++) {
6380 int size = txb->fragments[j]->len - hdr_len; 6420 int size = txb->fragments[j]->len - hdr_len;
6381 printk(KERN_INFO "Adding frag %d %d...\n", 6421 printk(KERN_INFO "Adding frag %d %d...\n",
6382 j, size); 6422 j, size);
6383 memcpy(skb_put(skb, size), 6423 memcpy(skb_put(skb, size),
6384 txb->fragments[j]->data + hdr_len, 6424 txb->fragments[j]->data + hdr_len, size);
6385 size);
6386 } 6425 }
6387 dev_kfree_skb_any(txb->fragments[i]); 6426 dev_kfree_skb_any(txb->fragments[i]);
6388 txb->fragments[i] = skb; 6427 txb->fragments[i] = skb;
6389 tfd->u.data.chunk_ptr[i] = pci_map_single( 6428 tfd->u.data.chunk_ptr[i] =
6390 priv->pci_dev, skb->data, 6429 pci_map_single(priv->pci_dev, skb->data,
6391 tfd->u.data.chunk_len[i], PCI_DMA_TODEVICE); 6430 tfd->u.data.chunk_len[i],
6431 PCI_DMA_TODEVICE);
6392 tfd->u.data.num_chunks++; 6432 tfd->u.data.num_chunks++;
6393 } 6433 }
6394 } 6434 }
@@ -6402,7 +6442,7 @@ static inline void ipw_tx_skb(struct ipw_priv *priv, struct ieee80211_txb *txb)
6402 6442
6403 return; 6443 return;
6404 6444
6405 drop: 6445 drop:
6406 IPW_DEBUG_DROP("Silently dropping Tx packet.\n"); 6446 IPW_DEBUG_DROP("Silently dropping Tx packet.\n");
6407 ieee80211_txb_free(txb); 6447 ieee80211_txb_free(txb);
6408} 6448}
@@ -6429,7 +6469,7 @@ static int ipw_net_hard_start_xmit(struct ieee80211_txb *txb,
6429 spin_unlock_irqrestore(&priv->lock, flags); 6469 spin_unlock_irqrestore(&priv->lock, flags);
6430 return 0; 6470 return 0;
6431 6471
6432 fail_unlock: 6472 fail_unlock:
6433 spin_unlock_irqrestore(&priv->lock, flags); 6473 spin_unlock_irqrestore(&priv->lock, flags);
6434 return 1; 6474 return 1;
6435} 6475}
@@ -6478,7 +6518,7 @@ static void ipw_ethtool_get_drvinfo(struct net_device *dev,
6478 len = sizeof(date); 6518 len = sizeof(date);
6479 ipw_get_ordinal(p, IPW_ORD_STAT_FW_DATE, date, &len); 6519 ipw_get_ordinal(p, IPW_ORD_STAT_FW_DATE, date, &len);
6480 6520
6481 snprintf(info->fw_version, sizeof(info->fw_version),"%s (%s)", 6521 snprintf(info->fw_version, sizeof(info->fw_version), "%s (%s)",
6482 vers, date); 6522 vers, date);
6483 strcpy(info->bus_info, pci_name(p->pci_dev)); 6523 strcpy(info->bus_info, pci_name(p->pci_dev));
6484 info->eedump_len = CX2_EEPROM_IMAGE_SIZE; 6524 info->eedump_len = CX2_EEPROM_IMAGE_SIZE;
@@ -6496,19 +6536,19 @@ static int ipw_ethtool_get_eeprom_len(struct net_device *dev)
6496} 6536}
6497 6537
6498static int ipw_ethtool_get_eeprom(struct net_device *dev, 6538static int ipw_ethtool_get_eeprom(struct net_device *dev,
6499 struct ethtool_eeprom *eeprom, u8 *bytes) 6539 struct ethtool_eeprom *eeprom, u8 * bytes)
6500{ 6540{
6501 struct ipw_priv *p = ieee80211_priv(dev); 6541 struct ipw_priv *p = ieee80211_priv(dev);
6502 6542
6503 if (eeprom->offset + eeprom->len > CX2_EEPROM_IMAGE_SIZE) 6543 if (eeprom->offset + eeprom->len > CX2_EEPROM_IMAGE_SIZE)
6504 return -EINVAL; 6544 return -EINVAL;
6505 6545
6506 memcpy(bytes, &((u8 *)p->eeprom)[eeprom->offset], eeprom->len); 6546 memcpy(bytes, &((u8 *) p->eeprom)[eeprom->offset], eeprom->len);
6507 return 0; 6547 return 0;
6508} 6548}
6509 6549
6510static int ipw_ethtool_set_eeprom(struct net_device *dev, 6550static int ipw_ethtool_set_eeprom(struct net_device *dev,
6511 struct ethtool_eeprom *eeprom, u8 *bytes) 6551 struct ethtool_eeprom *eeprom, u8 * bytes)
6512{ 6552{
6513 struct ipw_priv *p = ieee80211_priv(dev); 6553 struct ipw_priv *p = ieee80211_priv(dev);
6514 int i; 6554 int i;
@@ -6516,21 +6556,20 @@ static int ipw_ethtool_set_eeprom(struct net_device *dev,
6516 if (eeprom->offset + eeprom->len > CX2_EEPROM_IMAGE_SIZE) 6556 if (eeprom->offset + eeprom->len > CX2_EEPROM_IMAGE_SIZE)
6517 return -EINVAL; 6557 return -EINVAL;
6518 6558
6519 memcpy(&((u8 *)p->eeprom)[eeprom->offset], bytes, eeprom->len); 6559 memcpy(&((u8 *) p->eeprom)[eeprom->offset], bytes, eeprom->len);
6520 for (i = IPW_EEPROM_DATA; 6560 for (i = IPW_EEPROM_DATA;
6521 i < IPW_EEPROM_DATA + CX2_EEPROM_IMAGE_SIZE; 6561 i < IPW_EEPROM_DATA + CX2_EEPROM_IMAGE_SIZE; i++)
6522 i++)
6523 ipw_write8(p, i, p->eeprom[i]); 6562 ipw_write8(p, i, p->eeprom[i]);
6524 6563
6525 return 0; 6564 return 0;
6526} 6565}
6527 6566
6528static struct ethtool_ops ipw_ethtool_ops = { 6567static struct ethtool_ops ipw_ethtool_ops = {
6529 .get_link = ipw_ethtool_get_link, 6568 .get_link = ipw_ethtool_get_link,
6530 .get_drvinfo = ipw_ethtool_get_drvinfo, 6569 .get_drvinfo = ipw_ethtool_get_drvinfo,
6531 .get_eeprom_len = ipw_ethtool_get_eeprom_len, 6570 .get_eeprom_len = ipw_ethtool_get_eeprom_len,
6532 .get_eeprom = ipw_ethtool_get_eeprom, 6571 .get_eeprom = ipw_ethtool_get_eeprom,
6533 .set_eeprom = ipw_ethtool_set_eeprom, 6572 .set_eeprom = ipw_ethtool_set_eeprom,
6534}; 6573};
6535 6574
6536static irqreturn_t ipw_isr(int irq, void *data, struct pt_regs *regs) 6575static irqreturn_t ipw_isr(int irq, void *data, struct pt_regs *regs)
@@ -6574,10 +6613,10 @@ static irqreturn_t ipw_isr(int irq, void *data, struct pt_regs *regs)
6574 6613
6575 tasklet_schedule(&priv->irq_tasklet); 6614 tasklet_schedule(&priv->irq_tasklet);
6576 6615
6577 spin_unlock(&priv->lock); 6616 spin_unlock(&priv->lock);
6578 6617
6579 return IRQ_HANDLED; 6618 return IRQ_HANDLED;
6580 none: 6619 none:
6581 spin_unlock(&priv->lock); 6620 spin_unlock(&priv->lock);
6582 return IRQ_NONE; 6621 return IRQ_NONE;
6583} 6622}
@@ -6609,7 +6648,7 @@ static void ipw_rf_kill(void *adapter)
6609 IPW_DEBUG_RF_KILL("HW RF Kill deactivated. SW RF Kill still " 6648 IPW_DEBUG_RF_KILL("HW RF Kill deactivated. SW RF Kill still "
6610 "enabled\n"); 6649 "enabled\n");
6611 6650
6612 exit_unlock: 6651 exit_unlock:
6613 spin_unlock_irqrestore(&priv->lock, flags); 6652 spin_unlock_irqrestore(&priv->lock, flags);
6614} 6653}
6615 6654
@@ -6642,7 +6681,6 @@ static int ipw_setup_deferred_work(struct ipw_priv *priv)
6642 return ret; 6681 return ret;
6643} 6682}
6644 6683
6645
6646static void shim__set_security(struct net_device *dev, 6684static void shim__set_security(struct net_device *dev,
6647 struct ieee80211_security *sec) 6685 struct ieee80211_security *sec)
6648{ 6686{
@@ -6683,8 +6721,7 @@ static void shim__set_security(struct net_device *dev,
6683 priv->status |= STATUS_SECURITY_UPDATED; 6721 priv->status |= STATUS_SECURITY_UPDATED;
6684 } 6722 }
6685 6723
6686 if (sec->flags & SEC_ENABLED && 6724 if (sec->flags & SEC_ENABLED && priv->sec.enabled != sec->enabled) {
6687 priv->sec.enabled != sec->enabled) {
6688 priv->sec.flags |= SEC_ENABLED; 6725 priv->sec.flags |= SEC_ENABLED;
6689 priv->sec.enabled = sec->enabled; 6726 priv->sec.enabled = sec->enabled;
6690 priv->status |= STATUS_SECURITY_UPDATED; 6727 priv->status |= STATUS_SECURITY_UPDATED;
@@ -6694,8 +6731,7 @@ static void shim__set_security(struct net_device *dev,
6694 priv->capability &= ~CAP_PRIVACY_ON; 6731 priv->capability &= ~CAP_PRIVACY_ON;
6695 } 6732 }
6696 6733
6697 if (sec->flags & SEC_LEVEL && 6734 if (sec->flags & SEC_LEVEL && priv->sec.level != sec->level) {
6698 priv->sec.level != sec->level) {
6699 priv->sec.level = sec->level; 6735 priv->sec.level = sec->level;
6700 priv->sec.flags |= SEC_LEVEL; 6736 priv->sec.flags |= SEC_LEVEL;
6701 priv->status |= STATUS_SECURITY_UPDATED; 6737 priv->status |= STATUS_SECURITY_UPDATED;
@@ -6709,7 +6745,7 @@ static void shim__set_security(struct net_device *dev,
6709 (((priv->assoc_request.capability & 6745 (((priv->assoc_request.capability &
6710 WLAN_CAPABILITY_PRIVACY) && !sec->enabled) || 6746 WLAN_CAPABILITY_PRIVACY) && !sec->enabled) ||
6711 (!(priv->assoc_request.capability & 6747 (!(priv->assoc_request.capability &
6712 WLAN_CAPABILITY_PRIVACY) && sec->enabled))) { 6748 WLAN_CAPABILITY_PRIVACY) && sec->enabled))) {
6713 IPW_DEBUG_ASSOC("Disassociating due to capability " 6749 IPW_DEBUG_ASSOC("Disassociating due to capability "
6714 "change.\n"); 6750 "change.\n");
6715 ipw_disassociate(priv); 6751 ipw_disassociate(priv);
@@ -6723,7 +6759,7 @@ static int init_supported_rates(struct ipw_priv *priv,
6723 /* TODO: Mask out rates based on priv->rates_mask */ 6759 /* TODO: Mask out rates based on priv->rates_mask */
6724 6760
6725 memset(rates, 0, sizeof(*rates)); 6761 memset(rates, 0, sizeof(*rates));
6726 /* configure supported rates */ 6762 /* configure supported rates */
6727 switch (priv->ieee->freq_band) { 6763 switch (priv->ieee->freq_band) {
6728 case IEEE80211_52GHZ_BAND: 6764 case IEEE80211_52GHZ_BAND:
6729 rates->ieee_mode = IPW_A_MODE; 6765 rates->ieee_mode = IPW_A_MODE;
@@ -6732,7 +6768,7 @@ static int init_supported_rates(struct ipw_priv *priv,
6732 IEEE80211_OFDM_DEFAULT_RATES_MASK); 6768 IEEE80211_OFDM_DEFAULT_RATES_MASK);
6733 break; 6769 break;
6734 6770
6735 default: /* Mixed or 2.4Ghz */ 6771 default: /* Mixed or 2.4Ghz */
6736 rates->ieee_mode = IPW_G_MODE; 6772 rates->ieee_mode = IPW_G_MODE;
6737 rates->purpose = IPW_RATE_CAPABILITIES; 6773 rates->purpose = IPW_RATE_CAPABILITIES;
6738 ipw_add_cck_scan_rates(rates, IEEE80211_CCK_MODULATION, 6774 ipw_add_cck_scan_rates(rates, IEEE80211_CCK_MODULATION,
@@ -6783,8 +6819,8 @@ static int ipw_config(struct ipw_priv *priv)
6783 if (ipw_send_system_config(priv, &priv->sys_config)) 6819 if (ipw_send_system_config(priv, &priv->sys_config))
6784 goto error; 6820 goto error;
6785 6821
6786 init_supported_rates(priv, &priv->rates); 6822 init_supported_rates(priv, &priv->rates);
6787 if (ipw_send_supported_rates(priv, &priv->rates)) 6823 if (ipw_send_supported_rates(priv, &priv->rates))
6788 goto error; 6824 goto error;
6789 6825
6790 /* Set request-to-send threshold */ 6826 /* Set request-to-send threshold */
@@ -6806,7 +6842,7 @@ static int ipw_config(struct ipw_priv *priv)
6806 6842
6807 return 0; 6843 return 0;
6808 6844
6809 error: 6845 error:
6810 return -EIO; 6846 return -EIO;
6811} 6847}
6812 6848
@@ -6818,13 +6854,12 @@ static int ipw_up(struct ipw_priv *priv)
6818 if (priv->status & STATUS_EXIT_PENDING) 6854 if (priv->status & STATUS_EXIT_PENDING)
6819 return -EIO; 6855 return -EIO;
6820 6856
6821 for (i = 0; i < MAX_HW_RESTARTS; i++ ) { 6857 for (i = 0; i < MAX_HW_RESTARTS; i++) {
6822 /* Load the microcode, firmware, and eeprom. 6858 /* Load the microcode, firmware, and eeprom.
6823 * Also start the clocks. */ 6859 * Also start the clocks. */
6824 rc = ipw_load(priv); 6860 rc = ipw_load(priv);
6825 if (rc) { 6861 if (rc) {
6826 IPW_ERROR("Unable to load firmware: 0x%08X\n", 6862 IPW_ERROR("Unable to load firmware: 0x%08X\n", rc);
6827 rc);
6828 return rc; 6863 return rc;
6829 } 6864 }
6830 6865
@@ -6857,8 +6892,7 @@ static int ipw_up(struct ipw_priv *priv)
6857 6892
6858 /* tried to restart and config the device for as long as our 6893 /* tried to restart and config the device for as long as our
6859 * patience could withstand */ 6894 * patience could withstand */
6860 IPW_ERROR("Unable to initialize device after %d attempts.\n", 6895 IPW_ERROR("Unable to initialize device after %d attempts.\n", i);
6861 i);
6862 return -EIO; 6896 return -EIO;
6863} 6897}
6864 6898
@@ -6923,10 +6957,10 @@ static struct pci_device_id card_ids[] = {
6923 {PCI_VENDOR_ID_INTEL, 0x1043, 0x8086, 0x2761, 0, 0, 0}, 6957 {PCI_VENDOR_ID_INTEL, 0x1043, 0x8086, 0x2761, 0, 0, 0},
6924 {PCI_VENDOR_ID_INTEL, 0x1043, 0x8086, 0x2762, 0, 0, 0}, 6958 {PCI_VENDOR_ID_INTEL, 0x1043, 0x8086, 0x2762, 0, 0, 0},
6925 {PCI_VENDOR_ID_INTEL, 0x104f, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, 6959 {PCI_VENDOR_ID_INTEL, 0x104f, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0},
6926 {PCI_VENDOR_ID_INTEL, 0x4220, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, /* BG */ 6960 {PCI_VENDOR_ID_INTEL, 0x4220, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, /* BG */
6927 {PCI_VENDOR_ID_INTEL, 0x4221, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, /* 2225BG */ 6961 {PCI_VENDOR_ID_INTEL, 0x4221, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, /* 2225BG */
6928 {PCI_VENDOR_ID_INTEL, 0x4223, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, /* ABG */ 6962 {PCI_VENDOR_ID_INTEL, 0x4223, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, /* ABG */
6929 {PCI_VENDOR_ID_INTEL, 0x4224, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, /* ABG */ 6963 {PCI_VENDOR_ID_INTEL, 0x4224, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, /* ABG */
6930 6964
6931 /* required last entry */ 6965 /* required last entry */
6932 {0,} 6966 {0,}
@@ -6954,11 +6988,10 @@ static struct attribute *ipw_sysfs_entries[] = {
6954 6988
6955static struct attribute_group ipw_attribute_group = { 6989static struct attribute_group ipw_attribute_group = {
6956 .name = NULL, /* put in device directory */ 6990 .name = NULL, /* put in device directory */
6957 .attrs = ipw_sysfs_entries, 6991 .attrs = ipw_sysfs_entries,
6958}; 6992};
6959 6993
6960static int ipw_pci_probe(struct pci_dev *pdev, 6994static int ipw_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
6961 const struct pci_device_id *ent)
6962{ 6995{
6963 int err = 0; 6996 int err = 0;
6964 struct net_device *net_dev; 6997 struct net_device *net_dev;
@@ -7051,7 +7084,7 @@ static int ipw_pci_probe(struct pci_dev *pdev,
7051 priv->config |= CFG_STATIC_CHANNEL; 7084 priv->config |= CFG_STATIC_CHANNEL;
7052 priv->channel = channel; 7085 priv->channel = channel;
7053 IPW_DEBUG_INFO("Bind to static channel %d\n", channel); 7086 IPW_DEBUG_INFO("Bind to static channel %d\n", channel);
7054 IPW_DEBUG_INFO("Bind to static channel %d\n", channel); 7087 IPW_DEBUG_INFO("Bind to static channel %d\n", channel);
7055 /* TODO: Validate that provided channel is in range */ 7088 /* TODO: Validate that provided channel is in range */
7056 } 7089 }
7057 7090
@@ -7078,9 +7111,9 @@ static int ipw_pci_probe(struct pci_dev *pdev,
7078 priv->ieee->abg_ture = 1; 7111 priv->ieee->abg_ture = 1;
7079 band = IEEE80211_52GHZ_BAND | IEEE80211_24GHZ_BAND; 7112 band = IEEE80211_52GHZ_BAND | IEEE80211_24GHZ_BAND;
7080 modulation = IEEE80211_OFDM_MODULATION | 7113 modulation = IEEE80211_OFDM_MODULATION |
7081 IEEE80211_CCK_MODULATION; 7114 IEEE80211_CCK_MODULATION;
7082 priv->adapter = IPW_2915ABG; 7115 priv->adapter = IPW_2915ABG;
7083 priv->ieee->mode = IEEE_A|IEEE_G|IEEE_B; 7116 priv->ieee->mode = IEEE_A | IEEE_G | IEEE_B;
7084 } else { 7117 } else {
7085 if (priv->pci_dev->device == 0x4221) 7118 if (priv->pci_dev->device == 0x4221)
7086 printk(KERN_INFO DRV_NAME 7119 printk(KERN_INFO DRV_NAME
@@ -7094,9 +7127,9 @@ static int ipw_pci_probe(struct pci_dev *pdev,
7094 priv->ieee->abg_ture = 0; 7127 priv->ieee->abg_ture = 0;
7095 band = IEEE80211_24GHZ_BAND; 7128 band = IEEE80211_24GHZ_BAND;
7096 modulation = IEEE80211_OFDM_MODULATION | 7129 modulation = IEEE80211_OFDM_MODULATION |
7097 IEEE80211_CCK_MODULATION; 7130 IEEE80211_CCK_MODULATION;
7098 priv->adapter = IPW_2200BG; 7131 priv->adapter = IPW_2200BG;
7099 priv->ieee->mode = IEEE_G|IEEE_B; 7132 priv->ieee->mode = IEEE_G | IEEE_B;
7100 } 7133 }
7101 7134
7102 priv->ieee->freq_band = band; 7135 priv->ieee->freq_band = band;
@@ -7110,11 +7143,10 @@ static int ipw_pci_probe(struct pci_dev *pdev,
7110 priv->rts_threshold = DEFAULT_RTS_THRESHOLD; 7143 priv->rts_threshold = DEFAULT_RTS_THRESHOLD;
7111 7144
7112 /* If power management is turned on, default to AC mode */ 7145 /* If power management is turned on, default to AC mode */
7113 priv->power_mode = IPW_POWER_AC; 7146 priv->power_mode = IPW_POWER_AC;
7114 priv->tx_power = IPW_DEFAULT_TX_POWER; 7147 priv->tx_power = IPW_DEFAULT_TX_POWER;
7115 7148
7116 err = request_irq(pdev->irq, ipw_isr, SA_SHIRQ, DRV_NAME, 7149 err = request_irq(pdev->irq, ipw_isr, SA_SHIRQ, DRV_NAME, priv);
7117 priv);
7118 if (err) { 7150 if (err) {
7119 IPW_ERROR("Error allocating IRQ %d\n", pdev->irq); 7151 IPW_ERROR("Error allocating IRQ %d\n", pdev->irq);
7120 goto out_destroy_workqueue; 7152 goto out_destroy_workqueue;
@@ -7136,7 +7168,7 @@ static int ipw_pci_probe(struct pci_dev *pdev,
7136 net_dev->wireless_handlers = &ipw_wx_handler_def; 7168 net_dev->wireless_handlers = &ipw_wx_handler_def;
7137 net_dev->ethtool_ops = &ipw_ethtool_ops; 7169 net_dev->ethtool_ops = &ipw_ethtool_ops;
7138 net_dev->irq = pdev->irq; 7170 net_dev->irq = pdev->irq;
7139 net_dev->base_addr = (unsigned long )priv->hw_base; 7171 net_dev->base_addr = (unsigned long)priv->hw_base;
7140 net_dev->mem_start = pci_resource_start(pdev, 0); 7172 net_dev->mem_start = pci_resource_start(pdev, 0);
7141 net_dev->mem_end = net_dev->mem_start + pci_resource_len(pdev, 0) - 1; 7173 net_dev->mem_end = net_dev->mem_start + pci_resource_len(pdev, 0) - 1;
7142 7174
@@ -7154,23 +7186,23 @@ static int ipw_pci_probe(struct pci_dev *pdev,
7154 7186
7155 return 0; 7187 return 0;
7156 7188
7157 out_remove_group: 7189 out_remove_group:
7158 sysfs_remove_group(&pdev->dev.kobj, &ipw_attribute_group); 7190 sysfs_remove_group(&pdev->dev.kobj, &ipw_attribute_group);
7159 out_release_irq: 7191 out_release_irq:
7160 free_irq(pdev->irq, priv); 7192 free_irq(pdev->irq, priv);
7161 out_destroy_workqueue: 7193 out_destroy_workqueue:
7162 destroy_workqueue(priv->workqueue); 7194 destroy_workqueue(priv->workqueue);
7163 priv->workqueue = NULL; 7195 priv->workqueue = NULL;
7164 out_iounmap: 7196 out_iounmap:
7165 iounmap(priv->hw_base); 7197 iounmap(priv->hw_base);
7166 out_pci_release_regions: 7198 out_pci_release_regions:
7167 pci_release_regions(pdev); 7199 pci_release_regions(pdev);
7168 out_pci_disable_device: 7200 out_pci_disable_device:
7169 pci_disable_device(pdev); 7201 pci_disable_device(pdev);
7170 pci_set_drvdata(pdev, NULL); 7202 pci_set_drvdata(pdev, NULL);
7171 out_free_ieee80211: 7203 out_free_ieee80211:
7172 free_ieee80211(priv->net_dev); 7204 free_ieee80211(priv->net_dev);
7173 out: 7205 out:
7174 return err; 7206 return err;
7175} 7207}
7176 7208
@@ -7223,7 +7255,6 @@ static void ipw_pci_remove(struct pci_dev *pdev)
7223#endif 7255#endif
7224} 7256}
7225 7257
7226
7227#ifdef CONFIG_PM 7258#ifdef CONFIG_PM
7228static int ipw_pci_suspend(struct pci_dev *pdev, pm_message_t state) 7259static int ipw_pci_suspend(struct pci_dev *pdev, pm_message_t state)
7229{ 7260{
@@ -7232,7 +7263,7 @@ static int ipw_pci_suspend(struct pci_dev *pdev, pm_message_t state)
7232 7263
7233 printk(KERN_INFO "%s: Going into suspend...\n", dev->name); 7264 printk(KERN_INFO "%s: Going into suspend...\n", dev->name);
7234 7265
7235 /* Take down the device; powers it off, etc. */ 7266 /* Take down the device; powers it off, etc. */
7236 ipw_down(priv); 7267 ipw_down(priv);
7237 7268
7238 /* Remove the PRESENT state of the device */ 7269 /* Remove the PRESENT state of the device */
@@ -7306,8 +7337,7 @@ static int __init ipw_init(void)
7306 return ret; 7337 return ret;
7307 } 7338 }
7308 7339
7309 ret = driver_create_file(&ipw_driver.driver, 7340 ret = driver_create_file(&ipw_driver.driver, &driver_attr_debug_level);
7310 &driver_attr_debug_level);
7311 if (ret) { 7341 if (ret) {
7312 IPW_ERROR("Unable to create driver sysfs file\n"); 7342 IPW_ERROR("Unable to create driver sysfs file\n");
7313 pci_unregister_driver(&ipw_driver); 7343 pci_unregister_driver(&ipw_driver);
diff --git a/drivers/net/wireless/ipw2200.h b/drivers/net/wireless/ipw2200.h
index 66bb5903537f..5b00882133f9 100644
--- a/drivers/net/wireless/ipw2200.h
+++ b/drivers/net/wireless/ipw2200.h
@@ -56,8 +56,7 @@
56#include <linux/workqueue.h> 56#include <linux/workqueue.h>
57 57
58/* Authentication and Association States */ 58/* Authentication and Association States */
59enum connection_manager_assoc_states 59enum connection_manager_assoc_states {
60{
61 CMAS_INIT = 0, 60 CMAS_INIT = 0,
62 CMAS_TX_AUTH_SEQ_1, 61 CMAS_TX_AUTH_SEQ_1,
63 CMAS_RX_AUTH_SEQ_2, 62 CMAS_RX_AUTH_SEQ_2,
@@ -74,7 +73,6 @@ enum connection_manager_assoc_states
74 CMAS_LAST 73 CMAS_LAST
75}; 74};
76 75
77
78#define IPW_WAIT (1<<0) 76#define IPW_WAIT (1<<0)
79#define IPW_QUIET (1<<1) 77#define IPW_QUIET (1<<1)
80#define IPW_ROAMING (1<<2) 78#define IPW_ROAMING (1<<2)
@@ -190,7 +188,6 @@ enum connection_manager_assoc_states
190#define DCT_FLAG_EXT_MODE_CCK 0x01 188#define DCT_FLAG_EXT_MODE_CCK 0x01
191#define DCT_FLAG_EXT_MODE_OFDM 0x00 189#define DCT_FLAG_EXT_MODE_OFDM 0x00
192 190
193
194#define TX_RX_TYPE_MASK 0xFF 191#define TX_RX_TYPE_MASK 0xFF
195#define TX_FRAME_TYPE 0x00 192#define TX_FRAME_TYPE 0x00
196#define TX_HOST_COMMAND_TYPE 0x01 193#define TX_HOST_COMMAND_TYPE 0x01
@@ -242,107 +239,97 @@ enum connection_manager_assoc_states
242 * Contains common data for Rx and Tx queues 239 * Contains common data for Rx and Tx queues
243 */ 240 */
244struct clx2_queue { 241struct clx2_queue {
245 int n_bd; /**< number of BDs in this queue */ 242 int n_bd; /**< number of BDs in this queue */
246 int first_empty; /**< 1-st empty entry (index) */ 243 int first_empty; /**< 1-st empty entry (index) */
247 int last_used; /**< last used entry (index) */ 244 int last_used; /**< last used entry (index) */
248 u32 reg_w; /**< 'write' reg (queue head), addr in domain 1 */ 245 u32 reg_w; /**< 'write' reg (queue head), addr in domain 1 */
249 u32 reg_r; /**< 'read' reg (queue tail), addr in domain 1 */ 246 u32 reg_r; /**< 'read' reg (queue tail), addr in domain 1 */
250 dma_addr_t dma_addr; /**< physical addr for BD's */ 247 dma_addr_t dma_addr; /**< physical addr for BD's */
251 int low_mark; /**< low watermark, resume queue if free space more than this */ 248 int low_mark; /**< low watermark, resume queue if free space more than this */
252 int high_mark; /**< high watermark, stop queue if free space less than this */ 249 int high_mark; /**< high watermark, stop queue if free space less than this */
253} __attribute__ ((packed)); 250} __attribute__ ((packed));
254 251
255struct machdr32 252struct machdr32 {
256{
257 u16 frame_ctl; 253 u16 frame_ctl;
258 u16 duration; // watch out for endians! 254 u16 duration; // watch out for endians!
259 u8 addr1[ MACADRR_BYTE_LEN ]; 255 u8 addr1[MACADRR_BYTE_LEN];
260 u8 addr2[ MACADRR_BYTE_LEN ]; 256 u8 addr2[MACADRR_BYTE_LEN];
261 u8 addr3[ MACADRR_BYTE_LEN ]; 257 u8 addr3[MACADRR_BYTE_LEN];
262 u16 seq_ctrl; // more endians! 258 u16 seq_ctrl; // more endians!
263 u8 addr4[ MACADRR_BYTE_LEN ]; 259 u8 addr4[MACADRR_BYTE_LEN];
264 u16 qos_ctrl; 260 u16 qos_ctrl;
265} __attribute__ ((packed)) ; 261} __attribute__ ((packed));
266 262
267struct machdr30 263struct machdr30 {
268{
269 u16 frame_ctl; 264 u16 frame_ctl;
270 u16 duration; // watch out for endians! 265 u16 duration; // watch out for endians!
271 u8 addr1[ MACADRR_BYTE_LEN ]; 266 u8 addr1[MACADRR_BYTE_LEN];
272 u8 addr2[ MACADRR_BYTE_LEN ]; 267 u8 addr2[MACADRR_BYTE_LEN];
273 u8 addr3[ MACADRR_BYTE_LEN ]; 268 u8 addr3[MACADRR_BYTE_LEN];
274 u16 seq_ctrl; // more endians! 269 u16 seq_ctrl; // more endians!
275 u8 addr4[ MACADRR_BYTE_LEN ]; 270 u8 addr4[MACADRR_BYTE_LEN];
276} __attribute__ ((packed)) ; 271} __attribute__ ((packed));
277 272
278struct machdr26 273struct machdr26 {
279{
280 u16 frame_ctl; 274 u16 frame_ctl;
281 u16 duration; // watch out for endians! 275 u16 duration; // watch out for endians!
282 u8 addr1[ MACADRR_BYTE_LEN ]; 276 u8 addr1[MACADRR_BYTE_LEN];
283 u8 addr2[ MACADRR_BYTE_LEN ]; 277 u8 addr2[MACADRR_BYTE_LEN];
284 u8 addr3[ MACADRR_BYTE_LEN ]; 278 u8 addr3[MACADRR_BYTE_LEN];
285 u16 seq_ctrl; // more endians! 279 u16 seq_ctrl; // more endians!
286 u16 qos_ctrl; 280 u16 qos_ctrl;
287} __attribute__ ((packed)) ; 281} __attribute__ ((packed));
288 282
289struct machdr24 283struct machdr24 {
290{
291 u16 frame_ctl; 284 u16 frame_ctl;
292 u16 duration; // watch out for endians! 285 u16 duration; // watch out for endians!
293 u8 addr1[ MACADRR_BYTE_LEN ]; 286 u8 addr1[MACADRR_BYTE_LEN];
294 u8 addr2[ MACADRR_BYTE_LEN ]; 287 u8 addr2[MACADRR_BYTE_LEN];
295 u8 addr3[ MACADRR_BYTE_LEN ]; 288 u8 addr3[MACADRR_BYTE_LEN];
296 u16 seq_ctrl; // more endians! 289 u16 seq_ctrl; // more endians!
297} __attribute__ ((packed)) ; 290} __attribute__ ((packed));
298 291
299// TX TFD with 32 byte MAC Header 292// TX TFD with 32 byte MAC Header
300struct tx_tfd_32 293struct tx_tfd_32 {
301{ 294 struct machdr32 mchdr; // 32
302 struct machdr32 mchdr; // 32 295 u32 uivplaceholder[2]; // 8
303 u32 uivplaceholder[2]; // 8 296} __attribute__ ((packed));
304} __attribute__ ((packed)) ;
305 297
306// TX TFD with 30 byte MAC Header 298// TX TFD with 30 byte MAC Header
307struct tx_tfd_30 299struct tx_tfd_30 {
308{ 300 struct machdr30 mchdr; // 30
309 struct machdr30 mchdr; // 30 301 u8 reserved[2]; // 2
310 u8 reserved[2]; // 2 302 u32 uivplaceholder[2]; // 8
311 u32 uivplaceholder[2]; // 8 303} __attribute__ ((packed));
312} __attribute__ ((packed)) ;
313 304
314// tx tfd with 26 byte mac header 305// tx tfd with 26 byte mac header
315struct tx_tfd_26 306struct tx_tfd_26 {
316{ 307 struct machdr26 mchdr; // 26
317 struct machdr26 mchdr; // 26 308 u8 reserved1[2]; // 2
318 u8 reserved1[2]; // 2 309 u32 uivplaceholder[2]; // 8
319 u32 uivplaceholder[2]; // 8 310 u8 reserved2[4]; // 4
320 u8 reserved2[4]; // 4 311} __attribute__ ((packed));
321} __attribute__ ((packed)) ;
322 312
323// tx tfd with 24 byte mac header 313// tx tfd with 24 byte mac header
324struct tx_tfd_24 314struct tx_tfd_24 {
325{ 315 struct machdr24 mchdr; // 24
326 struct machdr24 mchdr; // 24 316 u32 uivplaceholder[2]; // 8
327 u32 uivplaceholder[2]; // 8 317 u8 reserved[8]; // 8
328 u8 reserved[8]; // 8 318} __attribute__ ((packed));
329} __attribute__ ((packed)) ;
330
331 319
332#define DCT_WEP_KEY_FIELD_LENGTH 16 320#define DCT_WEP_KEY_FIELD_LENGTH 16
333 321
334struct tfd_command 322struct tfd_command {
335{
336 u8 index; 323 u8 index;
337 u8 length; 324 u8 length;
338 u16 reserved; 325 u16 reserved;
339 u8 payload[0]; 326 u8 payload[0];
340} __attribute__ ((packed)) ; 327} __attribute__ ((packed));
341 328
342struct tfd_data { 329struct tfd_data {
343 /* Header */ 330 /* Header */
344 u32 work_area_ptr; 331 u32 work_area_ptr;
345 u8 station_number; /* 0 for BSS */ 332 u8 station_number; /* 0 for BSS */
346 u8 reserved1; 333 u8 reserved1;
347 u16 reserved2; 334 u16 reserved2;
348 335
@@ -359,14 +346,13 @@ struct tfd_data {
359 u8 antenna; 346 u8 antenna;
360 u16 next_packet_duration; 347 u16 next_packet_duration;
361 u16 next_frag_len; 348 u16 next_frag_len;
362 u16 back_off_counter; //////txop; 349 u16 back_off_counter; //////txop;
363 u8 retrylimit; 350 u8 retrylimit;
364 u16 cwcurrent; 351 u16 cwcurrent;
365 u8 reserved3; 352 u8 reserved3;
366 353
367 /* 802.11 MAC Header */ 354 /* 802.11 MAC Header */
368 union 355 union {
369 {
370 struct tx_tfd_24 tfd_24; 356 struct tx_tfd_24 tfd_24;
371 struct tx_tfd_26 tfd_26; 357 struct tx_tfd_26 tfd_26;
372 struct tx_tfd_30 tfd_30; 358 struct tx_tfd_30 tfd_30;
@@ -379,8 +365,7 @@ struct tfd_data {
379 u16 chunk_len[NUM_TFD_CHUNKS]; 365 u16 chunk_len[NUM_TFD_CHUNKS];
380} __attribute__ ((packed)); 366} __attribute__ ((packed));
381 367
382struct txrx_control_flags 368struct txrx_control_flags {
383{
384 u8 message_type; 369 u8 message_type;
385 u8 rx_seq_num; 370 u8 rx_seq_num;
386 u8 control_bits; 371 u8 control_bits;
@@ -390,17 +375,16 @@ struct txrx_control_flags
390#define TFD_SIZE 128 375#define TFD_SIZE 128
391#define TFD_CMD_IMMEDIATE_PAYLOAD_LENGTH (TFD_SIZE - sizeof(struct txrx_control_flags)) 376#define TFD_CMD_IMMEDIATE_PAYLOAD_LENGTH (TFD_SIZE - sizeof(struct txrx_control_flags))
392 377
393struct tfd_frame 378struct tfd_frame {
394{
395 struct txrx_control_flags control_flags; 379 struct txrx_control_flags control_flags;
396 union { 380 union {
397 struct tfd_data data; 381 struct tfd_data data;
398 struct tfd_command cmd; 382 struct tfd_command cmd;
399 u8 raw[TFD_CMD_IMMEDIATE_PAYLOAD_LENGTH]; 383 u8 raw[TFD_CMD_IMMEDIATE_PAYLOAD_LENGTH];
400 } u; 384 } u;
401} __attribute__ ((packed)) ; 385} __attribute__ ((packed));
402 386
403typedef void destructor_func(const void*); 387typedef void destructor_func(const void *);
404 388
405/** 389/**
406 * Tx Queue for DMA. Queue consists of circular buffer of 390 * Tx Queue for DMA. Queue consists of circular buffer of
@@ -408,7 +392,7 @@ typedef void destructor_func(const void*);
408 */ 392 */
409struct clx2_tx_queue { 393struct clx2_tx_queue {
410 struct clx2_queue q; 394 struct clx2_queue q;
411 struct tfd_frame* bd; 395 struct tfd_frame *bd;
412 struct ieee80211_txb **txb; 396 struct ieee80211_txb **txb;
413}; 397};
414 398
@@ -423,8 +407,7 @@ struct clx2_tx_queue {
423#define SUP_RATE_11G_MAX_NUM_CHANNELS (12) 407#define SUP_RATE_11G_MAX_NUM_CHANNELS (12)
424 408
425// Used for passing to driver number of successes and failures per rate 409// Used for passing to driver number of successes and failures per rate
426struct rate_histogram 410struct rate_histogram {
427{
428 union { 411 union {
429 u32 a[SUP_RATE_11A_MAX_NUM_CHANNELS]; 412 u32 a[SUP_RATE_11A_MAX_NUM_CHANNELS];
430 u32 b[SUP_RATE_11B_MAX_NUM_CHANNELS]; 413 u32 b[SUP_RATE_11B_MAX_NUM_CHANNELS];
@@ -475,12 +458,12 @@ struct notif_scan_complete {
475 u8 num_channels; 458 u8 num_channels;
476 u8 status; 459 u8 status;
477 u8 reserved; 460 u8 reserved;
478} __attribute__ ((packed)); 461} __attribute__ ((packed));
479 462
480struct notif_frag_length { 463struct notif_frag_length {
481 u16 frag_length; 464 u16 frag_length;
482 u16 reserved; 465 u16 reserved;
483} __attribute__ ((packed)); 466} __attribute__ ((packed));
484 467
485struct notif_beacon_state { 468struct notif_beacon_state {
486 u32 state; 469 u32 state;
@@ -543,11 +526,11 @@ struct ipw_rx_notification {
543 526
544struct ipw_rx_frame { 527struct ipw_rx_frame {
545 u32 reserved1; 528 u32 reserved1;
546 u8 parent_tsf[4]; // fw_use[0] is boolean for OUR_TSF_IS_GREATER 529 u8 parent_tsf[4]; // fw_use[0] is boolean for OUR_TSF_IS_GREATER
547 u8 received_channel; // The channel that this frame was received on. 530 u8 received_channel; // The channel that this frame was received on.
548 // Note that for .11b this does not have to be 531 // Note that for .11b this does not have to be
549 // the same as the channel that it was sent. 532 // the same as the channel that it was sent.
550 // Filled by LMAC 533 // Filled by LMAC
551 u8 frameStatus; 534 u8 frameStatus;
552 u8 rate; 535 u8 rate;
553 u8 rssi; 536 u8 rssi;
@@ -556,10 +539,10 @@ struct ipw_rx_frame {
556 u16 signal; 539 u16 signal;
557 u16 noise; 540 u16 noise;
558 u8 antennaAndPhy; 541 u8 antennaAndPhy;
559 u8 control; // control bit should be on in bg 542 u8 control; // control bit should be on in bg
560 u8 rtscts_rate; // rate of rts or cts (in rts cts sequence rate 543 u8 rtscts_rate; // rate of rts or cts (in rts cts sequence rate
561 // is identical) 544 // is identical)
562 u8 rtscts_seen; // 0x1 RTS seen ; 0x2 CTS seen 545 u8 rtscts_seen; // 0x1 RTS seen ; 0x2 CTS seen
563 u16 length; 546 u16 length;
564 u8 data[0]; 547 u8 data[0];
565} __attribute__ ((packed)); 548} __attribute__ ((packed));
@@ -571,8 +554,7 @@ struct ipw_rx_header {
571 u8 reserved; 554 u8 reserved;
572} __attribute__ ((packed)); 555} __attribute__ ((packed));
573 556
574struct ipw_rx_packet 557struct ipw_rx_packet {
575{
576 struct ipw_rx_header header; 558 struct ipw_rx_header header;
577 union { 559 union {
578 struct ipw_rx_frame frame; 560 struct ipw_rx_frame frame;
@@ -589,21 +571,20 @@ struct ipw_rx_mem_buffer {
589 struct ipw_rx_buffer *rxb; 571 struct ipw_rx_buffer *rxb;
590 struct sk_buff *skb; 572 struct sk_buff *skb;
591 struct list_head list; 573 struct list_head list;
592}; /* Not transferred over network, so not __attribute__ ((packed)) */ 574}; /* Not transferred over network, so not __attribute__ ((packed)) */
593 575
594struct ipw_rx_queue { 576struct ipw_rx_queue {
595 struct ipw_rx_mem_buffer pool[RX_QUEUE_SIZE + RX_FREE_BUFFERS]; 577 struct ipw_rx_mem_buffer pool[RX_QUEUE_SIZE + RX_FREE_BUFFERS];
596 struct ipw_rx_mem_buffer *queue[RX_QUEUE_SIZE]; 578 struct ipw_rx_mem_buffer *queue[RX_QUEUE_SIZE];
597 u32 processed; /* Internal index to last handled Rx packet */ 579 u32 processed; /* Internal index to last handled Rx packet */
598 u32 read; /* Shared index to newest available Rx buffer */ 580 u32 read; /* Shared index to newest available Rx buffer */
599 u32 write; /* Shared index to oldest written Rx packet */ 581 u32 write; /* Shared index to oldest written Rx packet */
600 u32 free_count;/* Number of pre-allocated buffers in rx_free */ 582 u32 free_count; /* Number of pre-allocated buffers in rx_free */
601 /* Each of these lists is used as a FIFO for ipw_rx_mem_buffers */ 583 /* Each of these lists is used as a FIFO for ipw_rx_mem_buffers */
602 struct list_head rx_free; /* Own an SKBs */ 584 struct list_head rx_free; /* Own an SKBs */
603 struct list_head rx_used; /* No SKB allocated */ 585 struct list_head rx_used; /* No SKB allocated */
604 spinlock_t lock; 586 spinlock_t lock;
605}; /* Not transferred over network, so not __attribute__ ((packed)) */ 587}; /* Not transferred over network, so not __attribute__ ((packed)) */
606
607 588
608struct alive_command_responce { 589struct alive_command_responce {
609 u8 alive_command; 590 u8 alive_command;
@@ -627,8 +608,7 @@ struct ipw_rates {
627 u8 rates[IPW_MAX_RATES]; 608 u8 rates[IPW_MAX_RATES];
628} __attribute__ ((packed)); 609} __attribute__ ((packed));
629 610
630struct command_block 611struct command_block {
631{
632 unsigned int control; 612 unsigned int control;
633 u32 source_addr; 613 u32 source_addr;
634 u32 dest_addr; 614 u32 dest_addr;
@@ -636,18 +616,16 @@ struct command_block
636} __attribute__ ((packed)); 616} __attribute__ ((packed));
637 617
638#define CB_NUMBER_OF_ELEMENTS_SMALL 64 618#define CB_NUMBER_OF_ELEMENTS_SMALL 64
639struct fw_image_desc 619struct fw_image_desc {
640{
641 unsigned long last_cb_index; 620 unsigned long last_cb_index;
642 unsigned long current_cb_index; 621 unsigned long current_cb_index;
643 struct command_block cb_list[CB_NUMBER_OF_ELEMENTS_SMALL]; 622 struct command_block cb_list[CB_NUMBER_OF_ELEMENTS_SMALL];
644 void * v_addr; 623 void *v_addr;
645 unsigned long p_addr; 624 unsigned long p_addr;
646 unsigned long len; 625 unsigned long len;
647}; 626};
648 627
649struct ipw_sys_config 628struct ipw_sys_config {
650{
651 u8 bt_coexistence; 629 u8 bt_coexistence;
652 u8 reserved1; 630 u8 reserved1;
653 u8 answer_broadcast_ssid_probe; 631 u8 answer_broadcast_ssid_probe;
@@ -670,8 +648,7 @@ struct ipw_sys_config
670 u8 reserved3; 648 u8 reserved3;
671} __attribute__ ((packed)); 649} __attribute__ ((packed));
672 650
673struct ipw_multicast_addr 651struct ipw_multicast_addr {
674{
675 u8 num_of_multicast_addresses; 652 u8 num_of_multicast_addresses;
676 u8 reserved[3]; 653 u8 reserved[3];
677 u8 mac1[6]; 654 u8 mac1[6];
@@ -680,8 +657,7 @@ struct ipw_multicast_addr
680 u8 mac4[6]; 657 u8 mac4[6];
681} __attribute__ ((packed)); 658} __attribute__ ((packed));
682 659
683struct ipw_wep_key 660struct ipw_wep_key {
684{
685 u8 cmd_id; 661 u8 cmd_id;
686 u8 seq_num; 662 u8 seq_num;
687 u8 key_index; 663 u8 key_index;
@@ -689,8 +665,7 @@ struct ipw_wep_key
689 u8 key[16]; 665 u8 key[16];
690} __attribute__ ((packed)); 666} __attribute__ ((packed));
691 667
692struct ipw_tgi_tx_key 668struct ipw_tgi_tx_key {
693{
694 u8 key_id; 669 u8 key_id;
695 u8 security_type; 670 u8 security_type;
696 u8 station_index; 671 u8 station_index;
@@ -701,8 +676,7 @@ struct ipw_tgi_tx_key
701 676
702#define IPW_SCAN_CHANNELS 54 677#define IPW_SCAN_CHANNELS 54
703 678
704struct ipw_scan_request 679struct ipw_scan_request {
705{
706 u8 scan_type; 680 u8 scan_type;
707 u16 dwell_time; 681 u16 dwell_time;
708 u8 channels_list[IPW_SCAN_CHANNELS]; 682 u8 channels_list[IPW_SCAN_CHANNELS];
@@ -718,8 +692,7 @@ enum {
718 IPW_SCAN_TYPES 692 IPW_SCAN_TYPES
719}; 693};
720 694
721struct ipw_scan_request_ext 695struct ipw_scan_request_ext {
722{
723 u32 full_scan_index; 696 u32 full_scan_index;
724 u8 channels_list[IPW_SCAN_CHANNELS]; 697 u8 channels_list[IPW_SCAN_CHANNELS];
725 u8 scan_type[IPW_SCAN_CHANNELS / 2]; 698 u8 scan_type[IPW_SCAN_CHANNELS / 2];
@@ -740,19 +713,16 @@ extern inline void ipw_set_scan_type(struct ipw_scan_request_ext *scan,
740{ 713{
741 if (index % 2) 714 if (index % 2)
742 scan->scan_type[index / 2] = 715 scan->scan_type[index / 2] =
743 (scan->scan_type[index / 2] & 0xF0) | 716 (scan->scan_type[index / 2] & 0xF0) | (scan_type & 0x0F);
744 (scan_type & 0x0F);
745 else 717 else
746 scan->scan_type[index / 2] = 718 scan->scan_type[index / 2] =
747 (scan->scan_type[index / 2] & 0x0F) | 719 (scan->scan_type[index / 2] & 0x0F) |
748 ((scan_type & 0x0F) << 4); 720 ((scan_type & 0x0F) << 4);
749} 721}
750 722
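The helper above packs two 4-bit scan types into each byte of scan_type[]: even channel indexes land in the high nibble, odd indexes in the low nibble, which is why the array is only IPW_SCAN_CHANNELS / 2 bytes long. As an illustrative aside (not part of this patch), the matching read-back would look like the sketch below; the helper name is hypothetical.

/* Illustrative sketch: recover the 4-bit scan type packed by
 * ipw_set_scan_type() above. */
static inline u8 ipw_get_scan_type(struct ipw_scan_request_ext *scan, u8 index)
{
	if (index % 2)
		return scan->scan_type[index / 2] & 0x0F;	/* odd index: low nibble */
	return (scan->scan_type[index / 2] >> 4) & 0x0F;	/* even index: high nibble */
}
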
751struct ipw_associate 723struct ipw_associate {
752{
753 u8 channel; 724 u8 channel;
754 u8 auth_type:4, 725 u8 auth_type:4, auth_key:4;
755 auth_key:4;
756 u8 assoc_type; 726 u8 assoc_type;
757 u8 reserved; 727 u8 reserved;
758 u16 policy_support; 728 u16 policy_support;
@@ -771,8 +741,7 @@ struct ipw_associate
771 u16 reserved2; 741 u16 reserved2;
772} __attribute__ ((packed)); 742} __attribute__ ((packed));
773 743
774struct ipw_supported_rates 744struct ipw_supported_rates {
775{
776 u8 ieee_mode; 745 u8 ieee_mode;
777 u8 num_rates; 746 u8 num_rates;
778 u8 purpose; 747 u8 purpose;
@@ -780,42 +749,36 @@ struct ipw_supported_rates
780 u8 supported_rates[IPW_MAX_RATES]; 749 u8 supported_rates[IPW_MAX_RATES];
781} __attribute__ ((packed)); 750} __attribute__ ((packed));
782 751
783struct ipw_rts_threshold 752struct ipw_rts_threshold {
784{
785 u16 rts_threshold; 753 u16 rts_threshold;
786 u16 reserved; 754 u16 reserved;
787} __attribute__ ((packed)); 755} __attribute__ ((packed));
788 756
789struct ipw_frag_threshold 757struct ipw_frag_threshold {
790{
791 u16 frag_threshold; 758 u16 frag_threshold;
792 u16 reserved; 759 u16 reserved;
793} __attribute__ ((packed)); 760} __attribute__ ((packed));
794 761
795struct ipw_retry_limit 762struct ipw_retry_limit {
796{
797 u8 short_retry_limit; 763 u8 short_retry_limit;
798 u8 long_retry_limit; 764 u8 long_retry_limit;
799 u16 reserved; 765 u16 reserved;
800} __attribute__ ((packed)); 766} __attribute__ ((packed));
801 767
802struct ipw_dino_config 768struct ipw_dino_config {
803{
804 u32 dino_config_addr; 769 u32 dino_config_addr;
805 u16 dino_config_size; 770 u16 dino_config_size;
806 u8 dino_response; 771 u8 dino_response;
807 u8 reserved; 772 u8 reserved;
808} __attribute__ ((packed)); 773} __attribute__ ((packed));
809 774
810struct ipw_aironet_info 775struct ipw_aironet_info {
811{
812 u8 id; 776 u8 id;
813 u8 length; 777 u8 length;
814 u16 reserved; 778 u16 reserved;
815} __attribute__ ((packed)); 779} __attribute__ ((packed));
816 780
817struct ipw_rx_key 781struct ipw_rx_key {
818{
819 u8 station_index; 782 u8 station_index;
820 u8 key_type; 783 u8 key_type;
821 u8 key_id; 784 u8 key_id;
@@ -826,23 +789,20 @@ struct ipw_rx_key
826 u8 reserved; 789 u8 reserved;
827} __attribute__ ((packed)); 790} __attribute__ ((packed));
828 791
829struct ipw_country_channel_info 792struct ipw_country_channel_info {
830{
831 u8 first_channel; 793 u8 first_channel;
832 u8 no_channels; 794 u8 no_channels;
833 s8 max_tx_power; 795 s8 max_tx_power;
834} __attribute__ ((packed)); 796} __attribute__ ((packed));
835 797
836struct ipw_country_info 798struct ipw_country_info {
837{
838 u8 id; 799 u8 id;
839 u8 length; 800 u8 length;
840 u8 country_str[3]; 801 u8 country_str[3];
841 struct ipw_country_channel_info groups[7]; 802 struct ipw_country_channel_info groups[7];
842} __attribute__ ((packed)); 803} __attribute__ ((packed));
843 804
844struct ipw_channel_tx_power 805struct ipw_channel_tx_power {
845{
846 u8 channel_number; 806 u8 channel_number;
847 s8 tx_power; 807 s8 tx_power;
848} __attribute__ ((packed)); 808} __attribute__ ((packed));
@@ -852,15 +812,13 @@ struct ipw_channel_tx_power
852#define MAX_A_CHANNELS 37 812#define MAX_A_CHANNELS 37
853#define MAX_B_CHANNELS 14 813#define MAX_B_CHANNELS 14
854 814
855struct ipw_tx_power 815struct ipw_tx_power {
856{
857 u8 num_channels; 816 u8 num_channels;
858 u8 ieee_mode; 817 u8 ieee_mode;
859 struct ipw_channel_tx_power channels_tx_power[MAX_A_CHANNELS]; 818 struct ipw_channel_tx_power channels_tx_power[MAX_A_CHANNELS];
860} __attribute__ ((packed)); 819} __attribute__ ((packed));
861 820
862struct ipw_qos_parameters 821struct ipw_qos_parameters {
863{
864 u16 cw_min[4]; 822 u16 cw_min[4];
865 u16 cw_max[4]; 823 u16 cw_max[4];
866 u8 aifs[4]; 824 u8 aifs[4];
@@ -868,15 +826,13 @@ struct ipw_qos_parameters
868 u16 tx_op_limit[4]; 826 u16 tx_op_limit[4];
869} __attribute__ ((packed)); 827} __attribute__ ((packed));
870 828
871struct ipw_rsn_capabilities 829struct ipw_rsn_capabilities {
872{
873 u8 id; 830 u8 id;
874 u8 length; 831 u8 length;
875 u16 version; 832 u16 version;
876} __attribute__ ((packed)); 833} __attribute__ ((packed));
877 834
878struct ipw_sensitivity_calib 835struct ipw_sensitivity_calib {
879{
880 u16 beacon_rssi_raw; 836 u16 beacon_rssi_raw;
881 u16 reserved; 837 u16 reserved;
882} __attribute__ ((packed)); 838} __attribute__ ((packed));
@@ -895,10 +851,11 @@ struct ipw_sensitivity_calib
895 * - \a param filled with status parameters. 851 * - \a param filled with status parameters.
896 */ 852 */
897struct ipw_cmd { 853struct ipw_cmd {
898 u32 cmd; /**< Host command */ 854 u32 cmd; /**< Host command */
899 u32 status; /**< Status */ 855 u32 status;/**< Status */
900 u32 status_len; /**< How many 32 bit parameters in the status */ 856 u32 status_len;
901 u32 len; /**< incoming parameters length, bytes */ 857 /**< How many 32 bit parameters in the status */
858 u32 len; /**< incoming parameters length, bytes */
902 /** 859 /**
903 * command parameters. 860 * command parameters.
904 * There should be enough space for incoming and 861 * There should be enough space for incoming and
@@ -906,10 +863,10 @@ struct ipw_cmd {
906 * Incoming parameters listed 1-st, followed by outcoming params. 863 * Incoming parameters listed 1-st, followed by outcoming params.
907 * nParams=(len+3)/4+status_len 864 * nParams=(len+3)/4+status_len
908 */ 865 */
909 u32 param[0]; 866 u32 param[0];
910} __attribute__ ((packed)); 867} __attribute__ ((packed));
911 868
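The comment above fixes the size of the flexible param[] array: nParams = (len + 3) / 4 + status_len, i.e. the incoming parameter bytes rounded up to whole 32-bit words plus the 32-bit status words. A hedged sketch of the resulting buffer-size computation (the helper name is illustrative, not the driver's actual allocation path):

/* Illustrative only: total size of an ipw_cmd carrying 'len' parameter
 * bytes and 'status_len' 32-bit status words, per the nParams formula
 * in the comment above. */
static inline size_t ipw_cmd_size_example(u32 len, u32 status_len)
{
	u32 nparams = (len + 3) / 4 + status_len;

	return sizeof(struct ipw_cmd) + nparams * sizeof(u32);
}

For example, a command with 6 parameter bytes and 1 status word needs (6 + 3) / 4 + 1 = 3 param slots, i.e. 12 extra bytes after the fixed header.
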
912#define STATUS_HCMD_ACTIVE (1<<0) /**< host command in progress */ 869#define STATUS_HCMD_ACTIVE (1<<0) /**< host command in progress */
913 870
914#define STATUS_INT_ENABLED (1<<1) 871#define STATUS_INT_ENABLED (1<<1)
915#define STATUS_RF_KILL_HW (1<<2) 872#define STATUS_RF_KILL_HW (1<<2)
@@ -932,15 +889,15 @@ struct ipw_cmd {
932#define STATUS_SCANNING (1<<21) 889#define STATUS_SCANNING (1<<21)
933#define STATUS_SCAN_ABORTING (1<<22) 890#define STATUS_SCAN_ABORTING (1<<22)
934 891
935#define STATUS_INDIRECT_BYTE (1<<28) /* sysfs entry configured for access */ 892#define STATUS_INDIRECT_BYTE (1<<28) /* sysfs entry configured for access */
936#define STATUS_INDIRECT_DWORD (1<<29) /* sysfs entry configured for access */ 893#define STATUS_INDIRECT_DWORD (1<<29) /* sysfs entry configured for access */
937#define STATUS_DIRECT_DWORD (1<<30) /* sysfs entry configured for access */ 894#define STATUS_DIRECT_DWORD (1<<30) /* sysfs entry configured for access */
938 895
939#define STATUS_SECURITY_UPDATED (1<<31) /* Security sync needed */ 896#define STATUS_SECURITY_UPDATED (1<<31) /* Security sync needed */
940 897
941#define CFG_STATIC_CHANNEL (1<<0) /* Restrict assoc. to single channel */ 898#define CFG_STATIC_CHANNEL (1<<0) /* Restrict assoc. to single channel */
942#define CFG_STATIC_ESSID (1<<1) /* Restrict assoc. to single SSID */ 899#define CFG_STATIC_ESSID (1<<1) /* Restrict assoc. to single SSID */
943#define CFG_STATIC_BSSID (1<<2) /* Restrict assoc. to single BSSID */ 900#define CFG_STATIC_BSSID (1<<2) /* Restrict assoc. to single BSSID */
944#define CFG_CUSTOM_MAC (1<<3) 901#define CFG_CUSTOM_MAC (1<<3)
945#define CFG_PREAMBLE (1<<4) 902#define CFG_PREAMBLE (1<<4)
946#define CFG_ADHOC_PERSIST (1<<5) 903#define CFG_ADHOC_PERSIST (1<<5)
@@ -948,8 +905,8 @@ struct ipw_cmd {
948#define CFG_FIXED_RATE (1<<7) 905#define CFG_FIXED_RATE (1<<7)
949#define CFG_ADHOC_CREATE (1<<8) 906#define CFG_ADHOC_CREATE (1<<8)
950 907
951#define CAP_SHARED_KEY (1<<0) /* Off = OPEN */ 908#define CAP_SHARED_KEY (1<<0) /* Off = OPEN */
952#define CAP_PRIVACY_ON (1<<1) /* Off = No privacy */ 909#define CAP_PRIVACY_ON (1<<1) /* Off = No privacy */
953 910
954#define MAX_STATIONS 32 911#define MAX_STATIONS 32
955#define IPW_INVALID_STATION (0xff) 912#define IPW_INVALID_STATION (0xff)
@@ -989,8 +946,8 @@ struct ipw_priv {
989 /* result of ucode download */ 946 /* result of ucode download */
990 struct alive_command_responce dino_alive; 947 struct alive_command_responce dino_alive;
991 948
992 wait_queue_head_t wait_command_queue; 949 wait_queue_head_t wait_command_queue;
993 wait_queue_head_t wait_state; 950 wait_queue_head_t wait_state;
994 951
995 /* Rx and Tx DMA processing queues */ 952 /* Rx and Tx DMA processing queues */
996 struct ipw_rx_queue *rxq; 953 struct ipw_rx_queue *rxq;
@@ -1006,9 +963,9 @@ struct ipw_priv {
1006 struct average average_rssi; 963 struct average average_rssi;
1007 struct average average_noise; 964 struct average average_noise;
1008 u32 port_type; 965 u32 port_type;
1009 int rx_bufs_min; /**< minimum number of bufs in Rx queue */ 966 int rx_bufs_min; /**< minimum number of bufs in Rx queue */
1010 int rx_pend_max; /**< maximum pending buffers for one IRQ */ 967 int rx_pend_max; /**< maximum pending buffers for one IRQ */
1011 u32 hcmd_seq; /**< sequence number for hcmd */ 968 u32 hcmd_seq; /**< sequence number for hcmd */
1012 u32 missed_beacon_threshold; 969 u32 missed_beacon_threshold;
1013 u32 roaming_threshold; 970 u32 roaming_threshold;
1014 971
@@ -1017,17 +974,17 @@ struct ipw_priv {
1017 974
1018 unsigned long ts_scan_abort; 975 unsigned long ts_scan_abort;
1019 struct ipw_supported_rates rates; 976 struct ipw_supported_rates rates;
1020 struct ipw_rates phy[3]; /**< PHY restrictions, per band */ 977 struct ipw_rates phy[3]; /**< PHY restrictions, per band */
1021 struct ipw_rates supp; /**< software defined */ 978 struct ipw_rates supp; /**< software defined */
1022 struct ipw_rates extended; /**< use for corresp. IE, AP only */ 979 struct ipw_rates extended; /**< use for corresp. IE, AP only */
1023 980
1024 struct notif_link_deterioration last_link_deterioration; /** for statistics */ 981 struct notif_link_deterioration last_link_deterioration; /** for statistics */
1025 struct ipw_cmd* hcmd; /**< host command currently executed */ 982 struct ipw_cmd *hcmd; /**< host command currently executed */
1026 983
1027 wait_queue_head_t hcmd_wq; /**< host command waits for execution */ 984 wait_queue_head_t hcmd_wq; /**< host command waits for execution */
1028 u32 tsf_bcn[2]; /**< TSF from latest beacon */ 985 u32 tsf_bcn[2]; /**< TSF from latest beacon */
1029 986
1030 struct notif_calibration calib; /**< last calibration */ 987 struct notif_calibration calib; /**< last calibration */
1031 988
1032 /* ordinal interface with firmware */ 989 /* ordinal interface with firmware */
1033 u32 table0_addr; 990 u32 table0_addr;
@@ -1067,8 +1024,8 @@ struct ipw_priv {
1067 u32 tx_packets; 1024 u32 tx_packets;
1068 u32 quality; 1025 u32 quality;
1069 1026
1070 /* eeprom */ 1027 /* eeprom */
1071 u8 eeprom[0x100]; /* 256 bytes of eeprom */ 1028 u8 eeprom[0x100]; /* 256 bytes of eeprom */
1072 int eeprom_delay; 1029 int eeprom_delay;
1073 1030
1074 struct iw_statistics wstats; 1031 struct iw_statistics wstats;
@@ -1091,7 +1048,6 @@ struct ipw_priv {
1091 1048
1092 struct tasklet_struct irq_tasklet; 1049 struct tasklet_struct irq_tasklet;
1093 1050
1094
1095#define IPW_2200BG 1 1051#define IPW_2200BG 1
1096#define IPW_2915ABG 2 1052#define IPW_2915ABG 2
1097 u8 adapter; 1053 u8 adapter;
@@ -1114,7 +1070,6 @@ struct ipw_priv {
1114 u32 indirect_byte; 1070 u32 indirect_byte;
1115}; /*ipw_priv */ 1071}; /*ipw_priv */
1116 1072
1117
1118/* debug macros */ 1073/* debug macros */
1119 1074
1120#ifdef CONFIG_IPW_DEBUG 1075#ifdef CONFIG_IPW_DEBUG
@@ -1170,7 +1125,6 @@ do { if (ipw_debug_level & (level)) \
1170#define IPW_DL_RF_KILL (1<<17) 1125#define IPW_DL_RF_KILL (1<<17)
1171#define IPW_DL_FW_ERRORS (1<<18) 1126#define IPW_DL_FW_ERRORS (1<<18)
1172 1127
1173
1174#define IPW_DL_ORD (1<<20) 1128#define IPW_DL_ORD (1<<20)
1175 1129
1176#define IPW_DL_FRAG (1<<21) 1130#define IPW_DL_FRAG (1<<21)
@@ -1184,7 +1138,6 @@ do { if (ipw_debug_level & (level)) \
1184 1138
1185#define IPW_DL_STATS (1<<29) 1139#define IPW_DL_STATS (1<<29)
1186 1140
1187
1188#define IPW_ERROR(f, a...) printk(KERN_ERR DRV_NAME ": " f, ## a) 1141#define IPW_ERROR(f, a...) printk(KERN_ERR DRV_NAME ": " f, ## a)
1189#define IPW_WARNING(f, a...) printk(KERN_WARNING DRV_NAME ": " f, ## a) 1142#define IPW_WARNING(f, a...) printk(KERN_WARNING DRV_NAME ": " f, ## a)
1190#define IPW_DEBUG_INFO(f, a...) IPW_DEBUG(IPW_DL_INFO, f, ## a) 1143#define IPW_DEBUG_INFO(f, a...) IPW_DEBUG(IPW_DL_INFO, f, ## a)
@@ -1253,12 +1206,12 @@ do { if (ipw_debug_level & (level)) \
1253/* 1206/*
1254 * RESET Register Bit Indexes 1207 * RESET Register Bit Indexes
1255 */ 1208 */
1256#define CBD_RESET_REG_PRINCETON_RESET 0x00000001 /* Bit 0 (LSB) */ 1209#define CBD_RESET_REG_PRINCETON_RESET 0x00000001 /* Bit 0 (LSB) */
1257#define CX2_RESET_REG_SW_RESET 0x00000080 /* Bit 7 */ 1210#define CX2_RESET_REG_SW_RESET 0x00000080 /* Bit 7 */
1258#define CX2_RESET_REG_MASTER_DISABLED 0x00000100 /* Bit 8 */ 1211#define CX2_RESET_REG_MASTER_DISABLED 0x00000100 /* Bit 8 */
1259#define CX2_RESET_REG_STOP_MASTER 0x00000200 /* Bit 9 */ 1212#define CX2_RESET_REG_STOP_MASTER 0x00000200 /* Bit 9 */
1260#define CX2_ARC_KESHET_CONFIG 0x08000000 /* Bit 27 */ 1213#define CX2_ARC_KESHET_CONFIG 0x08000000 /* Bit 27 */
1261#define CX2_START_STANDBY 0x00000004 /* Bit 2 */ 1214#define CX2_START_STANDBY 0x00000004 /* Bit 2 */
1262 1215
1263#define CX2_CSR_CIS_UPPER_BOUND 0x00000200 1216#define CX2_CSR_CIS_UPPER_BOUND 0x00000200
1264#define CX2_DOMAIN_0_END 0x1000 1217#define CX2_DOMAIN_0_END 0x1000
@@ -1289,14 +1242,12 @@ do { if (ipw_debug_level & (level)) \
1289#define CB_SRC_SIZE_LONG 0x00200000 1242#define CB_SRC_SIZE_LONG 0x00200000
1290#define CB_DEST_SIZE_LONG 0x00020000 1243#define CB_DEST_SIZE_LONG 0x00020000
1291 1244
1292
1293/* DMA DEFINES */ 1245/* DMA DEFINES */
1294 1246
1295#define DMA_CONTROL_SMALL_CB_CONST_VALUE 0x00540000 1247#define DMA_CONTROL_SMALL_CB_CONST_VALUE 0x00540000
1296#define DMA_CB_STOP_AND_ABORT 0x00000C00 1248#define DMA_CB_STOP_AND_ABORT 0x00000C00
1297#define DMA_CB_START 0x00000100 1249#define DMA_CB_START 0x00000100
1298 1250
1299
1300#define CX2_SHARED_SRAM_SIZE 0x00030000 1251#define CX2_SHARED_SRAM_SIZE 0x00030000
1301#define CX2_SHARED_SRAM_DMA_CONTROL 0x00027000 1252#define CX2_SHARED_SRAM_DMA_CONTROL 0x00027000
1302#define CB_MAX_LENGTH 0x1FFF 1253#define CB_MAX_LENGTH 0x1FFF
@@ -1304,7 +1255,6 @@ do { if (ipw_debug_level & (level)) \
1304#define CX2_HOST_EEPROM_DATA_SRAM_SIZE 0xA18 1255#define CX2_HOST_EEPROM_DATA_SRAM_SIZE 0xA18
1305#define CX2_EEPROM_IMAGE_SIZE 0x100 1256#define CX2_EEPROM_IMAGE_SIZE 0x100
1306 1257
1307
1308/* DMA defs */ 1258/* DMA defs */
1309#define CX2_DMA_I_CURRENT_CB 0x003000D0 1259#define CX2_DMA_I_CURRENT_CB 0x003000D0
1310#define CX2_DMA_O_CURRENT_CB 0x003000D4 1260#define CX2_DMA_O_CURRENT_CB 0x003000D4
@@ -1356,7 +1306,6 @@ do { if (ipw_debug_level & (level)) \
1356#define IPW_WHO_IS_AWAKE (CX2_SHARED_LOWER_BOUND + 0xB14) 1306#define IPW_WHO_IS_AWAKE (CX2_SHARED_LOWER_BOUND + 0xB14)
1357#define IPW_DURING_ATIM_WINDOW (CX2_SHARED_LOWER_BOUND + 0xB18) 1307#define IPW_DURING_ATIM_WINDOW (CX2_SHARED_LOWER_BOUND + 0xB18)
1358 1308
1359
1360#define MSB 1 1309#define MSB 1
1361#define LSB 0 1310#define LSB 0
1362#define WORD_TO_BYTE(_word) ((_word) * sizeof(u16)) 1311#define WORD_TO_BYTE(_word) ((_word) * sizeof(u16))
@@ -1365,16 +1314,16 @@ do { if (ipw_debug_level & (level)) \
1365 ( WORD_TO_BYTE(_wordoffset) + (_byteoffset) ) 1314 ( WORD_TO_BYTE(_wordoffset) + (_byteoffset) )
1366 1315
1367/* EEPROM access by BYTE */ 1316/* EEPROM access by BYTE */
1368#define EEPROM_PME_CAPABILITY (GET_EEPROM_ADDR(0x09,MSB)) /* 1 byte */ 1317#define EEPROM_PME_CAPABILITY (GET_EEPROM_ADDR(0x09,MSB)) /* 1 byte */
1369#define EEPROM_MAC_ADDRESS (GET_EEPROM_ADDR(0x21,LSB)) /* 6 byte */ 1318#define EEPROM_MAC_ADDRESS (GET_EEPROM_ADDR(0x21,LSB)) /* 6 byte */
1370#define EEPROM_VERSION (GET_EEPROM_ADDR(0x24,MSB)) /* 1 byte */ 1319#define EEPROM_VERSION (GET_EEPROM_ADDR(0x24,MSB)) /* 1 byte */
1371#define EEPROM_NIC_TYPE (GET_EEPROM_ADDR(0x25,LSB)) /* 1 byte */ 1320#define EEPROM_NIC_TYPE (GET_EEPROM_ADDR(0x25,LSB)) /* 1 byte */
1372#define EEPROM_SKU_CAPABILITY (GET_EEPROM_ADDR(0x25,MSB)) /* 1 byte */ 1321#define EEPROM_SKU_CAPABILITY (GET_EEPROM_ADDR(0x25,MSB)) /* 1 byte */
1373#define EEPROM_COUNTRY_CODE (GET_EEPROM_ADDR(0x26,LSB)) /* 3 bytes */ 1322#define EEPROM_COUNTRY_CODE (GET_EEPROM_ADDR(0x26,LSB)) /* 3 bytes */
1374#define EEPROM_IBSS_CHANNELS_BG (GET_EEPROM_ADDR(0x28,LSB)) /* 2 bytes */ 1323#define EEPROM_IBSS_CHANNELS_BG (GET_EEPROM_ADDR(0x28,LSB)) /* 2 bytes */
1375#define EEPROM_IBSS_CHANNELS_A (GET_EEPROM_ADDR(0x29,MSB)) /* 5 bytes */ 1324#define EEPROM_IBSS_CHANNELS_A (GET_EEPROM_ADDR(0x29,MSB)) /* 5 bytes */
1376#define EEPROM_BSS_CHANNELS_BG (GET_EEPROM_ADDR(0x2c,LSB)) /* 2 bytes */ 1325#define EEPROM_BSS_CHANNELS_BG (GET_EEPROM_ADDR(0x2c,LSB)) /* 2 bytes */
1377#define EEPROM_HW_VERSION (GET_EEPROM_ADDR(0x72,LSB)) /* 2 bytes */ 1326#define EEPROM_HW_VERSION (GET_EEPROM_ADDR(0x72,LSB)) /* 2 bytes */
1378 1327
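Since WORD_TO_BYTE(w) is w * sizeof(u16) and LSB/MSB select the low or high byte of a 16-bit word, each of these macros resolves to a plain byte offset into the 256-byte EEPROM image cached in ipw_priv; EEPROM_MAC_ADDRESS, for instance, is 0x21 * 2 + 0 = 0x42. A hedged sketch of reading the MAC address out of that cache (the helper name is illustrative):

/* Illustrative only: copy the 6-byte MAC address out of the cached
 * EEPROM image using the byte offsets defined above. */
static void ipw_eeprom_mac_example(struct ipw_priv *priv, u8 mac[6])
{
	memcpy(mac, &priv->eeprom[EEPROM_MAC_ADDRESS], 6);	/* offset 0x42 */
}
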
1379/* NIC type as found in the one byte EEPROM_NIC_TYPE offset*/ 1328/* NIC type as found in the one byte EEPROM_NIC_TYPE offset*/
1380#define EEPROM_NIC_TYPE_STANDARD 0 1329#define EEPROM_NIC_TYPE_STANDARD 0
@@ -1479,7 +1428,6 @@ enum {
1479#define IPW_RATE_CAPABILITIES 1 1428#define IPW_RATE_CAPABILITIES 1
1480#define IPW_RATE_CONNECT 0 1429#define IPW_RATE_CONNECT 0
1481 1430
1482
1483/* 1431/*
1484 * Rate values and masks 1432 * Rate values and masks
1485 */ 1433 */
@@ -1524,12 +1472,6 @@ enum {
1524 IPW_ORD_STAT_TX_DIR_DATA_B_11, 1472 IPW_ORD_STAT_TX_DIR_DATA_B_11,
1525 /* Hole */ 1473 /* Hole */
1526 1474
1527
1528
1529
1530
1531
1532
1533 IPW_ORD_STAT_TX_DIR_DATA_G_1 = IPW_ORD_TABLE_0_MASK + 19, 1475 IPW_ORD_STAT_TX_DIR_DATA_G_1 = IPW_ORD_TABLE_0_MASK + 19,
1534 IPW_ORD_STAT_TX_DIR_DATA_G_2, 1476 IPW_ORD_STAT_TX_DIR_DATA_G_2,
1535 IPW_ORD_STAT_TX_DIR_DATA_G_5_5, 1477 IPW_ORD_STAT_TX_DIR_DATA_G_5_5,
@@ -1549,12 +1491,6 @@ enum {
1549 IPW_ORD_STAT_TX_NON_DIR_DATA_B_11, 1491 IPW_ORD_STAT_TX_NON_DIR_DATA_B_11,
1550 /* Hole */ 1492 /* Hole */
1551 1493
1552
1553
1554
1555
1556
1557
1558 IPW_ORD_STAT_TX_NON_DIR_DATA_G_1 = IPW_ORD_TABLE_0_MASK + 44, 1494 IPW_ORD_STAT_TX_NON_DIR_DATA_G_1 = IPW_ORD_TABLE_0_MASK + 44,
1559 IPW_ORD_STAT_TX_NON_DIR_DATA_G_2, 1495 IPW_ORD_STAT_TX_NON_DIR_DATA_G_2,
1560 IPW_ORD_STAT_TX_NON_DIR_DATA_G_5_5, 1496 IPW_ORD_STAT_TX_NON_DIR_DATA_G_5_5,
@@ -1685,7 +1621,7 @@ struct host_cmd {
1685#define CFG_BT_COEXISTENCE_WME_OVER_BT 0x08 1621#define CFG_BT_COEXISTENCE_WME_OVER_BT 0x08
1686#define CFG_BT_COEXISTENCE_OOB 0x10 1622#define CFG_BT_COEXISTENCE_OOB 0x10
1687#define CFG_BT_COEXISTENCE_MAX 0xFF 1623#define CFG_BT_COEXISTENCE_MAX 0xFF
1688#define CFG_BT_COEXISTENCE_DEF 0x80 /* read Bt from EEPROM*/ 1624#define CFG_BT_COEXISTENCE_DEF 0x80 /* read Bt from EEPROM */
1689 1625
1690#define CFG_CTS_TO_ITSELF_ENABLED_MIN 0x0 1626#define CFG_CTS_TO_ITSELF_ENABLED_MIN 0x0
1691#define CFG_CTS_TO_ITSELF_ENABLED_MAX 0x1 1627#define CFG_CTS_TO_ITSELF_ENABLED_MAX 0x1
@@ -1727,11 +1663,11 @@ static inline u32 frame_hdr_len(struct ieee80211_hdr *hdr)
1727 fc = le16_to_cpu(hdr->frame_ctl); 1663 fc = le16_to_cpu(hdr->frame_ctl);
1728 1664
1729 /* 1665 /*
1730 * Function ToDS FromDS 1666 * Function ToDS FromDS
1731 * IBSS 0 0 1667 * IBSS 0 0
1732 * To AP 1 0 1668 * To AP 1 0
1733 * From AP 0 1 1669 * From AP 0 1
1734 * WDS (bridge) 1 1 1670 * WDS (bridge) 1 1
1735 * 1671 *
1736 * Only WDS frames use Address4 among them. --YZ 1672 * Only WDS frames use Address4 among them. --YZ
1737 */ 1673 */
@@ -1741,4 +1677,4 @@ static inline u32 frame_hdr_len(struct ieee80211_hdr *hdr)
1741 return retval; 1677 return retval;
1742} 1678}
1743 1679
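The comment above is the standard 802.11 ToDS/FromDS matrix; only the WDS case (both bits set) carries a fourth address, so the header grows from 24 to 30 bytes. The function body is elided in this hunk, but the decision it documents amounts to the sketch below, assuming the IEEE80211_FCTL_TODS/FROMDS masks from net/ieee80211.h (illustrative only, not the driver's literal code):

/* Illustrative only: 802.11 header length from the ToDS/FromDS bits,
 * per the table in frame_hdr_len() above. */
static inline u32 hdr_len_from_fc_example(u16 fc)
{
	if ((fc & (IEEE80211_FCTL_TODS | IEEE80211_FCTL_FROMDS)) ==
	    (IEEE80211_FCTL_TODS | IEEE80211_FCTL_FROMDS))
		return 30;	/* WDS (bridge): Address4 present */
	return 24;		/* IBSS, to-AP, from-AP */
}
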
1744#endif /* __ipw2200_h__ */ 1680#endif /* __ipw2200_h__ */
diff --git a/drivers/net/wireless/netwave_cs.c b/drivers/net/wireless/netwave_cs.c
index 5f507c49907b..ca6c03c89926 100644
--- a/drivers/net/wireless/netwave_cs.c
+++ b/drivers/net/wireless/netwave_cs.c
@@ -471,12 +471,12 @@ static dev_link_t *netwave_attach(void)
471 dev->get_stats = &netwave_get_stats; 471 dev->get_stats = &netwave_get_stats;
472 dev->set_multicast_list = &set_multicast_list; 472 dev->set_multicast_list = &set_multicast_list;
473 /* wireless extensions */ 473 /* wireless extensions */
474#ifdef WIRELESS_EXT 474#if WIRELESS_EXT <= 16
475 dev->get_wireless_stats = &netwave_get_wireless_stats; 475 dev->get_wireless_stats = &netwave_get_wireless_stats;
476#endif /* WIRELESS_EXT <= 16 */
476#if WIRELESS_EXT > 12 477#if WIRELESS_EXT > 12
477 dev->wireless_handlers = (struct iw_handler_def *)&netwave_handler_def; 478 dev->wireless_handlers = (struct iw_handler_def *)&netwave_handler_def;
478#endif /* WIRELESS_EXT > 12 */ 479#endif /* WIRELESS_EXT > 12 */
479#endif /* WIRELESS_EXT */
480 dev->do_ioctl = &netwave_ioctl; 480 dev->do_ioctl = &netwave_ioctl;
481 481
482 dev->tx_timeout = &netwave_watchdog; 482 dev->tx_timeout = &netwave_watchdog;
@@ -839,6 +839,9 @@ static const struct iw_handler_def netwave_handler_def =
839 .standard = (iw_handler *) netwave_handler, 839 .standard = (iw_handler *) netwave_handler,
840 .private = (iw_handler *) netwave_private_handler, 840 .private = (iw_handler *) netwave_private_handler,
841 .private_args = (struct iw_priv_args *) netwave_private_args, 841 .private_args = (struct iw_priv_args *) netwave_private_args,
842#if WIRELESS_EXT > 16
843 .get_wireless_stats = netwave_get_wireless_stats,
844#endif /* WIRELESS_EXT > 16 */
842}; 845};
843#endif /* WIRELESS_EXT > 12 */ 846#endif /* WIRELESS_EXT > 12 */
844 847
diff --git a/drivers/net/wireless/prism54/isl_ioctl.c b/drivers/net/wireless/prism54/isl_ioctl.c
index 0f29a9c7bc2c..9a8790e3580c 100644
--- a/drivers/net/wireless/prism54/isl_ioctl.c
+++ b/drivers/net/wireless/prism54/isl_ioctl.c
@@ -2727,6 +2727,9 @@ const struct iw_handler_def prism54_handler_def = {
2727 .standard = (iw_handler *) prism54_handler, 2727 .standard = (iw_handler *) prism54_handler,
2728 .private = (iw_handler *) prism54_private_handler, 2728 .private = (iw_handler *) prism54_private_handler,
2729 .private_args = (struct iw_priv_args *) prism54_private_args, 2729 .private_args = (struct iw_priv_args *) prism54_private_args,
2730#if WIRELESS_EXT > 16
2731 .get_wireless_stats = prism54_get_wireless_stats,
2732#endif /* WIRELESS_EXT > 16 */
2730#if WIRELESS_EXT == 16 2733#if WIRELESS_EXT == 16
2731 .spy_offset = offsetof(islpci_private, spy_data), 2734 .spy_offset = offsetof(islpci_private, spy_data),
2732#endif /* WIRELESS_EXT == 16 */ 2735#endif /* WIRELESS_EXT == 16 */
diff --git a/drivers/net/wireless/prism54/islpci_dev.c b/drivers/net/wireless/prism54/islpci_dev.c
index efab07e9e24e..6f13d4a8e2d3 100644
--- a/drivers/net/wireless/prism54/islpci_dev.c
+++ b/drivers/net/wireless/prism54/islpci_dev.c
@@ -815,7 +815,6 @@ islpci_setup(struct pci_dev *pdev)
815 ndev->open = &islpci_open; 815 ndev->open = &islpci_open;
816 ndev->stop = &islpci_close; 816 ndev->stop = &islpci_close;
817 ndev->get_stats = &islpci_statistics; 817 ndev->get_stats = &islpci_statistics;
818 ndev->get_wireless_stats = &prism54_get_wireless_stats;
819 ndev->do_ioctl = &prism54_ioctl; 818 ndev->do_ioctl = &prism54_ioctl;
820 ndev->wireless_handlers = 819 ndev->wireless_handlers =
821 (struct iw_handler_def *) &prism54_handler_def; 820 (struct iw_handler_def *) &prism54_handler_def;
@@ -844,6 +843,8 @@ islpci_setup(struct pci_dev *pdev)
844 /* Add pointers to enable iwspy support. */ 843 /* Add pointers to enable iwspy support. */
845 priv->wireless_data.spy_data = &priv->spy_data; 844 priv->wireless_data.spy_data = &priv->spy_data;
846 ndev->wireless_data = &priv->wireless_data; 845 ndev->wireless_data = &priv->wireless_data;
846#else /* WIRELESS_EXT > 16 */
847 ndev->get_wireless_stats = &prism54_get_wireless_stats;
847#endif /* WIRELESS_EXT > 16 */ 848#endif /* WIRELESS_EXT > 16 */
848 849
849 /* save the start and end address of the PCI memory area */ 850 /* save the start and end address of the PCI memory area */
diff --git a/drivers/net/wireless/ray_cs.c b/drivers/net/wireless/ray_cs.c
index 0e0ba614259a..e9c5ea0f5535 100644
--- a/drivers/net/wireless/ray_cs.c
+++ b/drivers/net/wireless/ray_cs.c
@@ -53,6 +53,7 @@
53#include <pcmcia/ds.h> 53#include <pcmcia/ds.h>
54#include <pcmcia/mem_op.h> 54#include <pcmcia/mem_op.h>
55 55
56#include <net/ieee80211.h>
56#include <linux/wireless.h> 57#include <linux/wireless.h>
57 58
58#include <asm/io.h> 59#include <asm/io.h>
@@ -64,7 +65,6 @@
64#define WIRELESS_SPY /* Enable spying addresses */ 65#define WIRELESS_SPY /* Enable spying addresses */
65/* Definitions we need for spy */ 66/* Definitions we need for spy */
66typedef struct iw_statistics iw_stats; 67typedef struct iw_statistics iw_stats;
67typedef struct iw_quality iw_qual;
68typedef u_char mac_addr[ETH_ALEN]; /* Hardware address */ 68typedef u_char mac_addr[ETH_ALEN]; /* Hardware address */
69 69
70#include "rayctl.h" 70#include "rayctl.h"
@@ -101,7 +101,6 @@ static int ray_dev_close(struct net_device *dev);
101static int ray_dev_config(struct net_device *dev, struct ifmap *map); 101static int ray_dev_config(struct net_device *dev, struct ifmap *map);
102static struct net_device_stats *ray_get_stats(struct net_device *dev); 102static struct net_device_stats *ray_get_stats(struct net_device *dev);
103static int ray_dev_init(struct net_device *dev); 103static int ray_dev_init(struct net_device *dev);
104static int ray_dev_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd);
105 104
106static struct ethtool_ops netdev_ethtool_ops; 105static struct ethtool_ops netdev_ethtool_ops;
107 106
@@ -114,9 +113,8 @@ static int translate_frame(ray_dev_t *local, struct tx_msg __iomem *ptx,
114static void ray_build_header(ray_dev_t *local, struct tx_msg __iomem *ptx, UCHAR msg_type, 113static void ray_build_header(ray_dev_t *local, struct tx_msg __iomem *ptx, UCHAR msg_type,
115 unsigned char *data); 114 unsigned char *data);
116static void untranslate(ray_dev_t *local, struct sk_buff *skb, int len); 115static void untranslate(ray_dev_t *local, struct sk_buff *skb, int len);
117#if WIRELESS_EXT > 7 /* If wireless extension exist in the kernel */
118static iw_stats * ray_get_wireless_stats(struct net_device * dev); 116static iw_stats * ray_get_wireless_stats(struct net_device * dev);
119#endif /* WIRELESS_EXT > 7 */ 117static const struct iw_handler_def ray_handler_def;
120 118
121/***** Prototypes for raylink functions **************************************/ 119/***** Prototypes for raylink functions **************************************/
122static int asc_to_int(char a); 120static int asc_to_int(char a);
@@ -373,11 +371,12 @@ static dev_link_t *ray_attach(void)
373 dev->hard_start_xmit = &ray_dev_start_xmit; 371 dev->hard_start_xmit = &ray_dev_start_xmit;
374 dev->set_config = &ray_dev_config; 372 dev->set_config = &ray_dev_config;
375 dev->get_stats = &ray_get_stats; 373 dev->get_stats = &ray_get_stats;
376 dev->do_ioctl = &ray_dev_ioctl;
377 SET_ETHTOOL_OPS(dev, &netdev_ethtool_ops); 374 SET_ETHTOOL_OPS(dev, &netdev_ethtool_ops);
378#if WIRELESS_EXT > 7 /* If wireless extension exist in the kernel */ 375 dev->wireless_handlers = &ray_handler_def;
379 dev->get_wireless_stats = ray_get_wireless_stats; 376#ifdef WIRELESS_SPY
380#endif 377 local->wireless_data.spy_data = &local->spy_data;
378 dev->wireless_data = &local->wireless_data;
379#endif /* WIRELESS_SPY */
381 380
382 dev->set_multicast_list = &set_multicast_list; 381 dev->set_multicast_list = &set_multicast_list;
383 382
@@ -1201,436 +1200,420 @@ static struct ethtool_ops netdev_ethtool_ops = {
1201 1200
1202/*====================================================================*/ 1201/*====================================================================*/
1203 1202
1204static int ray_dev_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd) 1203/*------------------------------------------------------------------*/
1204/*
1205 * Wireless Handler : get protocol name
1206 */
1207static int ray_get_name(struct net_device *dev,
1208 struct iw_request_info *info,
1209 char *cwrq,
1210 char *extra)
1205{ 1211{
1206 ray_dev_t *local = (ray_dev_t *)dev->priv; 1212 strcpy(cwrq, "IEEE 802.11-FH");
1207 dev_link_t *link = local->finder; 1213 return 0;
1208 int err = 0; 1214}
1209#if WIRELESS_EXT > 7
1210 struct iwreq *wrq = (struct iwreq *) ifr;
1211#endif /* WIRELESS_EXT > 7 */
1212#ifdef WIRELESS_SPY
1213 struct sockaddr address[IW_MAX_SPY];
1214#endif /* WIRELESS_SPY */
1215 1215
1216 if (!(link->state & DEV_PRESENT)) { 1216/*------------------------------------------------------------------*/
1217 DEBUG(2,"ray_dev_ioctl - device not present\n"); 1217/*
1218 return -1; 1218 * Wireless Handler : set frequency
1219 } 1219 */
1220 DEBUG(2,"ray_cs IOCTL dev=%p, ifr=%p, cmd = 0x%x\n",dev,ifr,cmd); 1220static int ray_set_freq(struct net_device *dev,
1221 /* Validate the command */ 1221 struct iw_request_info *info,
1222 switch (cmd) 1222 struct iw_freq *fwrq,
1223 { 1223 char *extra)
1224#if WIRELESS_EXT > 7 1224{
1225 /* --------------- WIRELESS EXTENSIONS --------------- */ 1225 ray_dev_t *local = (ray_dev_t *)dev->priv;
1226 /* Get name */ 1226 int err = -EINPROGRESS; /* Call commit handler */
1227 case SIOCGIWNAME:
1228 strcpy(wrq->u.name, "IEEE 802.11-FH");
1229 break;
1230
1231 /* Get frequency/channel */
1232 case SIOCGIWFREQ:
1233 wrq->u.freq.m = local->sparm.b5.a_hop_pattern;
1234 wrq->u.freq.e = 0;
1235 break;
1236
1237 /* Set frequency/channel */
1238 case SIOCSIWFREQ:
1239 /* Reject if card is already initialised */
1240 if(local->card_status != CARD_AWAITING_PARAM)
1241 {
1242 err = -EBUSY;
1243 break;
1244 }
1245 1227
1246 /* Setting by channel number */ 1228 /* Reject if card is already initialised */
1247 if ((wrq->u.freq.m > USA_HOP_MOD) || (wrq->u.freq.e > 0)) 1229 if(local->card_status != CARD_AWAITING_PARAM)
1248 err = -EOPNOTSUPP; 1230 return -EBUSY;
1249 else
1250 local->sparm.b5.a_hop_pattern = wrq->u.freq.m;
1251 break;
1252 1231
1253 /* Get current network name (ESSID) */ 1232 /* Setting by channel number */
1254 case SIOCGIWESSID: 1233 if ((fwrq->m > USA_HOP_MOD) || (fwrq->e > 0))
1255 if (wrq->u.data.pointer) 1234 err = -EOPNOTSUPP;
1256 { 1235 else
1257 char essid[IW_ESSID_MAX_SIZE + 1]; 1236 local->sparm.b5.a_hop_pattern = fwrq->m;
1258 /* Get the essid that was set */
1259 memcpy(essid, local->sparm.b5.a_current_ess_id,
1260 IW_ESSID_MAX_SIZE);
1261 essid[IW_ESSID_MAX_SIZE] = '\0';
1262
1263 /* Push it out ! */
1264 wrq->u.data.length = strlen(essid) + 1;
1265 wrq->u.data.flags = 1; /* active */
1266 if (copy_to_user(wrq->u.data.pointer, essid, sizeof(essid)))
1267 err = -EFAULT;
1268 }
1269 break;
1270 1237
1271 /* Set desired network name (ESSID) */ 1238 return err;
1272 case SIOCSIWESSID: 1239}
1273 /* Reject if card is already initialised */ 1240
1274 if(local->card_status != CARD_AWAITING_PARAM) 1241/*------------------------------------------------------------------*/
1275 { 1242/*
1276 err = -EBUSY; 1243 * Wireless Handler : get frequency
1277 break; 1244 */
1278 } 1245static int ray_get_freq(struct net_device *dev,
1246 struct iw_request_info *info,
1247 struct iw_freq *fwrq,
1248 char *extra)
1249{
1250 ray_dev_t *local = (ray_dev_t *)dev->priv;
1279 1251
1280 if (wrq->u.data.pointer) 1252 fwrq->m = local->sparm.b5.a_hop_pattern;
1281 { 1253 fwrq->e = 0;
1282 char card_essid[IW_ESSID_MAX_SIZE + 1]; 1254 return 0;
1283 1255}
1284 /* Check if we asked for `any' */ 1256
1285 if(wrq->u.data.flags == 0) 1257/*------------------------------------------------------------------*/
1286 { 1258/*
1259 * Wireless Handler : set ESSID
1260 */
1261static int ray_set_essid(struct net_device *dev,
1262 struct iw_request_info *info,
1263 struct iw_point *dwrq,
1264 char *extra)
1265{
1266 ray_dev_t *local = (ray_dev_t *)dev->priv;
1267
1268 /* Reject if card is already initialised */
1269 if(local->card_status != CARD_AWAITING_PARAM)
1270 return -EBUSY;
1271
1272 /* Check if we asked for `any' */
1273 if(dwrq->flags == 0) {
1287 /* Corey : can you do that ? */ 1274 /* Corey : can you do that ? */
1288 err = -EOPNOTSUPP; 1275 return -EOPNOTSUPP;
1289 } 1276 } else {
1290 else
1291 {
1292 /* Check the size of the string */ 1277 /* Check the size of the string */
1293 if(wrq->u.data.length > 1278 if(dwrq->length > IW_ESSID_MAX_SIZE + 1) {
1294 IW_ESSID_MAX_SIZE + 1) 1279 return -E2BIG;
1295 {
1296 err = -E2BIG;
1297 break;
1298 }
1299 if (copy_from_user(card_essid,
1300 wrq->u.data.pointer,
1301 wrq->u.data.length)) {
1302 err = -EFAULT;
1303 break;
1304 } 1280 }
1305 card_essid[IW_ESSID_MAX_SIZE] = '\0';
1306 1281
1307 /* Set the ESSID in the card */ 1282 /* Set the ESSID in the card */
1308 memcpy(local->sparm.b5.a_current_ess_id, card_essid, 1283 memset(local->sparm.b5.a_current_ess_id, 0, IW_ESSID_MAX_SIZE);
1309 IW_ESSID_MAX_SIZE); 1284 memcpy(local->sparm.b5.a_current_ess_id, extra, dwrq->length);
1310 }
1311 } 1285 }
1312 break;
1313
1314 /* Get current Access Point (BSSID in our case) */
1315 case SIOCGIWAP:
1316 memcpy(wrq->u.ap_addr.sa_data, local->bss_id, ETH_ALEN);
1317 wrq->u.ap_addr.sa_family = ARPHRD_ETHER;
1318 break;
1319
1320 /* Get the current bit-rate */
1321 case SIOCGIWRATE:
1322 if(local->net_default_tx_rate == 3)
1323 wrq->u.bitrate.value = 2000000; /* Hum... */
1324 else
1325 wrq->u.bitrate.value = local->net_default_tx_rate * 500000;
1326 wrq->u.bitrate.fixed = 0; /* We are in auto mode */
1327 break;
1328
1329 /* Set the desired bit-rate */
1330 case SIOCSIWRATE:
1331 /* Check if rate is in range */
1332 if((wrq->u.bitrate.value != 1000000) &&
1333 (wrq->u.bitrate.value != 2000000))
1334 {
1335 err = -EINVAL;
1336 break;
1337 }
1338 /* Hack for 1.5 Mb/s instead of 2 Mb/s */
1339 if((local->fw_ver == 0x55) && /* Please check */
1340 (wrq->u.bitrate.value == 2000000))
1341 local->net_default_tx_rate = 3;
1342 else
1343 local->net_default_tx_rate = wrq->u.bitrate.value/500000;
1344 break;
1345
1346 /* Get the current RTS threshold */
1347 case SIOCGIWRTS:
1348 wrq->u.rts.value = (local->sparm.b5.a_rts_threshold[0] << 8)
1349 + local->sparm.b5.a_rts_threshold[1];
1350#if WIRELESS_EXT > 8
1351 wrq->u.rts.disabled = (wrq->u.rts.value == 32767);
1352#endif /* WIRELESS_EXT > 8 */
1353 wrq->u.rts.fixed = 1;
1354 break;
1355
1356 /* Set the desired RTS threshold */
1357 case SIOCSIWRTS:
1358 {
1359 int rthr = wrq->u.rts.value;
1360 1286
1361 /* Reject if card is already initialised */ 1287 return -EINPROGRESS; /* Call commit handler */
1362 if(local->card_status != CARD_AWAITING_PARAM) 1288}
1363 {
1364 err = -EBUSY;
1365 break;
1366 }
1367 1289
1368 /* if(wrq->u.rts.fixed == 0) we should complain */ 1290/*------------------------------------------------------------------*/
1369#if WIRELESS_EXT > 8 1291/*
1370 if(wrq->u.rts.disabled) 1292 * Wireless Handler : get ESSID
1371 rthr = 32767; 1293 */
1294static int ray_get_essid(struct net_device *dev,
1295 struct iw_request_info *info,
1296 struct iw_point *dwrq,
1297 char *extra)
1298{
1299 ray_dev_t *local = (ray_dev_t *)dev->priv;
1300
1301 /* Get the essid that was set */
1302 memcpy(extra, local->sparm.b5.a_current_ess_id, IW_ESSID_MAX_SIZE);
1303 extra[IW_ESSID_MAX_SIZE] = '\0';
1304
1305 /* Push it out ! */
1306 dwrq->length = strlen(extra) + 1;
1307 dwrq->flags = 1; /* active */
1308
1309 return 0;
1310}
1311
1312/*------------------------------------------------------------------*/
1313/*
1314 * Wireless Handler : get AP address
1315 */
1316static int ray_get_wap(struct net_device *dev,
1317 struct iw_request_info *info,
1318 struct sockaddr *awrq,
1319 char *extra)
1320{
1321 ray_dev_t *local = (ray_dev_t *)dev->priv;
1322
1323 memcpy(awrq->sa_data, local->bss_id, ETH_ALEN);
1324 awrq->sa_family = ARPHRD_ETHER;
1325
1326 return 0;
1327}
1328
1329/*------------------------------------------------------------------*/
1330/*
1331 * Wireless Handler : set Bit-Rate
1332 */
1333static int ray_set_rate(struct net_device *dev,
1334 struct iw_request_info *info,
1335 struct iw_param *vwrq,
1336 char *extra)
1337{
1338 ray_dev_t *local = (ray_dev_t *)dev->priv;
1339
1340 /* Reject if card is already initialised */
1341 if(local->card_status != CARD_AWAITING_PARAM)
1342 return -EBUSY;
1343
1344 /* Check if rate is in range */
1345 if((vwrq->value != 1000000) && (vwrq->value != 2000000))
1346 return -EINVAL;
1347
1348 /* Hack for 1.5 Mb/s instead of 2 Mb/s */
1349 if((local->fw_ver == 0x55) && /* Please check */
1350 (vwrq->value == 2000000))
1351 local->net_default_tx_rate = 3;
1372 else 1352 else
1373#endif /* WIRELESS_EXT > 8 */ 1353 local->net_default_tx_rate = vwrq->value/500000;
1374 if((rthr < 0) || (rthr > 2347)) /* What's the max packet size ??? */ 1354
1375 { 1355 return 0;
1376 err = -EINVAL; 1356}
1377 break; 1357
1378 } 1358/*------------------------------------------------------------------*/
1359/*
1360 * Wireless Handler : get Bit-Rate
1361 */
1362static int ray_get_rate(struct net_device *dev,
1363 struct iw_request_info *info,
1364 struct iw_param *vwrq,
1365 char *extra)
1366{
1367 ray_dev_t *local = (ray_dev_t *)dev->priv;
1368
1369 if(local->net_default_tx_rate == 3)
1370 vwrq->value = 2000000; /* Hum... */
1371 else
1372 vwrq->value = local->net_default_tx_rate * 500000;
1373 vwrq->fixed = 0; /* We are in auto mode */
1374
1375 return 0;
1376}
1377
1378/*------------------------------------------------------------------*/
1379/*
1380 * Wireless Handler : set RTS threshold
1381 */
1382static int ray_set_rts(struct net_device *dev,
1383 struct iw_request_info *info,
1384 struct iw_param *vwrq,
1385 char *extra)
1386{
1387 ray_dev_t *local = (ray_dev_t *)dev->priv;
1388 int rthr = vwrq->value;
1389
1390 /* Reject if card is already initialised */
1391 if(local->card_status != CARD_AWAITING_PARAM)
1392 return -EBUSY;
1393
1394 /* if(wrq->u.rts.fixed == 0) we should complain */
1395 if(vwrq->disabled)
1396 rthr = 32767;
1397 else {
1398 if((rthr < 0) || (rthr > 2347)) /* What's the max packet size ??? */
1399 return -EINVAL;
1400 }
1379 local->sparm.b5.a_rts_threshold[0] = (rthr >> 8) & 0xFF; 1401 local->sparm.b5.a_rts_threshold[0] = (rthr >> 8) & 0xFF;
1380 local->sparm.b5.a_rts_threshold[1] = rthr & 0xFF; 1402 local->sparm.b5.a_rts_threshold[1] = rthr & 0xFF;
1381 }
1382 break;
1383 1403
1384 /* Get the current fragmentation threshold */ 1404 return -EINPROGRESS; /* Call commit handler */
1385 case SIOCGIWFRAG: 1405}
1386 wrq->u.frag.value = (local->sparm.b5.a_frag_threshold[0] << 8)
1387 + local->sparm.b5.a_frag_threshold[1];
1388#if WIRELESS_EXT > 8
1389 wrq->u.frag.disabled = (wrq->u.frag.value == 32767);
1390#endif /* WIRELESS_EXT > 8 */
1391 wrq->u.frag.fixed = 1;
1392 break;
1393 1406
1394 /* Set the desired fragmentation threshold */
1395 case SIOCSIWFRAG:
1396 {
1397 int fthr = wrq->u.frag.value;
1398 1407
1399 /* Reject if card is already initialised */ 1408/*------------------------------------------------------------------*/
1400 if(local->card_status != CARD_AWAITING_PARAM) 1409/*
1401 { 1410 * Wireless Handler : get RTS threshold
1402 err = -EBUSY; 1411 */
1403 break; 1412static int ray_get_rts(struct net_device *dev,
1404 } 1413 struct iw_request_info *info,
1414 struct iw_param *vwrq,
1415 char *extra)
1416{
1417 ray_dev_t *local = (ray_dev_t *)dev->priv;
1418
1419 vwrq->value = (local->sparm.b5.a_rts_threshold[0] << 8)
1420 + local->sparm.b5.a_rts_threshold[1];
1421 vwrq->disabled = (vwrq->value == 32767);
1422 vwrq->fixed = 1;
1423
1424 return 0;
1425}
1426
1427/*------------------------------------------------------------------*/
1428/*
1429 * Wireless Handler : set Fragmentation threshold
1430 */
1431static int ray_set_frag(struct net_device *dev,
1432 struct iw_request_info *info,
1433 struct iw_param *vwrq,
1434 char *extra)
1435{
1436 ray_dev_t *local = (ray_dev_t *)dev->priv;
1437 int fthr = vwrq->value;
1438
1439 /* Reject if card is already initialised */
1440 if(local->card_status != CARD_AWAITING_PARAM)
1441 return -EBUSY;
1405 1442
1406 /* if(wrq->u.frag.fixed == 0) should complain */ 1443 /* if(wrq->u.frag.fixed == 0) should complain */
1407#if WIRELESS_EXT > 8 1444 if(vwrq->disabled)
1408 if(wrq->u.frag.disabled) 1445 fthr = 32767;
1409 fthr = 32767; 1446 else {
1410 else 1447 if((fthr < 256) || (fthr > 2347)) /* To check out ! */
1411#endif /* WIRELESS_EXT > 8 */ 1448 return -EINVAL;
1412 if((fthr < 256) || (fthr > 2347)) /* To check out ! */ 1449 }
1413 {
1414 err = -EINVAL;
1415 break;
1416 }
1417 local->sparm.b5.a_frag_threshold[0] = (fthr >> 8) & 0xFF; 1450 local->sparm.b5.a_frag_threshold[0] = (fthr >> 8) & 0xFF;
1418 local->sparm.b5.a_frag_threshold[1] = fthr & 0xFF; 1451 local->sparm.b5.a_frag_threshold[1] = fthr & 0xFF;
1419 }
1420 break;
1421 1452
1422#endif /* WIRELESS_EXT > 7 */ 1453 return -EINPROGRESS; /* Call commit handler */
1423#if WIRELESS_EXT > 8 1454}
1424 1455
1425 /* Get the current mode of operation */ 1456/*------------------------------------------------------------------*/
1426 case SIOCGIWMODE: 1457/*
1427 if(local->sparm.b5.a_network_type) 1458 * Wireless Handler : get Fragmentation threshold
1428 wrq->u.mode = IW_MODE_INFRA; 1459 */
1429 else 1460static int ray_get_frag(struct net_device *dev,
1430 wrq->u.mode = IW_MODE_ADHOC; 1461 struct iw_request_info *info,
1431 break; 1462 struct iw_param *vwrq,
1463 char *extra)
1464{
1465 ray_dev_t *local = (ray_dev_t *)dev->priv;
1432 1466
1433 /* Set the current mode of operation */ 1467 vwrq->value = (local->sparm.b5.a_frag_threshold[0] << 8)
1434 case SIOCSIWMODE: 1468 + local->sparm.b5.a_frag_threshold[1];
1435 { 1469 vwrq->disabled = (vwrq->value == 32767);
1470 vwrq->fixed = 1;
1471
1472 return 0;
1473}
1474
1475/*------------------------------------------------------------------*/
1476/*
1477 * Wireless Handler : set Mode of Operation
1478 */
1479static int ray_set_mode(struct net_device *dev,
1480 struct iw_request_info *info,
1481 __u32 *uwrq,
1482 char *extra)
1483{
1484 ray_dev_t *local = (ray_dev_t *)dev->priv;
1485 int err = -EINPROGRESS; /* Call commit handler */
1436 char card_mode = 1; 1486 char card_mode = 1;
1437
1438 /* Reject if card is already initialised */
1439 if(local->card_status != CARD_AWAITING_PARAM)
1440 {
1441 err = -EBUSY;
1442 break;
1443 }
1444 1487
1445 switch (wrq->u.mode) 1488 /* Reject if card is already initialised */
1489 if(local->card_status != CARD_AWAITING_PARAM)
1490 return -EBUSY;
1491
1492 switch (*uwrq)
1446 { 1493 {
1447 case IW_MODE_ADHOC: 1494 case IW_MODE_ADHOC:
1448 card_mode = 0; 1495 card_mode = 0;
1449 // Fall through 1496 // Fall through
1450 case IW_MODE_INFRA: 1497 case IW_MODE_INFRA:
1451 local->sparm.b5.a_network_type = card_mode; 1498 local->sparm.b5.a_network_type = card_mode;
1452 break; 1499 break;
1453 default: 1500 default:
1454 err = -EINVAL; 1501 err = -EINVAL;
1455 } 1502 }
1456 }
1457 break;
1458 1503
1459#endif /* WIRELESS_EXT > 8 */ 1504 return err;
1460#if WIRELESS_EXT > 7 1505}
1461 /* ------------------ IWSPY SUPPORT ------------------ */
1462 /* Define the range (variations) of above parameters */
1463 case SIOCGIWRANGE:
1464 /* Basic checking... */
1465 if(wrq->u.data.pointer != (caddr_t) 0)
1466 {
1467 struct iw_range range;
1468 memset((char *) &range, 0, sizeof(struct iw_range));
1469
1470 /* Set the length (very important for backward compatibility) */
1471 wrq->u.data.length = sizeof(struct iw_range);
1472
1473#if WIRELESS_EXT > 10
1474 /* Set the Wireless Extension versions */
1475 range.we_version_compiled = WIRELESS_EXT;
1476 range.we_version_source = 9;
1477#endif /* WIRELESS_EXT > 10 */
1478
1479 /* Set information in the range struct */
1480 range.throughput = 1.1 * 1000 * 1000; /* Put the right number here */
1481 range.num_channels = hop_pattern_length[(int)country];
1482 range.num_frequency = 0;
1483 range.max_qual.qual = 0;
1484 range.max_qual.level = 255; /* What's the correct value ? */
1485 range.max_qual.noise = 255; /* Idem */
1486 range.num_bitrates = 2;
1487 range.bitrate[0] = 1000000; /* 1 Mb/s */
1488 range.bitrate[1] = 2000000; /* 2 Mb/s */
1489
1490 /* Copy structure to the user buffer */
1491 if(copy_to_user(wrq->u.data.pointer, &range,
1492 sizeof(struct iw_range)))
1493 err = -EFAULT;
1494 }
1495 break;
1496 1506
1497#ifdef WIRELESS_SPY 1507/*------------------------------------------------------------------*/
1498 /* Set addresses to spy */ 1508/*
1499 case SIOCSIWSPY: 1509 * Wireless Handler : get Mode of Operation
1500 /* Check the number of addresses */ 1510 */
1501 if(wrq->u.data.length > IW_MAX_SPY) 1511static int ray_get_mode(struct net_device *dev,
1502 { 1512 struct iw_request_info *info,
1503 err = -E2BIG; 1513 __u32 *uwrq,
1504 break; 1514 char *extra)
1505 } 1515{
1506 local->spy_number = wrq->u.data.length; 1516 ray_dev_t *local = (ray_dev_t *)dev->priv;
1507 1517
1508 /* If there is some addresses to copy */ 1518 if(local->sparm.b5.a_network_type)
1509 if(local->spy_number > 0) 1519 *uwrq = IW_MODE_INFRA;
1510 { 1520 else
1511 int i; 1521 *uwrq = IW_MODE_ADHOC;
1512
1513 /* Copy addresses to the driver */
1514 if(copy_from_user(address, wrq->u.data.pointer,
1515 sizeof(struct sockaddr) * local->spy_number))
1516 {
1517 err = -EFAULT;
1518 break;
1519 }
1520
1521 /* Copy addresses to the lp structure */
1522 for(i = 0; i < local->spy_number; i++)
1523 memcpy(local->spy_address[i], address[i].sa_data, ETH_ALEN);
1524
1525 /* Reset structure... */
1526 memset(local->spy_stat, 0x00, sizeof(iw_qual) * IW_MAX_SPY);
1527
1528#ifdef DEBUG_IOCTL_INFO
1529 printk(KERN_DEBUG "SetSpy - Set of new addresses is :\n");
1530 for(i = 0; i < local->spy_number; i++)
1531 printk(KERN_DEBUG "%02X:%02X:%02X:%02X:%02X:%02X\n",
1532 local->spy_address[i][0],
1533 local->spy_address[i][1],
1534 local->spy_address[i][2],
1535 local->spy_address[i][3],
1536 local->spy_address[i][4],
1537 local->spy_address[i][5]);
1538#endif /* DEBUG_IOCTL_INFO */
1539 }
1540 break;
1541 1522
1542 /* Get the spy list and spy stats */ 1523 return 0;
1543 case SIOCGIWSPY: 1524}
1544 /* Set the number of addresses */
1545 wrq->u.data.length = local->spy_number;
1546 1525
1547 /* If the user want to have the addresses back... */ 1526/*------------------------------------------------------------------*/
1548 if((local->spy_number > 0) && (wrq->u.data.pointer != (caddr_t) 0)) 1527/*
1549 { 1528 * Wireless Handler : get range info
1550 int i; 1529 */
1551 1530static int ray_get_range(struct net_device *dev,
1552 /* Copy addresses from the lp structure */ 1531 struct iw_request_info *info,
1553 for(i = 0; i < local->spy_number; i++) 1532 struct iw_point *dwrq,
1554 { 1533 char *extra)
1555 memcpy(address[i].sa_data, local->spy_address[i], ETH_ALEN); 1534{
1556 address[i].sa_family = ARPHRD_ETHER; 1535 struct iw_range *range = (struct iw_range *) extra;
1557 } 1536
1558 1537 memset((char *) range, 0, sizeof(struct iw_range));
1559 /* Copy addresses to the user buffer */ 1538
1560 if(copy_to_user(wrq->u.data.pointer, address, 1539 /* Set the length (very important for backward compatibility) */
1561 sizeof(struct sockaddr) * local->spy_number)) 1540 dwrq->length = sizeof(struct iw_range);
1562 { 1541
1563 err = -EFAULT; 1542 /* Set the Wireless Extension versions */
1564 break; 1543 range->we_version_compiled = WIRELESS_EXT;
1565 } 1544 range->we_version_source = 9;
1566 1545
1567 /* Copy stats to the user buffer (just after) */ 1546 /* Set information in the range struct */
1568 if(copy_to_user(wrq->u.data.pointer + 1547 range->throughput = 1.1 * 1000 * 1000; /* Put the right number here */
1569 (sizeof(struct sockaddr) * local->spy_number), 1548 range->num_channels = hop_pattern_length[(int)country];
1570 local->spy_stat, sizeof(iw_qual) * local->spy_number)) 1549 range->num_frequency = 0;
1571 { 1550 range->max_qual.qual = 0;
1572 err = -EFAULT; 1551 range->max_qual.level = 255; /* What's the correct value ? */
1573 break; 1552 range->max_qual.noise = 255; /* Idem */
1574 } 1553 range->num_bitrates = 2;
1575 1554 range->bitrate[0] = 1000000; /* 1 Mb/s */
1576 /* Reset updated flags */ 1555 range->bitrate[1] = 2000000; /* 2 Mb/s */
1577 for(i = 0; i < local->spy_number; i++) 1556 return 0;
1578 local->spy_stat[i].updated = 0x0; 1557}
1579 } /* if(pointer != NULL) */
1580
1581 break;
1582#endif /* WIRELESS_SPY */
1583 1558
1584 /* ------------------ PRIVATE IOCTL ------------------ */ 1559/*------------------------------------------------------------------*/
1585#ifndef SIOCIWFIRSTPRIV 1560/*
1586#define SIOCIWFIRSTPRIV SIOCDEVPRIVATE 1561 * Wireless Private Handler : set framing mode
1587#endif /* SIOCIWFIRSTPRIV */ 1562 */
1588#define SIOCSIPFRAMING SIOCIWFIRSTPRIV /* Set framing mode */ 1563static int ray_set_framing(struct net_device *dev,
1589#define SIOCGIPFRAMING SIOCIWFIRSTPRIV + 1 /* Get framing mode */ 1564 struct iw_request_info *info,
1590#define SIOCGIPCOUNTRY SIOCIWFIRSTPRIV + 3 /* Get country code */ 1565 union iwreq_data *wrqu,
1591 case SIOCSIPFRAMING: 1566 char *extra)
1592 if(!capable(CAP_NET_ADMIN)) /* For private IOCTLs, we need to check permissions */ 1567{
1593 { 1568 translate = *(extra); /* Set framing mode */
1594 err = -EPERM;
1595 break;
1596 }
1597 translate = *(wrq->u.name); /* Set framing mode */
1598 break;
1599 case SIOCGIPFRAMING:
1600 *(wrq->u.name) = translate;
1601 break;
1602 case SIOCGIPCOUNTRY:
1603 *(wrq->u.name) = country;
1604 break;
1605 case SIOCGIWPRIV:
1606 /* Export our "private" intercace */
1607 if(wrq->u.data.pointer != (caddr_t) 0)
1608 {
1609 struct iw_priv_args priv[] =
1610 { /* cmd, set_args, get_args, name */
1611 { SIOCSIPFRAMING, IW_PRIV_TYPE_BYTE | IW_PRIV_SIZE_FIXED | 1, 0, "set_framing" },
1612 { SIOCGIPFRAMING, 0, IW_PRIV_TYPE_BYTE | IW_PRIV_SIZE_FIXED | 1, "get_framing" },
1613 { SIOCGIPCOUNTRY, 0, IW_PRIV_TYPE_BYTE | IW_PRIV_SIZE_FIXED | 1, "get_country" },
1614 };
1615 /* Set the number of ioctl available */
1616 wrq->u.data.length = 3;
1617 /* Copy structure to the user buffer */
1618 if(copy_to_user(wrq->u.data.pointer, (u_char *) priv,
1619 sizeof(priv)))
1620 err = -EFAULT;
1621 }
1622 break;
1623#endif /* WIRELESS_EXT > 7 */
1624 1569
1570 return 0;
1571}
1625 1572
1626 default: 1573/*------------------------------------------------------------------*/
1627 DEBUG(0,"ray_dev_ioctl cmd = 0x%x\n", cmd); 1574/*
1628 err = -EOPNOTSUPP; 1575 * Wireless Private Handler : get framing mode
1629 } 1576 */
1630 return err; 1577static int ray_get_framing(struct net_device *dev,
1631} /* end ray_dev_ioctl */ 1578 struct iw_request_info *info,
1632/*===========================================================================*/ 1579 union iwreq_data *wrqu,
1633#if WIRELESS_EXT > 7 /* If wireless extension exist in the kernel */ 1580 char *extra)
1581{
1582 *(extra) = translate;
1583
1584 return 0;
1585}
1586
1587/*------------------------------------------------------------------*/
1588/*
1589 * Wireless Private Handler : get country
1590 */
1591static int ray_get_country(struct net_device *dev,
1592 struct iw_request_info *info,
1593 union iwreq_data *wrqu,
1594 char *extra)
1595{
1596 *(extra) = country;
1597
1598 return 0;
1599}
1600
1601/*------------------------------------------------------------------*/
1602/*
1603 * Commit handler : called after a bunch of SET operations
1604 */
1605static int ray_commit(struct net_device *dev,
1606 struct iw_request_info *info, /* NULL */
1607 void *zwrq, /* NULL */
1608 char *extra) /* NULL */
1609{
1610 return 0;
1611}
1612
1613/*------------------------------------------------------------------*/
1614/*
1615 * Stats handler : return Wireless Stats
1616 */
1634static iw_stats * ray_get_wireless_stats(struct net_device * dev) 1617static iw_stats * ray_get_wireless_stats(struct net_device * dev)
1635{ 1618{
1636 ray_dev_t * local = (ray_dev_t *) dev->priv; 1619 ray_dev_t * local = (ray_dev_t *) dev->priv;
@@ -1642,13 +1625,13 @@ static iw_stats * ray_get_wireless_stats(struct net_device * dev)
1642 1625
1643 local->wstats.status = local->card_status; 1626 local->wstats.status = local->card_status;
1644#ifdef WIRELESS_SPY 1627#ifdef WIRELESS_SPY
1645 if((local->spy_number > 0) && (local->sparm.b5.a_network_type == 0)) 1628 if((local->spy_data.spy_number > 0) && (local->sparm.b5.a_network_type == 0))
1646 { 1629 {
1647 /* Get it from the first node in spy list */ 1630 /* Get it from the first node in spy list */
1648 local->wstats.qual.qual = local->spy_stat[0].qual; 1631 local->wstats.qual.qual = local->spy_data.spy_stat[0].qual;
1649 local->wstats.qual.level = local->spy_stat[0].level; 1632 local->wstats.qual.level = local->spy_data.spy_stat[0].level;
1650 local->wstats.qual.noise = local->spy_stat[0].noise; 1633 local->wstats.qual.noise = local->spy_data.spy_stat[0].noise;
1651 local->wstats.qual.updated = local->spy_stat[0].updated; 1634 local->wstats.qual.updated = local->spy_data.spy_stat[0].updated;
1652 } 1635 }
1653#endif /* WIRELESS_SPY */ 1636#endif /* WIRELESS_SPY */
1654 1637
@@ -1659,7 +1642,65 @@ static iw_stats * ray_get_wireless_stats(struct net_device * dev)
1659 1642
1660 return &local->wstats; 1643 return &local->wstats;
1661} /* end ray_get_wireless_stats */ 1644} /* end ray_get_wireless_stats */
1662#endif /* WIRELESS_EXT > 7 */ 1645
1646/*------------------------------------------------------------------*/
1647/*
1648 * Structures to export the Wireless Handlers
1649 */
1650
1651static const iw_handler ray_handler[] = {
1652 [SIOCSIWCOMMIT-SIOCIWFIRST] (iw_handler) ray_commit,
1653 [SIOCGIWNAME -SIOCIWFIRST] (iw_handler) ray_get_name,
1654 [SIOCSIWFREQ -SIOCIWFIRST] (iw_handler) ray_set_freq,
1655 [SIOCGIWFREQ -SIOCIWFIRST] (iw_handler) ray_get_freq,
1656 [SIOCSIWMODE -SIOCIWFIRST] (iw_handler) ray_set_mode,
1657 [SIOCGIWMODE -SIOCIWFIRST] (iw_handler) ray_get_mode,
1658 [SIOCGIWRANGE -SIOCIWFIRST] (iw_handler) ray_get_range,
1659#ifdef WIRELESS_SPY
1660 [SIOCSIWSPY -SIOCIWFIRST] (iw_handler) iw_handler_set_spy,
1661 [SIOCGIWSPY -SIOCIWFIRST] (iw_handler) iw_handler_get_spy,
1662 [SIOCSIWTHRSPY-SIOCIWFIRST] (iw_handler) iw_handler_set_thrspy,
1663 [SIOCGIWTHRSPY-SIOCIWFIRST] (iw_handler) iw_handler_get_thrspy,
1664#endif /* WIRELESS_SPY */
1665 [SIOCGIWAP -SIOCIWFIRST] (iw_handler) ray_get_wap,
1666 [SIOCSIWESSID -SIOCIWFIRST] (iw_handler) ray_set_essid,
1667 [SIOCGIWESSID -SIOCIWFIRST] (iw_handler) ray_get_essid,
1668 [SIOCSIWRATE -SIOCIWFIRST] (iw_handler) ray_set_rate,
1669 [SIOCGIWRATE -SIOCIWFIRST] (iw_handler) ray_get_rate,
1670 [SIOCSIWRTS -SIOCIWFIRST] (iw_handler) ray_set_rts,
1671 [SIOCGIWRTS -SIOCIWFIRST] (iw_handler) ray_get_rts,
1672 [SIOCSIWFRAG -SIOCIWFIRST] (iw_handler) ray_set_frag,
1673 [SIOCGIWFRAG -SIOCIWFIRST] (iw_handler) ray_get_frag,
1674};
1675
1676#define SIOCSIPFRAMING SIOCIWFIRSTPRIV /* Set framing mode */
1677#define SIOCGIPFRAMING SIOCIWFIRSTPRIV + 1 /* Get framing mode */
1678#define SIOCGIPCOUNTRY SIOCIWFIRSTPRIV + 3 /* Get country code */
1679
1680static const iw_handler ray_private_handler[] = {
1681 [0] (iw_handler) ray_set_framing,
1682 [1] (iw_handler) ray_get_framing,
1683 [3] (iw_handler) ray_get_country,
1684};
1685
1686static const struct iw_priv_args ray_private_args[] = {
1687/* cmd, set_args, get_args, name */
1688{ SIOCSIPFRAMING, IW_PRIV_TYPE_BYTE | IW_PRIV_SIZE_FIXED | 1, 0, "set_framing" },
1689{ SIOCGIPFRAMING, 0, IW_PRIV_TYPE_BYTE | IW_PRIV_SIZE_FIXED | 1, "get_framing" },
1690{ SIOCGIPCOUNTRY, 0, IW_PRIV_TYPE_BYTE | IW_PRIV_SIZE_FIXED | 1, "get_country" },
1691};
1692
1693static const struct iw_handler_def ray_handler_def =
1694{
1695 .num_standard = sizeof(ray_handler)/sizeof(iw_handler),
1696 .num_private = sizeof(ray_private_handler)/sizeof(iw_handler),
1697 .num_private_args = sizeof(ray_private_args)/sizeof(struct iw_priv_args),
1698 .standard = ray_handler,
1699 .private = ray_private_handler,
1700 .private_args = ray_private_args,
1701 .get_wireless_stats = ray_get_wireless_stats,
1702};
1703
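The tables above rely on GCC's labelled-element initializers: a standard handler lives at index cmd - SIOCIWFIRST and a private handler at index cmd - SIOCIWFIRSTPRIV, which is why ray_private_handler has a hole at index 2 (SIOCGIPCOUNTRY is SIOCIWFIRSTPRIV + 3). The wireless core's real dispatcher in net/core/wireless.c is more involved, but the lookup this layout implies is roughly the hedged sketch below:

/* Illustrative only: how a standard handler is located in a table laid
 * out like ray_handler[] above. */
static iw_handler example_std_lookup(const struct iw_handler_def *def,
				     unsigned int cmd)
{
	unsigned int idx = cmd - SIOCIWFIRST;

	if (idx >= def->num_standard)
		return NULL;
	return def->standard[idx];	/* NULL for unimplemented requests */
}
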
1663/*===========================================================================*/ 1704/*===========================================================================*/
1664static int ray_open(struct net_device *dev) 1705static int ray_open(struct net_device *dev)
1665{ 1706{
@@ -2392,20 +2433,15 @@ static void rx_data(struct net_device *dev, struct rcs __iomem *prcs, unsigned i
2392 /*local->wstats.qual.noise = none ? */ 2433 /*local->wstats.qual.noise = none ? */
2393 local->wstats.qual.updated = 0x2; 2434 local->wstats.qual.updated = 0x2;
2394 } 2435 }
2395 /* Now, for the addresses in the spy list */ 2436 /* Now, update the spy stuff */
2396 { 2437 {
2397 int i; 2438 struct iw_quality wstats;
2398 /* Look all addresses */ 2439 wstats.level = siglev;
2399 for(i = 0; i < local->spy_number; i++) 2440 /* wstats.noise = none ? */
2400 /* If match */ 2441 /* wstats.qual = none ? */
2401 if(!memcmp(linksrcaddr, local->spy_address[i], ETH_ALEN)) 2442 wstats.updated = 0x2;
2402 { 2443 /* Update spy records */
2403 /* Update statistics */ 2444 wireless_spy_update(dev, linksrcaddr, &wstats);
2404 /*local->spy_stat[i].qual = none ? */
2405 local->spy_stat[i].level = siglev;
2406 /*local->spy_stat[i].noise = none ? */
2407 local->spy_stat[i].updated = 0x2;
2408 }
2409 } 2445 }
2410#endif /* WIRELESS_SPY */ 2446#endif /* WIRELESS_SPY */
2411} /* end rx_data */ 2447} /* end rx_data */
diff --git a/drivers/net/wireless/ray_cs.h b/drivers/net/wireless/ray_cs.h
index c77afa14fa86..42660fe64bfd 100644
--- a/drivers/net/wireless/ray_cs.h
+++ b/drivers/net/wireless/ray_cs.h
@@ -63,13 +63,10 @@ typedef struct ray_dev_t {
63 UCHAR last_rsl; 63 UCHAR last_rsl;
64 int beacon_rxed; 64 int beacon_rxed;
65 struct beacon_rx last_bcn; 65 struct beacon_rx last_bcn;
66#ifdef WIRELESS_EXT
67 iw_stats wstats; /* Wireless specific stats */ 66 iw_stats wstats; /* Wireless specific stats */
68#endif
69#ifdef WIRELESS_SPY 67#ifdef WIRELESS_SPY
70 int spy_number; /* Number of addresses to spy */ 68 struct iw_spy_data spy_data;
71 mac_addr spy_address[IW_MAX_SPY + 1]; /* The addresses to spy */ 69 struct iw_public_data wireless_data;
72 iw_qual spy_stat[IW_MAX_SPY + 1]; /* Statistics gathered */
73#endif /* WIRELESS_SPY */ 70#endif /* WIRELESS_SPY */
74 71
75} ray_dev_t; 72} ray_dev_t;
diff --git a/drivers/net/wireless/wl3501.h b/drivers/net/wireless/wl3501.h
index b5719437e981..7fcbe589c3f2 100644
--- a/drivers/net/wireless/wl3501.h
+++ b/drivers/net/wireless/wl3501.h
@@ -609,6 +609,7 @@ struct wl3501_card {
609 struct net_device_stats stats; 609 struct net_device_stats stats;
610 struct iw_statistics wstats; 610 struct iw_statistics wstats;
611 struct iw_spy_data spy_data; 611 struct iw_spy_data spy_data;
612 struct iw_public_data wireless_data;
612 struct dev_node_t node; 613 struct dev_node_t node;
613}; 614};
614#endif 615#endif
diff --git a/drivers/net/wireless/wl3501_cs.c b/drivers/net/wireless/wl3501_cs.c
index 7cc5edbf6ede..3f8c27f0871b 100644
--- a/drivers/net/wireless/wl3501_cs.c
+++ b/drivers/net/wireless/wl3501_cs.c
@@ -1944,7 +1944,7 @@ static const iw_handler wl3501_handler[] = {
1944static const struct iw_handler_def wl3501_handler_def = { 1944static const struct iw_handler_def wl3501_handler_def = {
1945 .num_standard = sizeof(wl3501_handler) / sizeof(iw_handler), 1945 .num_standard = sizeof(wl3501_handler) / sizeof(iw_handler),
1946 .standard = (iw_handler *)wl3501_handler, 1946 .standard = (iw_handler *)wl3501_handler,
1947 .spy_offset = offsetof(struct wl3501_card, spy_data), 1947 .get_wireless_stats = wl3501_get_wireless_stats,
1948}; 1948};
1949 1949
1950/** 1950/**
@@ -1961,6 +1961,7 @@ static dev_link_t *wl3501_attach(void)
1961 client_reg_t client_reg; 1961 client_reg_t client_reg;
1962 dev_link_t *link; 1962 dev_link_t *link;
1963 struct net_device *dev; 1963 struct net_device *dev;
1964 struct wl3501_card *this;
1964 int ret; 1965 int ret;
1965 1966
1966 /* Initialize the dev_link_t structure */ 1967 /* Initialize the dev_link_t structure */
@@ -1995,7 +1996,9 @@ static dev_link_t *wl3501_attach(void)
1995 dev->tx_timeout = wl3501_tx_timeout; 1996 dev->tx_timeout = wl3501_tx_timeout;
1996 dev->watchdog_timeo = 5 * HZ; 1997 dev->watchdog_timeo = 5 * HZ;
1997 dev->get_stats = wl3501_get_stats; 1998 dev->get_stats = wl3501_get_stats;
1998 dev->get_wireless_stats = wl3501_get_wireless_stats; 1999 this = dev->priv;
2000 this->wireless_data.spy_data = &this->spy_data;
2001 dev->wireless_data = &this->wireless_data;
1999 dev->wireless_handlers = (struct iw_handler_def *)&wl3501_handler_def; 2002 dev->wireless_handlers = (struct iw_handler_def *)&wl3501_handler_def;
2000 SET_ETHTOOL_OPS(dev, &ops); 2003 SET_ETHTOOL_OPS(dev, &ops);
2001 netif_stop_queue(dev); 2004 netif_stop_queue(dev);
diff --git a/drivers/parport/parport_pc.c b/drivers/parport/parport_pc.c
index 4598c6a9212d..97f723179f62 100644
--- a/drivers/parport/parport_pc.c
+++ b/drivers/parport/parport_pc.c
@@ -2739,6 +2739,7 @@ enum parport_pc_pci_cards {
2739 syba_2p_epp, 2739 syba_2p_epp,
2740 syba_1p_ecp, 2740 syba_1p_ecp,
2741 titan_010l, 2741 titan_010l,
2742 titan_1284p1,
2742 titan_1284p2, 2743 titan_1284p2,
2743 avlab_1p, 2744 avlab_1p,
2744 avlab_2p, 2745 avlab_2p,
@@ -2811,6 +2812,7 @@ static struct parport_pc_pci {
2811 /* syba_2p_epp AP138B */ { 2, { { 0, 0x078 }, { 0, 0x178 }, } }, 2812 /* syba_2p_epp AP138B */ { 2, { { 0, 0x078 }, { 0, 0x178 }, } },
2812 /* syba_1p_ecp W83787 */ { 1, { { 0, 0x078 }, } }, 2813 /* syba_1p_ecp W83787 */ { 1, { { 0, 0x078 }, } },
2813 /* titan_010l */ { 1, { { 3, -1 }, } }, 2814 /* titan_010l */ { 1, { { 3, -1 }, } },
2815 /* titan_1284p1 */ { 1, { { 0, 1 }, } },
2814 /* titan_1284p2 */ { 2, { { 0, 1 }, { 2, 3 }, } }, 2816 /* titan_1284p2 */ { 2, { { 0, 1 }, { 2, 3 }, } },
2815 /* avlab_1p */ { 1, { { 0, 1}, } }, 2817 /* avlab_1p */ { 1, { { 0, 1}, } },
2816 /* avlab_2p */ { 2, { { 0, 1}, { 2, 3 },} }, 2818 /* avlab_2p */ { 2, { { 0, 1}, { 2, 3 },} },
@@ -2884,6 +2886,7 @@ static struct pci_device_id parport_pc_pci_tbl[] = {
2884 PCI_ANY_ID, PCI_ANY_ID, 0, 0, syba_1p_ecp }, 2886 PCI_ANY_ID, PCI_ANY_ID, 0, 0, syba_1p_ecp },
2885 { PCI_VENDOR_ID_TITAN, PCI_DEVICE_ID_TITAN_010L, 2887 { PCI_VENDOR_ID_TITAN, PCI_DEVICE_ID_TITAN_010L,
2886 PCI_ANY_ID, PCI_ANY_ID, 0, 0, titan_010l }, 2888 PCI_ANY_ID, PCI_ANY_ID, 0, 0, titan_010l },
2889 { 0x9710, 0x9805, 0x1000, 0x0010, 0, 0, titan_1284p1 },
2887 { 0x9710, 0x9815, 0x1000, 0x0020, 0, 0, titan_1284p2 }, 2890 { 0x9710, 0x9815, 0x1000, 0x0020, 0, 0, titan_1284p2 },
2888 /* PCI_VENDOR_ID_AVLAB/Intek21 has another bunch of cards ...*/ 2891 /* PCI_VENDOR_ID_AVLAB/Intek21 has another bunch of cards ...*/
2889 { 0x14db, 0x2120, PCI_ANY_ID, PCI_ANY_ID, 0, 0, avlab_1p}, /* AFAVLAB_TK9902 */ 2892 { 0x14db, 0x2120, PCI_ANY_ID, PCI_ANY_ID, 0, 0, avlab_1p}, /* AFAVLAB_TK9902 */
diff --git a/drivers/pci/msi.c b/drivers/pci/msi.c
index 2b85aa39f954..532f73bb2224 100644
--- a/drivers/pci/msi.c
+++ b/drivers/pci/msi.c
@@ -91,6 +91,7 @@ static void set_msi_affinity(unsigned int vector, cpumask_t cpu_mask)
91{ 91{
92 struct msi_desc *entry; 92 struct msi_desc *entry;
93 struct msg_address address; 93 struct msg_address address;
94 unsigned int irq = vector;
94 95
95 entry = (struct msi_desc *)msi_desc[vector]; 96 entry = (struct msi_desc *)msi_desc[vector];
96 if (!entry || !entry->dev) 97 if (!entry || !entry->dev)
@@ -112,6 +113,7 @@ static void set_msi_affinity(unsigned int vector, cpumask_t cpu_mask)
112 entry->msi_attrib.current_cpu = cpu_mask_to_apicid(cpu_mask); 113 entry->msi_attrib.current_cpu = cpu_mask_to_apicid(cpu_mask);
113 pci_write_config_dword(entry->dev, msi_lower_address_reg(pos), 114 pci_write_config_dword(entry->dev, msi_lower_address_reg(pos),
114 address.lo_address.value); 115 address.lo_address.value);
116 set_native_irq_info(irq, cpu_mask);
115 break; 117 break;
116 } 118 }
117 case PCI_CAP_ID_MSIX: 119 case PCI_CAP_ID_MSIX:
@@ -125,22 +127,13 @@ static void set_msi_affinity(unsigned int vector, cpumask_t cpu_mask)
125 MSI_TARGET_CPU_SHIFT); 127 MSI_TARGET_CPU_SHIFT);
126 entry->msi_attrib.current_cpu = cpu_mask_to_apicid(cpu_mask); 128 entry->msi_attrib.current_cpu = cpu_mask_to_apicid(cpu_mask);
127 writel(address.lo_address.value, entry->mask_base + offset); 129 writel(address.lo_address.value, entry->mask_base + offset);
130 set_native_irq_info(irq, cpu_mask);
128 break; 131 break;
129 } 132 }
130 default: 133 default:
131 break; 134 break;
132 } 135 }
133} 136}
134
135#ifdef CONFIG_IRQBALANCE
136static inline void move_msi(int vector)
137{
138 if (!cpus_empty(pending_irq_balance_cpumask[vector])) {
139 set_msi_affinity(vector, pending_irq_balance_cpumask[vector]);
140 cpus_clear(pending_irq_balance_cpumask[vector]);
141 }
142}
143#endif /* CONFIG_IRQBALANCE */
144#endif /* CONFIG_SMP */ 137#endif /* CONFIG_SMP */
145 138
146static void mask_MSI_irq(unsigned int vector) 139static void mask_MSI_irq(unsigned int vector)
@@ -191,13 +184,13 @@ static void shutdown_msi_irq(unsigned int vector)
191 184
192static void end_msi_irq_wo_maskbit(unsigned int vector) 185static void end_msi_irq_wo_maskbit(unsigned int vector)
193{ 186{
194 move_msi(vector); 187 move_native_irq(vector);
195 ack_APIC_irq(); 188 ack_APIC_irq();
196} 189}
197 190
198static void end_msi_irq_w_maskbit(unsigned int vector) 191static void end_msi_irq_w_maskbit(unsigned int vector)
199{ 192{
200 move_msi(vector); 193 move_native_irq(vector);
201 unmask_MSI_irq(vector); 194 unmask_MSI_irq(vector);
202 ack_APIC_irq(); 195 ack_APIC_irq();
203} 196}
diff --git a/drivers/pci/msi.h b/drivers/pci/msi.h
index 390f1851c0f1..402136a5c9e4 100644
--- a/drivers/pci/msi.h
+++ b/drivers/pci/msi.h
@@ -19,7 +19,6 @@
19#define NR_HP_RESERVED_VECTORS 20 19#define NR_HP_RESERVED_VECTORS 20
20 20
21extern int vector_irq[NR_VECTORS]; 21extern int vector_irq[NR_VECTORS];
22extern cpumask_t pending_irq_balance_cpumask[NR_IRQS];
23extern void (*interrupt[NR_IRQS])(void); 22extern void (*interrupt[NR_IRQS])(void);
24extern int pci_vector_resources(int last, int nr_released); 23extern int pci_vector_resources(int last, int nr_released);
25 24
@@ -29,10 +28,6 @@ extern int pci_vector_resources(int last, int nr_released);
29#define set_msi_irq_affinity NULL 28#define set_msi_irq_affinity NULL
30#endif 29#endif
31 30
32#ifndef CONFIG_IRQBALANCE
33static inline void move_msi(int vector) {}
34#endif
35
36/* 31/*
37 * MSI-X Address Register 32 * MSI-X Address Register
38 */ 33 */
diff --git a/drivers/pcmcia/topic.h b/drivers/pcmcia/topic.h
index be420bb29113..edccfa5bb400 100644
--- a/drivers/pcmcia/topic.h
+++ b/drivers/pcmcia/topic.h
@@ -101,6 +101,8 @@
101#define TOPIC97_AVS_AUDIO_CONTROL 0x02 101#define TOPIC97_AVS_AUDIO_CONTROL 0x02
102#define TOPIC97_AVS_VIDEO_CONTROL 0x01 102#define TOPIC97_AVS_VIDEO_CONTROL 0x01
103 103
104#define TOPIC_EXCA_IF_CONTROL 0x3e /* 8 bit */
105#define TOPIC_EXCA_IFC_33V_ENA 0x01
104 106
105static void topic97_zoom_video(struct pcmcia_socket *sock, int onoff) 107static void topic97_zoom_video(struct pcmcia_socket *sock, int onoff)
106{ 108{
@@ -137,4 +139,19 @@ static int topic97_override(struct yenta_socket *socket)
137 return 0; 139 return 0;
138} 140}
139 141
142
143static int topic95_override(struct yenta_socket *socket)
144{
145 u8 fctrl;
146
147 /* enable 3.3V support for 16bit cards */
148 fctrl = exca_readb(socket, TOPIC_EXCA_IF_CONTROL);
149 exca_writeb(socket, TOPIC_EXCA_IF_CONTROL, fctrl | TOPIC_EXCA_IFC_33V_ENA);
150
151 /* tell yenta to use exca registers to power 16bit cards */
152 socket->flags |= YENTA_16BIT_POWER_EXCA | YENTA_16BIT_POWER_DF;
153
154 return 0;
155}
156
140#endif /* _LINUX_TOPIC_H */ 157#endif /* _LINUX_TOPIC_H */
diff --git a/drivers/pcmcia/yenta_socket.c b/drivers/pcmcia/yenta_socket.c
index 62fd705203fb..0347a29f297b 100644
--- a/drivers/pcmcia/yenta_socket.c
+++ b/drivers/pcmcia/yenta_socket.c
@@ -184,22 +184,52 @@ static int yenta_get_status(struct pcmcia_socket *sock, unsigned int *value)
184 return 0; 184 return 0;
185} 185}
186 186
187static int yenta_Vcc_power(u32 control) 187static void yenta_get_power(struct yenta_socket *socket, socket_state_t *state)
188{ 188{
189 switch (control & CB_SC_VCC_MASK) { 189 if (!(cb_readl(socket, CB_SOCKET_STATE) & CB_CBCARD) &&
190 case CB_SC_VCC_5V: return 50; 190 (socket->flags & YENTA_16BIT_POWER_EXCA)) {
191 case CB_SC_VCC_3V: return 33; 191 u8 reg, vcc, vpp;
192 default: return 0; 192
193 } 193 reg = exca_readb(socket, I365_POWER);
194} 194 vcc = reg & I365_VCC_MASK;
195 vpp = reg & I365_VPP1_MASK;
196 state->Vcc = state->Vpp = 0;
197
198 if (socket->flags & YENTA_16BIT_POWER_DF) {
199 if (vcc == I365_VCC_3V)
200 state->Vcc = 33;
201 if (vcc == I365_VCC_5V)
202 state->Vcc = 50;
203 if (vpp == I365_VPP1_5V)
204 state->Vpp = state->Vcc;
205 if (vpp == I365_VPP1_12V)
206 state->Vpp = 120;
207 } else {
208 if (reg & I365_VCC_5V) {
209 state->Vcc = 50;
210 if (vpp == I365_VPP1_5V)
211 state->Vpp = 50;
212 if (vpp == I365_VPP1_12V)
213 state->Vpp = 120;
214 }
215 }
216 } else {
217 u32 control;
195 218
196static int yenta_Vpp_power(u32 control) 219 control = cb_readl(socket, CB_SOCKET_CONTROL);
197{ 220
198 switch (control & CB_SC_VPP_MASK) { 221 switch (control & CB_SC_VCC_MASK) {
199 case CB_SC_VPP_12V: return 120; 222 case CB_SC_VCC_5V: state->Vcc = 50; break;
200 case CB_SC_VPP_5V: return 50; 223 case CB_SC_VCC_3V: state->Vcc = 33; break;
201 case CB_SC_VPP_3V: return 33; 224 default: state->Vcc = 0;
202 default: return 0; 225 }
226
227 switch (control & CB_SC_VPP_MASK) {
228 case CB_SC_VPP_12V: state->Vpp = 120; break;
229 case CB_SC_VPP_5V: state->Vpp = 50; break;
230 case CB_SC_VPP_3V: state->Vpp = 33; break;
231 default: state->Vpp = 0;
232 }
203 } 233 }
204} 234}
205 235
@@ -211,8 +241,7 @@ static int yenta_get_socket(struct pcmcia_socket *sock, socket_state_t *state)
211 241
212 control = cb_readl(socket, CB_SOCKET_CONTROL); 242 control = cb_readl(socket, CB_SOCKET_CONTROL);
213 243
214 state->Vcc = yenta_Vcc_power(control); 244 yenta_get_power(socket, state);
215 state->Vpp = yenta_Vpp_power(control);
216 state->io_irq = socket->io_irq; 245 state->io_irq = socket->io_irq;
217 246
218 if (cb_readl(socket, CB_SOCKET_STATE) & CB_CBCARD) { 247 if (cb_readl(socket, CB_SOCKET_STATE) & CB_CBCARD) {
@@ -246,19 +275,54 @@ static int yenta_get_socket(struct pcmcia_socket *sock, socket_state_t *state)
246 275
247static void yenta_set_power(struct yenta_socket *socket, socket_state_t *state) 276static void yenta_set_power(struct yenta_socket *socket, socket_state_t *state)
248{ 277{
 249 u32 reg = 0; /* CB_SC_STPCLK? */ 278 /* some bridges require using the ExCA registers to power 16bit cards */
250 switch (state->Vcc) { 279 if (!(cb_readl(socket, CB_SOCKET_STATE) & CB_CBCARD) &&
251 case 33: reg = CB_SC_VCC_3V; break; 280 (socket->flags & YENTA_16BIT_POWER_EXCA)) {
252 case 50: reg = CB_SC_VCC_5V; break; 281 u8 reg, old;
253 default: reg = 0; break; 282 reg = old = exca_readb(socket, I365_POWER);
254 } 283 reg &= ~(I365_VCC_MASK | I365_VPP1_MASK | I365_VPP2_MASK);
255 switch (state->Vpp) { 284
256 case 33: reg |= CB_SC_VPP_3V; break; 285 /* i82365SL-DF style */
257 case 50: reg |= CB_SC_VPP_5V; break; 286 if (socket->flags & YENTA_16BIT_POWER_DF) {
258 case 120: reg |= CB_SC_VPP_12V; break; 287 switch (state->Vcc) {
288 case 33: reg |= I365_VCC_3V; break;
289 case 50: reg |= I365_VCC_5V; break;
290 default: reg = 0; break;
291 }
292 switch (state->Vpp) {
293 case 33:
294 case 50: reg |= I365_VPP1_5V; break;
295 case 120: reg |= I365_VPP1_12V; break;
296 }
297 } else {
298 /* i82365SL-B style */
299 switch (state->Vcc) {
300 case 50: reg |= I365_VCC_5V; break;
301 default: reg = 0; break;
302 }
303 switch (state->Vpp) {
304 case 50: reg |= I365_VPP1_5V | I365_VPP2_5V; break;
305 case 120: reg |= I365_VPP1_12V | I365_VPP2_12V; break;
306 }
307 }
308
309 if (reg != old)
310 exca_writeb(socket, I365_POWER, reg);
311 } else {
312 u32 reg = 0; /* CB_SC_STPCLK? */
313 switch (state->Vcc) {
314 case 33: reg = CB_SC_VCC_3V; break;
315 case 50: reg = CB_SC_VCC_5V; break;
316 default: reg = 0; break;
317 }
318 switch (state->Vpp) {
319 case 33: reg |= CB_SC_VPP_3V; break;
320 case 50: reg |= CB_SC_VPP_5V; break;
321 case 120: reg |= CB_SC_VPP_12V; break;
322 }
323 if (reg != cb_readl(socket, CB_SOCKET_CONTROL))
324 cb_writel(socket, CB_SOCKET_CONTROL, reg);
259 } 325 }
260 if (reg != cb_readl(socket, CB_SOCKET_CONTROL))
261 cb_writel(socket, CB_SOCKET_CONTROL, reg);
262} 326}
263 327
264static int yenta_set_socket(struct pcmcia_socket *sock, socket_state_t *state) 328static int yenta_set_socket(struct pcmcia_socket *sock, socket_state_t *state)
@@ -751,6 +815,7 @@ enum {
751 CARDBUS_TYPE_TI12XX, 815 CARDBUS_TYPE_TI12XX,
752 CARDBUS_TYPE_TI1250, 816 CARDBUS_TYPE_TI1250,
753 CARDBUS_TYPE_RICOH, 817 CARDBUS_TYPE_RICOH,
818 CARDBUS_TYPE_TOPIC95,
754 CARDBUS_TYPE_TOPIC97, 819 CARDBUS_TYPE_TOPIC97,
755 CARDBUS_TYPE_O2MICRO, 820 CARDBUS_TYPE_O2MICRO,
756}; 821};
@@ -789,6 +854,9 @@ static struct cardbus_type cardbus_type[] = {
789 .save_state = ricoh_save_state, 854 .save_state = ricoh_save_state,
790 .restore_state = ricoh_restore_state, 855 .restore_state = ricoh_restore_state,
791 }, 856 },
857 [CARDBUS_TYPE_TOPIC95] = {
858 .override = topic95_override,
859 },
792 [CARDBUS_TYPE_TOPIC97] = { 860 [CARDBUS_TYPE_TOPIC97] = {
793 .override = topic97_override, 861 .override = topic97_override,
794 }, 862 },
@@ -1196,6 +1264,7 @@ static struct pci_device_id yenta_table [] = {
1196 CB_ID(PCI_VENDOR_ID_RICOH, PCI_DEVICE_ID_RICOH_RL5C476, RICOH), 1264 CB_ID(PCI_VENDOR_ID_RICOH, PCI_DEVICE_ID_RICOH_RL5C476, RICOH),
1197 CB_ID(PCI_VENDOR_ID_RICOH, PCI_DEVICE_ID_RICOH_RL5C478, RICOH), 1265 CB_ID(PCI_VENDOR_ID_RICOH, PCI_DEVICE_ID_RICOH_RL5C478, RICOH),
1198 1266
1267 CB_ID(PCI_VENDOR_ID_TOSHIBA, PCI_DEVICE_ID_TOSHIBA_TOPIC95, TOPIC95),
1199 CB_ID(PCI_VENDOR_ID_TOSHIBA, PCI_DEVICE_ID_TOSHIBA_TOPIC97, TOPIC97), 1268 CB_ID(PCI_VENDOR_ID_TOSHIBA, PCI_DEVICE_ID_TOSHIBA_TOPIC97, TOPIC97),
1200 CB_ID(PCI_VENDOR_ID_TOSHIBA, PCI_DEVICE_ID_TOSHIBA_TOPIC100, TOPIC97), 1269 CB_ID(PCI_VENDOR_ID_TOSHIBA, PCI_DEVICE_ID_TOSHIBA_TOPIC100, TOPIC97),
1201 1270
diff --git a/drivers/pcmcia/yenta_socket.h b/drivers/pcmcia/yenta_socket.h
index 4e637eef2076..4e75e9e258cd 100644
--- a/drivers/pcmcia/yenta_socket.h
+++ b/drivers/pcmcia/yenta_socket.h
@@ -95,6 +95,12 @@
95 */ 95 */
96#define CB_MEM_PAGE(map) (0x40 + (map)) 96#define CB_MEM_PAGE(map) (0x40 + (map))
97 97
98
99/* control how 16bit cards are powered */
100#define YENTA_16BIT_POWER_EXCA 0x00000001
101#define YENTA_16BIT_POWER_DF 0x00000002
102
103
98struct yenta_socket; 104struct yenta_socket;
99 105
100struct cardbus_type { 106struct cardbus_type {
@@ -113,6 +119,8 @@ struct yenta_socket {
113 struct pcmcia_socket socket; 119 struct pcmcia_socket socket;
114 struct cardbus_type *type; 120 struct cardbus_type *type;
115 121
122 u32 flags;
123
116 /* for PCI interrupt probing */ 124 /* for PCI interrupt probing */
117 unsigned int probe_status; 125 unsigned int probe_status;
118 126
diff --git a/drivers/pnp/card.c b/drivers/pnp/card.c
index 6e5229e92fbc..e95ed67d4f05 100644
--- a/drivers/pnp/card.c
+++ b/drivers/pnp/card.c
@@ -8,13 +8,6 @@
8#include <linux/config.h> 8#include <linux/config.h>
9#include <linux/module.h> 9#include <linux/module.h>
10#include <linux/slab.h> 10#include <linux/slab.h>
11
12#ifdef CONFIG_PNP_DEBUG
13 #define DEBUG
14#else
15 #undef DEBUG
16#endif
17
18#include <linux/pnp.h> 11#include <linux/pnp.h>
19#include "base.h" 12#include "base.h"
20 13
diff --git a/drivers/pnp/driver.c b/drivers/pnp/driver.c
index 1d037c2a82ac..33da25f3213f 100644
--- a/drivers/pnp/driver.c
+++ b/drivers/pnp/driver.c
@@ -11,13 +11,6 @@
11#include <linux/module.h> 11#include <linux/module.h>
12#include <linux/ctype.h> 12#include <linux/ctype.h>
13#include <linux/slab.h> 13#include <linux/slab.h>
14
15#ifdef CONFIG_PNP_DEBUG
16 #define DEBUG
17#else
18 #undef DEBUG
19#endif
20
21#include <linux/pnp.h> 14#include <linux/pnp.h>
22#include "base.h" 15#include "base.h"
23 16
diff --git a/drivers/pnp/isapnp/core.c b/drivers/pnp/isapnp/core.c
index 82c5edd5b9ee..beedd86800f4 100644
--- a/drivers/pnp/isapnp/core.c
+++ b/drivers/pnp/isapnp/core.c
@@ -142,17 +142,6 @@ static void isapnp_write_word(unsigned char idx, unsigned short val)
142 isapnp_write_byte(idx+1, val); 142 isapnp_write_byte(idx+1, val);
143} 143}
144 144
145static void *isapnp_alloc(long size)
146{
147 void *result;
148
149 result = kmalloc(size, GFP_KERNEL);
150 if (!result)
151 return NULL;
152 memset(result, 0, size);
153 return result;
154}
155
156static void isapnp_key(void) 145static void isapnp_key(void)
157{ 146{
158 unsigned char code = 0x6a, msb; 147 unsigned char code = 0x6a, msb;
@@ -406,7 +395,7 @@ static void isapnp_parse_id(struct pnp_dev * dev, unsigned short vendor, unsigne
406 struct pnp_id * id; 395 struct pnp_id * id;
407 if (!dev) 396 if (!dev)
408 return; 397 return;
409 id = isapnp_alloc(sizeof(struct pnp_id)); 398 id = kcalloc(1, sizeof(struct pnp_id), GFP_KERNEL);
410 if (!id) 399 if (!id)
411 return; 400 return;
412 sprintf(id->id, "%c%c%c%x%x%x%x", 401 sprintf(id->id, "%c%c%c%x%x%x%x",
@@ -430,7 +419,7 @@ static struct pnp_dev * __init isapnp_parse_device(struct pnp_card *card, int si
430 struct pnp_dev *dev; 419 struct pnp_dev *dev;
431 420
432 isapnp_peek(tmp, size); 421 isapnp_peek(tmp, size);
433 dev = isapnp_alloc(sizeof(struct pnp_dev)); 422 dev = kcalloc(1, sizeof(struct pnp_dev), GFP_KERNEL);
434 if (!dev) 423 if (!dev)
435 return NULL; 424 return NULL;
436 dev->number = number; 425 dev->number = number;
@@ -461,7 +450,7 @@ static void __init isapnp_parse_irq_resource(struct pnp_option *option,
461 unsigned long bits; 450 unsigned long bits;
462 451
463 isapnp_peek(tmp, size); 452 isapnp_peek(tmp, size);
464 irq = isapnp_alloc(sizeof(struct pnp_irq)); 453 irq = kcalloc(1, sizeof(struct pnp_irq), GFP_KERNEL);
465 if (!irq) 454 if (!irq)
466 return; 455 return;
467 bits = (tmp[1] << 8) | tmp[0]; 456 bits = (tmp[1] << 8) | tmp[0];
@@ -485,7 +474,7 @@ static void __init isapnp_parse_dma_resource(struct pnp_option *option,
485 struct pnp_dma *dma; 474 struct pnp_dma *dma;
486 475
487 isapnp_peek(tmp, size); 476 isapnp_peek(tmp, size);
488 dma = isapnp_alloc(sizeof(struct pnp_dma)); 477 dma = kcalloc(1, sizeof(struct pnp_dma), GFP_KERNEL);
489 if (!dma) 478 if (!dma)
490 return; 479 return;
491 dma->map = tmp[0]; 480 dma->map = tmp[0];
@@ -505,7 +494,7 @@ static void __init isapnp_parse_port_resource(struct pnp_option *option,
505 struct pnp_port *port; 494 struct pnp_port *port;
506 495
507 isapnp_peek(tmp, size); 496 isapnp_peek(tmp, size);
508 port = isapnp_alloc(sizeof(struct pnp_port)); 497 port = kcalloc(1, sizeof(struct pnp_port), GFP_KERNEL);
509 if (!port) 498 if (!port)
510 return; 499 return;
511 port->min = (tmp[2] << 8) | tmp[1]; 500 port->min = (tmp[2] << 8) | tmp[1];
@@ -528,7 +517,7 @@ static void __init isapnp_parse_fixed_port_resource(struct pnp_option *option,
528 struct pnp_port *port; 517 struct pnp_port *port;
529 518
530 isapnp_peek(tmp, size); 519 isapnp_peek(tmp, size);
531 port = isapnp_alloc(sizeof(struct pnp_port)); 520 port = kcalloc(1, sizeof(struct pnp_port), GFP_KERNEL);
532 if (!port) 521 if (!port)
533 return; 522 return;
534 port->min = port->max = (tmp[1] << 8) | tmp[0]; 523 port->min = port->max = (tmp[1] << 8) | tmp[0];
@@ -550,7 +539,7 @@ static void __init isapnp_parse_mem_resource(struct pnp_option *option,
550 struct pnp_mem *mem; 539 struct pnp_mem *mem;
551 540
552 isapnp_peek(tmp, size); 541 isapnp_peek(tmp, size);
553 mem = isapnp_alloc(sizeof(struct pnp_mem)); 542 mem = kcalloc(1, sizeof(struct pnp_mem), GFP_KERNEL);
554 if (!mem) 543 if (!mem)
555 return; 544 return;
556 mem->min = ((tmp[2] << 8) | tmp[1]) << 8; 545 mem->min = ((tmp[2] << 8) | tmp[1]) << 8;
@@ -573,7 +562,7 @@ static void __init isapnp_parse_mem32_resource(struct pnp_option *option,
573 struct pnp_mem *mem; 562 struct pnp_mem *mem;
574 563
575 isapnp_peek(tmp, size); 564 isapnp_peek(tmp, size);
576 mem = isapnp_alloc(sizeof(struct pnp_mem)); 565 mem = kcalloc(1, sizeof(struct pnp_mem), GFP_KERNEL);
577 if (!mem) 566 if (!mem)
578 return; 567 return;
579 mem->min = (tmp[4] << 24) | (tmp[3] << 16) | (tmp[2] << 8) | tmp[1]; 568 mem->min = (tmp[4] << 24) | (tmp[3] << 16) | (tmp[2] << 8) | tmp[1];
@@ -595,7 +584,7 @@ static void __init isapnp_parse_fixed_mem32_resource(struct pnp_option *option,
595 struct pnp_mem *mem; 584 struct pnp_mem *mem;
596 585
597 isapnp_peek(tmp, size); 586 isapnp_peek(tmp, size);
598 mem = isapnp_alloc(sizeof(struct pnp_mem)); 587 mem = kcalloc(1, sizeof(struct pnp_mem), GFP_KERNEL);
599 if (!mem) 588 if (!mem)
600 return; 589 return;
601 mem->min = mem->max = (tmp[4] << 24) | (tmp[3] << 16) | (tmp[2] << 8) | tmp[1]; 590 mem->min = mem->max = (tmp[4] << 24) | (tmp[3] << 16) | (tmp[2] << 8) | tmp[1];
@@ -838,7 +827,7 @@ static unsigned char __init isapnp_checksum(unsigned char *data)
838 827
839static void isapnp_parse_card_id(struct pnp_card * card, unsigned short vendor, unsigned short device) 828static void isapnp_parse_card_id(struct pnp_card * card, unsigned short vendor, unsigned short device)
840{ 829{
841 struct pnp_id * id = isapnp_alloc(sizeof(struct pnp_id)); 830 struct pnp_id * id = kcalloc(1, sizeof(struct pnp_id), GFP_KERNEL);
842 if (!id) 831 if (!id)
843 return; 832 return;
844 sprintf(id->id, "%c%c%c%x%x%x%x", 833 sprintf(id->id, "%c%c%c%x%x%x%x",
@@ -874,7 +863,7 @@ static int __init isapnp_build_device_list(void)
874 header[4], header[5], header[6], header[7], header[8]); 863 header[4], header[5], header[6], header[7], header[8]);
875 printk(KERN_DEBUG "checksum = 0x%x\n", checksum); 864 printk(KERN_DEBUG "checksum = 0x%x\n", checksum);
876#endif 865#endif
877 if ((card = isapnp_alloc(sizeof(struct pnp_card))) == NULL) 866 if ((card = kcalloc(1, sizeof(struct pnp_card), GFP_KERNEL)) == NULL)
878 continue; 867 continue;
879 868
880 card->number = csn; 869 card->number = csn;
diff --git a/drivers/pnp/manager.c b/drivers/pnp/manager.c
index 6c510c19ad7d..94442ffd4aed 100644
--- a/drivers/pnp/manager.c
+++ b/drivers/pnp/manager.c
@@ -11,13 +11,6 @@
11#include <linux/module.h> 11#include <linux/module.h>
12#include <linux/init.h> 12#include <linux/init.h>
13#include <linux/kernel.h> 13#include <linux/kernel.h>
14
15#ifdef CONFIG_PNP_DEBUG
16 #define DEBUG
17#else
18 #undef DEBUG
19#endif
20
21#include <linux/pnp.h> 14#include <linux/pnp.h>
22#include "base.h" 15#include "base.h"
23 16
diff --git a/drivers/pnp/pnpacpi/core.c b/drivers/pnp/pnpacpi/core.c
index 8655dd2e5b83..1a8915e74160 100644
--- a/drivers/pnp/pnpacpi/core.c
+++ b/drivers/pnp/pnpacpi/core.c
@@ -19,6 +19,7 @@
19 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA 19 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
20 */ 20 */
21 21
22#include <linux/config.h>
22#include <linux/acpi.h> 23#include <linux/acpi.h>
23#include <linux/pnp.h> 24#include <linux/pnp.h>
24#include <acpi/acpi_bus.h> 25#include <acpi/acpi_bus.h>
@@ -41,14 +42,6 @@ static inline int is_exclusive_device(struct acpi_device *dev)
41 return (!acpi_match_ids(dev, excluded_id_list)); 42 return (!acpi_match_ids(dev, excluded_id_list));
42} 43}
43 44
44void *pnpacpi_kmalloc(size_t size, int f)
45{
46 void *p = kmalloc(size, f);
47 if (p)
48 memset(p, 0, size);
49 return p;
50}
51
52/* 45/*
53 * Compatible Device IDs 46 * Compatible Device IDs
54 */ 47 */
@@ -143,7 +136,7 @@ static int __init pnpacpi_add_device(struct acpi_device *device)
143 return 0; 136 return 0;
144 137
145 pnp_dbg("ACPI device : hid %s", acpi_device_hid(device)); 138 pnp_dbg("ACPI device : hid %s", acpi_device_hid(device));
146 dev = pnpacpi_kmalloc(sizeof(struct pnp_dev), GFP_KERNEL); 139 dev = kcalloc(1, sizeof(struct pnp_dev), GFP_KERNEL);
147 if (!dev) { 140 if (!dev) {
148 pnp_err("Out of memory"); 141 pnp_err("Out of memory");
149 return -ENOMEM; 142 return -ENOMEM;
@@ -173,7 +166,7 @@ static int __init pnpacpi_add_device(struct acpi_device *device)
173 dev->number = num; 166 dev->number = num;
174 167
175 /* set the initial values for the PnP device */ 168 /* set the initial values for the PnP device */
176 dev_id = pnpacpi_kmalloc(sizeof(struct pnp_id), GFP_KERNEL); 169 dev_id = kcalloc(1, sizeof(struct pnp_id), GFP_KERNEL);
177 if (!dev_id) 170 if (!dev_id)
178 goto err; 171 goto err;
179 pnpidacpi_to_pnpid(acpi_device_hid(device), dev_id->id); 172 pnpidacpi_to_pnpid(acpi_device_hid(device), dev_id->id);
@@ -205,8 +198,7 @@ static int __init pnpacpi_add_device(struct acpi_device *device)
205 for (i = 0; i < cid_list->count; i++) { 198 for (i = 0; i < cid_list->count; i++) {
206 if (!ispnpidacpi(cid_list->id[i].value)) 199 if (!ispnpidacpi(cid_list->id[i].value))
207 continue; 200 continue;
208 dev_id = pnpacpi_kmalloc(sizeof(struct pnp_id), 201 dev_id = kcalloc(1, sizeof(struct pnp_id), GFP_KERNEL);
209 GFP_KERNEL);
210 if (!dev_id) 202 if (!dev_id)
211 continue; 203 continue;
212 204
diff --git a/drivers/pnp/pnpacpi/pnpacpi.h b/drivers/pnp/pnpacpi/pnpacpi.h
index 76f907e09ee6..f28e2ed66fa3 100644
--- a/drivers/pnp/pnpacpi/pnpacpi.h
+++ b/drivers/pnp/pnpacpi/pnpacpi.h
@@ -5,7 +5,6 @@
5#include <linux/acpi.h> 5#include <linux/acpi.h>
6#include <linux/pnp.h> 6#include <linux/pnp.h>
7 7
8void *pnpacpi_kmalloc(size_t size, int f);
9acpi_status pnpacpi_parse_allocated_resource(acpi_handle, struct pnp_resource_table*); 8acpi_status pnpacpi_parse_allocated_resource(acpi_handle, struct pnp_resource_table*);
10acpi_status pnpacpi_parse_resource_option_data(acpi_handle, struct pnp_dev*); 9acpi_status pnpacpi_parse_resource_option_data(acpi_handle, struct pnp_dev*);
11int pnpacpi_encode_resources(struct pnp_resource_table *, struct acpi_buffer *); 10int pnpacpi_encode_resources(struct pnp_resource_table *, struct acpi_buffer *);
diff --git a/drivers/pnp/pnpacpi/rsparser.c b/drivers/pnp/pnpacpi/rsparser.c
index 75575f6c349c..675b76a42403 100644
--- a/drivers/pnp/pnpacpi/rsparser.c
+++ b/drivers/pnp/pnpacpi/rsparser.c
@@ -244,7 +244,7 @@ static void pnpacpi_parse_dma_option(struct pnp_option *option, struct acpi_reso
244 244
245 if (p->number_of_channels == 0) 245 if (p->number_of_channels == 0)
246 return; 246 return;
247 dma = pnpacpi_kmalloc(sizeof(struct pnp_dma), GFP_KERNEL); 247 dma = kcalloc(1, sizeof(struct pnp_dma), GFP_KERNEL);
248 if (!dma) 248 if (!dma)
249 return; 249 return;
250 250
@@ -300,7 +300,7 @@ static void pnpacpi_parse_irq_option(struct pnp_option *option,
300 300
301 if (p->number_of_interrupts == 0) 301 if (p->number_of_interrupts == 0)
302 return; 302 return;
303 irq = pnpacpi_kmalloc(sizeof(struct pnp_irq), GFP_KERNEL); 303 irq = kcalloc(1, sizeof(struct pnp_irq), GFP_KERNEL);
304 if (!irq) 304 if (!irq)
305 return; 305 return;
306 306
@@ -321,7 +321,7 @@ static void pnpacpi_parse_ext_irq_option(struct pnp_option *option,
321 321
322 if (p->number_of_interrupts == 0) 322 if (p->number_of_interrupts == 0)
323 return; 323 return;
324 irq = pnpacpi_kmalloc(sizeof(struct pnp_irq), GFP_KERNEL); 324 irq = kcalloc(1, sizeof(struct pnp_irq), GFP_KERNEL);
325 if (!irq) 325 if (!irq)
326 return; 326 return;
327 327
@@ -342,7 +342,7 @@ pnpacpi_parse_port_option(struct pnp_option *option,
342 342
343 if (io->range_length == 0) 343 if (io->range_length == 0)
344 return; 344 return;
345 port = pnpacpi_kmalloc(sizeof(struct pnp_port), GFP_KERNEL); 345 port = kcalloc(1, sizeof(struct pnp_port), GFP_KERNEL);
346 if (!port) 346 if (!port)
347 return; 347 return;
348 port->min = io->min_base_address; 348 port->min = io->min_base_address;
@@ -363,7 +363,7 @@ pnpacpi_parse_fixed_port_option(struct pnp_option *option,
363 363
364 if (io->range_length == 0) 364 if (io->range_length == 0)
365 return; 365 return;
366 port = pnpacpi_kmalloc(sizeof(struct pnp_port), GFP_KERNEL); 366 port = kcalloc(1, sizeof(struct pnp_port), GFP_KERNEL);
367 if (!port) 367 if (!port)
368 return; 368 return;
369 port->min = port->max = io->base_address; 369 port->min = port->max = io->base_address;
@@ -382,7 +382,7 @@ pnpacpi_parse_mem24_option(struct pnp_option *option,
382 382
383 if (p->range_length == 0) 383 if (p->range_length == 0)
384 return; 384 return;
385 mem = pnpacpi_kmalloc(sizeof(struct pnp_mem), GFP_KERNEL); 385 mem = kcalloc(1, sizeof(struct pnp_mem), GFP_KERNEL);
386 if (!mem) 386 if (!mem)
387 return; 387 return;
388 mem->min = p->min_base_address; 388 mem->min = p->min_base_address;
@@ -405,7 +405,7 @@ pnpacpi_parse_mem32_option(struct pnp_option *option,
405 405
406 if (p->range_length == 0) 406 if (p->range_length == 0)
407 return; 407 return;
408 mem = pnpacpi_kmalloc(sizeof(struct pnp_mem), GFP_KERNEL); 408 mem = kcalloc(1, sizeof(struct pnp_mem), GFP_KERNEL);
409 if (!mem) 409 if (!mem)
410 return; 410 return;
411 mem->min = p->min_base_address; 411 mem->min = p->min_base_address;
@@ -428,7 +428,7 @@ pnpacpi_parse_fixed_mem32_option(struct pnp_option *option,
428 428
429 if (p->range_length == 0) 429 if (p->range_length == 0)
430 return; 430 return;
431 mem = pnpacpi_kmalloc(sizeof(struct pnp_mem), GFP_KERNEL); 431 mem = kcalloc(1, sizeof(struct pnp_mem), GFP_KERNEL);
432 if (!mem) 432 if (!mem)
433 return; 433 return;
434 mem->min = mem->max = p->range_base_address; 434 mem->min = mem->max = p->range_base_address;
@@ -612,7 +612,7 @@ int pnpacpi_build_resource_template(acpi_handle handle,
612 if (!res_cnt) 612 if (!res_cnt)
613 return -EINVAL; 613 return -EINVAL;
614 buffer->length = sizeof(struct acpi_resource) * (res_cnt + 1) + 1; 614 buffer->length = sizeof(struct acpi_resource) * (res_cnt + 1) + 1;
615 buffer->pointer = pnpacpi_kmalloc(buffer->length - 1, GFP_KERNEL); 615 buffer->pointer = kcalloc(1, buffer->length - 1, GFP_KERNEL);
616 if (!buffer->pointer) 616 if (!buffer->pointer)
617 return -ENOMEM; 617 return -ENOMEM;
618 pnp_dbg("Res cnt %d", res_cnt); 618 pnp_dbg("Res cnt %d", res_cnt);
diff --git a/drivers/pnp/pnpbios/core.c b/drivers/pnp/pnpbios/core.c
index 778a324028f4..f49674f07949 100644
--- a/drivers/pnp/pnpbios/core.c
+++ b/drivers/pnp/pnpbios/core.c
@@ -86,16 +86,6 @@ int pnp_bios_present(void)
86 86
87struct pnp_dev_node_info node_info; 87struct pnp_dev_node_info node_info;
88 88
89void *pnpbios_kmalloc(size_t size, int f)
90{
91 void *p = kmalloc( size, f );
92 if ( p == NULL )
93 printk(KERN_ERR "PnPBIOS: kmalloc() failed\n");
94 else
95 memset(p, 0, size);
96 return p;
97}
98
99/* 89/*
100 * 90 *
101 * DOCKING FUNCTIONS 91 * DOCKING FUNCTIONS
@@ -121,10 +111,10 @@ static int pnp_dock_event(int dock, struct pnp_docking_station_info *info)
121 if (!current->fs->root) { 111 if (!current->fs->root) {
122 return -EAGAIN; 112 return -EAGAIN;
123 } 113 }
124 if (!(envp = (char **) pnpbios_kmalloc (20 * sizeof (char *), GFP_KERNEL))) { 114 if (!(envp = (char **) kcalloc (20, sizeof (char *), GFP_KERNEL))) {
125 return -ENOMEM; 115 return -ENOMEM;
126 } 116 }
127 if (!(buf = pnpbios_kmalloc (256, GFP_KERNEL))) { 117 if (!(buf = kcalloc (1, 256, GFP_KERNEL))) {
128 kfree (envp); 118 kfree (envp);
129 return -ENOMEM; 119 return -ENOMEM;
130 } 120 }
@@ -231,7 +221,7 @@ static int pnpbios_get_resources(struct pnp_dev * dev, struct pnp_resource_table
231 if(!pnpbios_is_dynamic(dev)) 221 if(!pnpbios_is_dynamic(dev))
232 return -EPERM; 222 return -EPERM;
233 223
234 node = pnpbios_kmalloc(node_info.max_node_size, GFP_KERNEL); 224 node = kcalloc(1, node_info.max_node_size, GFP_KERNEL);
235 if (!node) 225 if (!node)
236 return -1; 226 return -1;
237 if (pnp_bios_get_dev_node(&nodenum, (char )PNPMODE_DYNAMIC, node)) { 227 if (pnp_bios_get_dev_node(&nodenum, (char )PNPMODE_DYNAMIC, node)) {
@@ -254,7 +244,7 @@ static int pnpbios_set_resources(struct pnp_dev * dev, struct pnp_resource_table
254 if (!pnpbios_is_dynamic(dev)) 244 if (!pnpbios_is_dynamic(dev))
255 return -EPERM; 245 return -EPERM;
256 246
257 node = pnpbios_kmalloc(node_info.max_node_size, GFP_KERNEL); 247 node = kcalloc(1, node_info.max_node_size, GFP_KERNEL);
258 if (!node) 248 if (!node)
259 return -1; 249 return -1;
260 if (pnp_bios_get_dev_node(&nodenum, (char )PNPMODE_DYNAMIC, node)) { 250 if (pnp_bios_get_dev_node(&nodenum, (char )PNPMODE_DYNAMIC, node)) {
@@ -305,7 +295,7 @@ static int pnpbios_disable_resources(struct pnp_dev *dev)
305 if(dev->flags & PNPBIOS_NO_DISABLE || !pnpbios_is_dynamic(dev)) 295 if(dev->flags & PNPBIOS_NO_DISABLE || !pnpbios_is_dynamic(dev))
306 return -EPERM; 296 return -EPERM;
307 297
308 node = pnpbios_kmalloc(node_info.max_node_size, GFP_KERNEL); 298 node = kcalloc(1, node_info.max_node_size, GFP_KERNEL);
309 if (!node) 299 if (!node)
310 return -ENOMEM; 300 return -ENOMEM;
311 301
@@ -347,7 +337,7 @@ static int insert_device(struct pnp_dev *dev, struct pnp_bios_node * node)
347 } 337 }
348 338
349 /* set the initial values for the PnP device */ 339 /* set the initial values for the PnP device */
350 dev_id = pnpbios_kmalloc(sizeof(struct pnp_id), GFP_KERNEL); 340 dev_id = kcalloc(1, sizeof(struct pnp_id), GFP_KERNEL);
351 if (!dev_id) 341 if (!dev_id)
352 return -1; 342 return -1;
353 pnpid32_to_pnpid(node->eisa_id,id); 343 pnpid32_to_pnpid(node->eisa_id,id);
@@ -385,7 +375,7 @@ static void __init build_devlist(void)
385 struct pnp_bios_node *node; 375 struct pnp_bios_node *node;
386 struct pnp_dev *dev; 376 struct pnp_dev *dev;
387 377
388 node = pnpbios_kmalloc(node_info.max_node_size, GFP_KERNEL); 378 node = kcalloc(1, node_info.max_node_size, GFP_KERNEL);
389 if (!node) 379 if (!node)
390 return; 380 return;
391 381
@@ -402,7 +392,7 @@ static void __init build_devlist(void)
402 break; 392 break;
403 } 393 }
404 nodes_got++; 394 nodes_got++;
405 dev = pnpbios_kmalloc(sizeof (struct pnp_dev), GFP_KERNEL); 395 dev = kcalloc(1, sizeof (struct pnp_dev), GFP_KERNEL);
406 if (!dev) 396 if (!dev)
407 break; 397 break;
408 if(insert_device(dev,node)<0) 398 if(insert_device(dev,node)<0)
diff --git a/drivers/pnp/pnpbios/pnpbios.h b/drivers/pnp/pnpbios/pnpbios.h
index 01896e705ed4..d8cb2fd1f127 100644
--- a/drivers/pnp/pnpbios/pnpbios.h
+++ b/drivers/pnp/pnpbios/pnpbios.h
@@ -26,7 +26,6 @@ union pnp_bios_install_struct {
26 26
27extern int pnp_bios_present(void); 27extern int pnp_bios_present(void);
28extern int pnpbios_dont_use_current_config; 28extern int pnpbios_dont_use_current_config;
29extern void *pnpbios_kmalloc(size_t size, int f);
30 29
31extern int pnpbios_parse_data_stream(struct pnp_dev *dev, struct pnp_bios_node * node); 30extern int pnpbios_parse_data_stream(struct pnp_dev *dev, struct pnp_bios_node * node);
32extern int pnpbios_read_resources_from_node(struct pnp_resource_table *res, struct pnp_bios_node * node); 31extern int pnpbios_read_resources_from_node(struct pnp_resource_table *res, struct pnp_bios_node * node);
diff --git a/drivers/pnp/pnpbios/proc.c b/drivers/pnp/pnpbios/proc.c
index 6bb8e1973fd4..5a3dfc97f5e9 100644
--- a/drivers/pnp/pnpbios/proc.c
+++ b/drivers/pnp/pnpbios/proc.c
@@ -87,7 +87,7 @@ static int proc_read_escd(char *buf, char **start, off_t pos,
87 return -EFBIG; 87 return -EFBIG;
88 } 88 }
89 89
90 tmpbuf = pnpbios_kmalloc(escd.escd_size, GFP_KERNEL); 90 tmpbuf = kcalloc(1, escd.escd_size, GFP_KERNEL);
91 if (!tmpbuf) return -ENOMEM; 91 if (!tmpbuf) return -ENOMEM;
92 92
93 if (pnp_bios_read_escd(tmpbuf, escd.nv_storage_base)) { 93 if (pnp_bios_read_escd(tmpbuf, escd.nv_storage_base)) {
@@ -133,7 +133,7 @@ static int proc_read_devices(char *buf, char **start, off_t pos,
133 if (pos >= 0xff) 133 if (pos >= 0xff)
134 return 0; 134 return 0;
135 135
136 node = pnpbios_kmalloc(node_info.max_node_size, GFP_KERNEL); 136 node = kcalloc(1, node_info.max_node_size, GFP_KERNEL);
137 if (!node) return -ENOMEM; 137 if (!node) return -ENOMEM;
138 138
139 for (nodenum=pos; nodenum<0xff; ) { 139 for (nodenum=pos; nodenum<0xff; ) {
@@ -168,7 +168,7 @@ static int proc_read_node(char *buf, char **start, off_t pos,
168 u8 nodenum = (long)data; 168 u8 nodenum = (long)data;
169 int len; 169 int len;
170 170
171 node = pnpbios_kmalloc(node_info.max_node_size, GFP_KERNEL); 171 node = kcalloc(1, node_info.max_node_size, GFP_KERNEL);
172 if (!node) return -ENOMEM; 172 if (!node) return -ENOMEM;
173 if (pnp_bios_get_dev_node(&nodenum, boot, node)) { 173 if (pnp_bios_get_dev_node(&nodenum, boot, node)) {
174 kfree(node); 174 kfree(node);
@@ -188,7 +188,7 @@ static int proc_write_node(struct file *file, const char __user *buf,
188 u8 nodenum = (long)data; 188 u8 nodenum = (long)data;
189 int ret = count; 189 int ret = count;
190 190
191 node = pnpbios_kmalloc(node_info.max_node_size, GFP_KERNEL); 191 node = kcalloc(1, node_info.max_node_size, GFP_KERNEL);
192 if (!node) 192 if (!node)
193 return -ENOMEM; 193 return -ENOMEM;
194 if (pnp_bios_get_dev_node(&nodenum, boot, node)) { 194 if (pnp_bios_get_dev_node(&nodenum, boot, node)) {
diff --git a/drivers/pnp/pnpbios/rsparser.c b/drivers/pnp/pnpbios/rsparser.c
index e305bb132c24..b0ca65b68645 100644
--- a/drivers/pnp/pnpbios/rsparser.c
+++ b/drivers/pnp/pnpbios/rsparser.c
@@ -247,7 +247,7 @@ static void
247pnpbios_parse_mem_option(unsigned char *p, int size, struct pnp_option *option) 247pnpbios_parse_mem_option(unsigned char *p, int size, struct pnp_option *option)
248{ 248{
249 struct pnp_mem * mem; 249 struct pnp_mem * mem;
250 mem = pnpbios_kmalloc(sizeof(struct pnp_mem), GFP_KERNEL); 250 mem = kcalloc(1, sizeof(struct pnp_mem), GFP_KERNEL);
251 if (!mem) 251 if (!mem)
252 return; 252 return;
253 mem->min = ((p[5] << 8) | p[4]) << 8; 253 mem->min = ((p[5] << 8) | p[4]) << 8;
@@ -263,7 +263,7 @@ static void
263pnpbios_parse_mem32_option(unsigned char *p, int size, struct pnp_option *option) 263pnpbios_parse_mem32_option(unsigned char *p, int size, struct pnp_option *option)
264{ 264{
265 struct pnp_mem * mem; 265 struct pnp_mem * mem;
266 mem = pnpbios_kmalloc(sizeof(struct pnp_mem), GFP_KERNEL); 266 mem = kcalloc(1, sizeof(struct pnp_mem), GFP_KERNEL);
267 if (!mem) 267 if (!mem)
268 return; 268 return;
269 mem->min = (p[7] << 24) | (p[6] << 16) | (p[5] << 8) | p[4]; 269 mem->min = (p[7] << 24) | (p[6] << 16) | (p[5] << 8) | p[4];
@@ -279,7 +279,7 @@ static void
279pnpbios_parse_fixed_mem32_option(unsigned char *p, int size, struct pnp_option *option) 279pnpbios_parse_fixed_mem32_option(unsigned char *p, int size, struct pnp_option *option)
280{ 280{
281 struct pnp_mem * mem; 281 struct pnp_mem * mem;
282 mem = pnpbios_kmalloc(sizeof(struct pnp_mem), GFP_KERNEL); 282 mem = kcalloc(1, sizeof(struct pnp_mem), GFP_KERNEL);
283 if (!mem) 283 if (!mem)
284 return; 284 return;
285 mem->min = mem->max = (p[7] << 24) | (p[6] << 16) | (p[5] << 8) | p[4]; 285 mem->min = mem->max = (p[7] << 24) | (p[6] << 16) | (p[5] << 8) | p[4];
@@ -296,7 +296,7 @@ pnpbios_parse_irq_option(unsigned char *p, int size, struct pnp_option *option)
296 struct pnp_irq * irq; 296 struct pnp_irq * irq;
297 unsigned long bits; 297 unsigned long bits;
298 298
299 irq = pnpbios_kmalloc(sizeof(struct pnp_irq), GFP_KERNEL); 299 irq = kcalloc(1, sizeof(struct pnp_irq), GFP_KERNEL);
300 if (!irq) 300 if (!irq)
301 return; 301 return;
302 bits = (p[2] << 8) | p[1]; 302 bits = (p[2] << 8) | p[1];
@@ -313,7 +313,7 @@ static void
313pnpbios_parse_dma_option(unsigned char *p, int size, struct pnp_option *option) 313pnpbios_parse_dma_option(unsigned char *p, int size, struct pnp_option *option)
314{ 314{
315 struct pnp_dma * dma; 315 struct pnp_dma * dma;
316 dma = pnpbios_kmalloc(sizeof(struct pnp_dma), GFP_KERNEL); 316 dma = kcalloc(1, sizeof(struct pnp_dma), GFP_KERNEL);
317 if (!dma) 317 if (!dma)
318 return; 318 return;
319 dma->map = p[1]; 319 dma->map = p[1];
@@ -326,7 +326,7 @@ static void
326pnpbios_parse_port_option(unsigned char *p, int size, struct pnp_option *option) 326pnpbios_parse_port_option(unsigned char *p, int size, struct pnp_option *option)
327{ 327{
328 struct pnp_port * port; 328 struct pnp_port * port;
329 port = pnpbios_kmalloc(sizeof(struct pnp_port), GFP_KERNEL); 329 port = kcalloc(1, sizeof(struct pnp_port), GFP_KERNEL);
330 if (!port) 330 if (!port)
331 return; 331 return;
332 port->min = (p[3] << 8) | p[2]; 332 port->min = (p[3] << 8) | p[2];
@@ -342,7 +342,7 @@ static void
342pnpbios_parse_fixed_port_option(unsigned char *p, int size, struct pnp_option *option) 342pnpbios_parse_fixed_port_option(unsigned char *p, int size, struct pnp_option *option)
343{ 343{
344 struct pnp_port * port; 344 struct pnp_port * port;
345 port = pnpbios_kmalloc(sizeof(struct pnp_port), GFP_KERNEL); 345 port = kcalloc(1, sizeof(struct pnp_port), GFP_KERNEL);
346 if (!port) 346 if (!port)
347 return; 347 return;
348 port->min = port->max = (p[2] << 8) | p[1]; 348 port->min = port->max = (p[2] << 8) | p[1];
@@ -530,7 +530,7 @@ pnpbios_parse_compatible_ids(unsigned char *p, unsigned char *end, struct pnp_de
530 case SMALL_TAG_COMPATDEVID: /* compatible ID */ 530 case SMALL_TAG_COMPATDEVID: /* compatible ID */
531 if (len != 4) 531 if (len != 4)
532 goto len_err; 532 goto len_err;
533 dev_id = pnpbios_kmalloc(sizeof (struct pnp_id), GFP_KERNEL); 533 dev_id = kcalloc(1, sizeof (struct pnp_id), GFP_KERNEL);
534 if (!dev_id) 534 if (!dev_id)
535 return NULL; 535 return NULL;
536 memset(dev_id, 0, sizeof(struct pnp_id)); 536 memset(dev_id, 0, sizeof(struct pnp_id));
diff --git a/drivers/pnp/quirks.c b/drivers/pnp/quirks.c
index 596a02d7e03d..8936b0cb2ec3 100644
--- a/drivers/pnp/quirks.c
+++ b/drivers/pnp/quirks.c
@@ -16,13 +16,6 @@
16#include <linux/kernel.h> 16#include <linux/kernel.h>
17#include <linux/string.h> 17#include <linux/string.h>
18#include <linux/slab.h> 18#include <linux/slab.h>
19
20#ifdef CONFIG_PNP_DEBUG
21 #define DEBUG
22#else
23 #undef DEBUG
24#endif
25
26#include <linux/pnp.h> 19#include <linux/pnp.h>
27#include "base.h" 20#include "base.h"
28 21
diff --git a/drivers/pnp/support.c b/drivers/pnp/support.c
index b952aec49189..61fe998944bd 100644
--- a/drivers/pnp/support.c
+++ b/drivers/pnp/support.c
@@ -8,13 +8,6 @@
8#include <linux/config.h> 8#include <linux/config.h>
9#include <linux/module.h> 9#include <linux/module.h>
10#include <linux/ctype.h> 10#include <linux/ctype.h>
11
12#ifdef CONFIG_PNP_DEBUG
13 #define DEBUG
14#else
15 #undef DEBUG
16#endif
17
18#include <linux/pnp.h> 11#include <linux/pnp.h>
19#include "base.h" 12#include "base.h"
20 13
diff --git a/drivers/s390/net/claw.c b/drivers/s390/net/claw.c
index 24c0af49c25c..3092473991a7 100644
--- a/drivers/s390/net/claw.c
+++ b/drivers/s390/net/claw.c
@@ -2,9 +2,9 @@
2 * drivers/s390/net/claw.c 2 * drivers/s390/net/claw.c
3 * ESCON CLAW network driver 3 * ESCON CLAW network driver
4 * 4 *
5 * $Revision: 1.35 $ $Date: 2005/03/24 12:25:38 $ 5 * $Revision: 1.38 $ $Date: 2005/08/29 09:47:04 $
6 * 6 *
7 * Linux fo zSeries version 7 * Linux for zSeries version
8 * Copyright (C) 2002,2005 IBM Corporation 8 * Copyright (C) 2002,2005 IBM Corporation
9 * Author(s) Original code written by: 9 * Author(s) Original code written by:
10 * Kazuo Iimura (iimura@jp.ibm.com) 10 * Kazuo Iimura (iimura@jp.ibm.com)
@@ -431,12 +431,12 @@ claw_pack_skb(struct claw_privbk *privptr)
431 if (!skb_queue_empty(&p_ch->collect_queue)) { 431 if (!skb_queue_empty(&p_ch->collect_queue)) {
432 /* some data */ 432 /* some data */
433 held_skb = skb_dequeue(&p_ch->collect_queue); 433 held_skb = skb_dequeue(&p_ch->collect_queue);
434 if (p_env->packing != DO_PACKED)
435 return held_skb;
436 if (held_skb) 434 if (held_skb)
437 atomic_dec(&held_skb->users); 435 dev_kfree_skb_any(held_skb);
438 else 436 else
439 return NULL; 437 return NULL;
438 if (p_env->packing != DO_PACKED)
439 return held_skb;
440 /* get a new SKB we will pack at least one */ 440 /* get a new SKB we will pack at least one */
441 new_skb = dev_alloc_skb(p_env->write_size); 441 new_skb = dev_alloc_skb(p_env->write_size);
442 if (new_skb == NULL) { 442 if (new_skb == NULL) {
@@ -455,7 +455,7 @@ claw_pack_skb(struct claw_privbk *privptr)
455 privptr->stats.tx_packets++; 455 privptr->stats.tx_packets++;
456 so_far += held_skb->len; 456 so_far += held_skb->len;
457 pkt_cnt++; 457 pkt_cnt++;
458 dev_kfree_skb_irq(held_skb); 458 dev_kfree_skb_any(held_skb);
459 held_skb = skb_dequeue(&p_ch->collect_queue); 459 held_skb = skb_dequeue(&p_ch->collect_queue);
460 if (held_skb) 460 if (held_skb)
461 atomic_dec(&held_skb->users); 461 atomic_dec(&held_skb->users);
@@ -1092,7 +1092,7 @@ claw_release(struct net_device *dev)
1092 } 1092 }
1093 } 1093 }
1094 if (privptr->pk_skb != NULL) { 1094 if (privptr->pk_skb != NULL) {
1095 dev_kfree_skb(privptr->pk_skb); 1095 dev_kfree_skb_any(privptr->pk_skb);
1096 privptr->pk_skb = NULL; 1096 privptr->pk_skb = NULL;
1097 } 1097 }
1098 if(privptr->buffs_alloc != 1) { 1098 if(privptr->buffs_alloc != 1) {
@@ -2016,7 +2016,7 @@ claw_hw_tx(struct sk_buff *skb, struct net_device *dev, long linkid)
2016 p_buf=(struct ccwbk*)privptr->p_end_ccw; 2016 p_buf=(struct ccwbk*)privptr->p_end_ccw;
2017 dumpit((char *)p_buf, sizeof(struct endccw)); 2017 dumpit((char *)p_buf, sizeof(struct endccw));
2018#endif 2018#endif
2019 dev_kfree_skb(skb); 2019 dev_kfree_skb_any(skb);
2020 if (linkid==0) { 2020 if (linkid==0) {
2021 lock=LOCK_NO; 2021 lock=LOCK_NO;
2022 } 2022 }
@@ -4061,7 +4061,7 @@ claw_purge_skb_queue(struct sk_buff_head *q)
4061 4061
4062 while ((skb = skb_dequeue(q))) { 4062 while ((skb = skb_dequeue(q))) {
4063 atomic_dec(&skb->users); 4063 atomic_dec(&skb->users);
4064 dev_kfree_skb_irq(skb); 4064 dev_kfree_skb_any(skb);
4065 } 4065 }
4066} 4066}
4067 4067
@@ -4410,7 +4410,7 @@ claw_init(void)
4410#else 4410#else
4411 "compiled into kernel " 4411 "compiled into kernel "
4412#endif 4412#endif
4413 " $Revision: 1.35 $ $Date: 2005/03/24 12:25:38 $ \n"); 4413 " $Revision: 1.38 $ $Date: 2005/08/29 09:47:04 $ \n");
4414 4414
4415 4415
4416#ifdef FUNCTRACE 4416#ifdef FUNCTRACE
diff --git a/drivers/scsi/NCR5380.c b/drivers/scsi/NCR5380.c
index f8ec6fe7d858..d40ba0bd68a3 100644
--- a/drivers/scsi/NCR5380.c
+++ b/drivers/scsi/NCR5380.c
@@ -88,6 +88,13 @@
88 */ 88 */
89#include <scsi/scsi_dbg.h> 89#include <scsi/scsi_dbg.h>
90 90
91#ifndef NDEBUG
92#define NDEBUG 0
93#endif
 94#ifndef NDEBUG_ABORT
95#define NDEBUG_ABORT 0
96#endif
97
91#if (NDEBUG & NDEBUG_LISTS) 98#if (NDEBUG & NDEBUG_LISTS)
92#define LIST(x,y) {printk("LINE:%d Adding %p to %p\n", __LINE__, (void*)(x), (void*)(y)); if ((x)==(y)) udelay(5); } 99#define LIST(x,y) {printk("LINE:%d Adding %p to %p\n", __LINE__, (void*)(x), (void*)(y)); if ((x)==(y)) udelay(5); }
93#define REMOVE(w,x,y,z) {printk("LINE:%d Removing: %p->%p %p->%p \n", __LINE__, (void*)(w), (void*)(x), (void*)(y), (void*)(z)); if ((x)==(y)) udelay(5); } 100#define REMOVE(w,x,y,z) {printk("LINE:%d Removing: %p->%p %p->%p \n", __LINE__, (void*)(w), (void*)(x), (void*)(y), (void*)(z)); if ((x)==(y)) udelay(5); }
@@ -359,7 +366,7 @@ static struct {
359 {PHASE_UNKNOWN, "UNKNOWN"} 366 {PHASE_UNKNOWN, "UNKNOWN"}
360}; 367};
361 368
362#ifdef NDEBUG 369#if NDEBUG
363static struct { 370static struct {
364 unsigned char mask; 371 unsigned char mask;
365 const char *name; 372 const char *name;
diff --git a/drivers/scsi/NCR53c406a.c b/drivers/scsi/NCR53c406a.c
index 79ae73b23680..e1f2246ee7cd 100644
--- a/drivers/scsi/NCR53c406a.c
+++ b/drivers/scsi/NCR53c406a.c
@@ -62,7 +62,7 @@
62 62
63#define SYNC_MODE 0 /* Synchronous transfer mode */ 63#define SYNC_MODE 0 /* Synchronous transfer mode */
64 64
65#if DEBUG 65#ifdef DEBUG
66#undef NCR53C406A_DEBUG 66#undef NCR53C406A_DEBUG
67#define NCR53C406A_DEBUG 1 67#define NCR53C406A_DEBUG 1
68#endif 68#endif
diff --git a/drivers/scsi/sata_qstor.c b/drivers/scsi/sata_qstor.c
index 029c2482e127..ffcdeb68641c 100644
--- a/drivers/scsi/sata_qstor.c
+++ b/drivers/scsi/sata_qstor.c
@@ -494,7 +494,7 @@ static int qs_port_start(struct ata_port *ap)
494 if (rc) 494 if (rc)
495 return rc; 495 return rc;
496 qs_enter_reg_mode(ap); 496 qs_enter_reg_mode(ap);
497 pp = kcalloc(1, sizeof(*pp), GFP_KERNEL); 497 pp = kzalloc(sizeof(*pp), GFP_KERNEL);
498 if (!pp) { 498 if (!pp) {
499 rc = -ENOMEM; 499 rc = -ENOMEM;
500 goto err_out; 500 goto err_out;
diff --git a/drivers/serial/68328serial.c b/drivers/serial/68328serial.c
index 9097f2f7b12a..2efb317153ce 100644
--- a/drivers/serial/68328serial.c
+++ b/drivers/serial/68328serial.c
@@ -40,7 +40,6 @@
40#include <asm/io.h> 40#include <asm/io.h>
41#include <asm/irq.h> 41#include <asm/irq.h>
42#include <asm/system.h> 42#include <asm/system.h>
43#include <asm/segment.h>
44#include <asm/delay.h> 43#include <asm/delay.h>
45#include <asm/uaccess.h> 44#include <asm/uaccess.h>
46 45
diff --git a/drivers/serial/68360serial.c b/drivers/serial/68360serial.c
index b116122e569a..170c9d2a749c 100644
--- a/drivers/serial/68360serial.c
+++ b/drivers/serial/68360serial.c
@@ -2474,8 +2474,7 @@ static struct tty_operations rs_360_ops = {
2474 .tiocmset = rs_360_tiocmset, 2474 .tiocmset = rs_360_tiocmset,
2475}; 2475};
2476 2476
2477/* int __init rs_360_init(void) */ 2477static int __init rs_360_init(void)
2478int rs_360_init(void)
2479{ 2478{
2480 struct serial_state * state; 2479 struct serial_state * state;
2481 ser_info_t *info; 2480 ser_info_t *info;
@@ -2827,10 +2826,7 @@ int rs_360_init(void)
2827 2826
2828 return 0; 2827 return 0;
2829} 2828}
2830 2829module_init(rs_360_init);
2831
2832
2833
2834 2830
2835/* This must always be called before the rs_360_init() function, otherwise 2831/* This must always be called before the rs_360_init() function, otherwise
2836 * it blows away the port control information. 2832 * it blows away the port control information.
diff --git a/drivers/serial/crisv10.c b/drivers/serial/crisv10.c
index 5690594b257b..40d3e7139cfe 100644
--- a/drivers/serial/crisv10.c
+++ b/drivers/serial/crisv10.c
@@ -446,7 +446,6 @@ static char *serial_version = "$Revision: 1.25 $";
446#include <asm/io.h> 446#include <asm/io.h>
447#include <asm/irq.h> 447#include <asm/irq.h>
448#include <asm/system.h> 448#include <asm/system.h>
449#include <asm/segment.h>
450#include <asm/bitops.h> 449#include <asm/bitops.h>
451#include <linux/delay.h> 450#include <linux/delay.h>
452 451
diff --git a/drivers/serial/icom.c b/drivers/serial/icom.c
index 79f8df4d66b7..eb31125c6a30 100644
--- a/drivers/serial/icom.c
+++ b/drivers/serial/icom.c
@@ -56,7 +56,6 @@
56#include <linux/bitops.h> 56#include <linux/bitops.h>
57 57
58#include <asm/system.h> 58#include <asm/system.h>
59#include <asm/segment.h>
60#include <asm/io.h> 59#include <asm/io.h>
61#include <asm/irq.h> 60#include <asm/irq.h>
62#include <asm/uaccess.h> 61#include <asm/uaccess.h>
diff --git a/drivers/serial/mcfserial.c b/drivers/serial/mcfserial.c
index 8c40167778de..43b03c55f453 100644
--- a/drivers/serial/mcfserial.c
+++ b/drivers/serial/mcfserial.c
@@ -40,7 +40,6 @@
40#include <asm/io.h> 40#include <asm/io.h>
41#include <asm/irq.h> 41#include <asm/irq.h>
42#include <asm/system.h> 42#include <asm/system.h>
43#include <asm/segment.h>
44#include <asm/semaphore.h> 43#include <asm/semaphore.h>
45#include <asm/delay.h> 44#include <asm/delay.h>
46#include <asm/coldfire.h> 45#include <asm/coldfire.h>
diff --git a/drivers/serial/serial_lh7a40x.c b/drivers/serial/serial_lh7a40x.c
index 32f808d157a1..8302376800c0 100644
--- a/drivers/serial/serial_lh7a40x.c
+++ b/drivers/serial/serial_lh7a40x.c
@@ -207,7 +207,7 @@ static void lh7a40xuart_tx_chars (struct uart_port* port)
207 return; 207 return;
208 } 208 }
209 if (uart_circ_empty (xmit) || uart_tx_stopped (port)) { 209 if (uart_circ_empty (xmit) || uart_tx_stopped (port)) {
210 lh7a40xuart_stop_tx (port, 0); 210 lh7a40xuart_stop_tx (port);
211 return; 211 return;
212 } 212 }
213 213
@@ -229,7 +229,7 @@ static void lh7a40xuart_tx_chars (struct uart_port* port)
229 uart_write_wakeup (port); 229 uart_write_wakeup (port);
230 230
231 if (uart_circ_empty (xmit)) 231 if (uart_circ_empty (xmit))
232 lh7a40xuart_stop_tx (port, 0); 232 lh7a40xuart_stop_tx (port);
233} 233}
234 234
235static void lh7a40xuart_modem_status (struct uart_port* port) 235static void lh7a40xuart_modem_status (struct uart_port* port)
diff --git a/drivers/usb/atm/usbatm.c b/drivers/usb/atm/usbatm.c
index bb1db1959854..c466739428b2 100644
--- a/drivers/usb/atm/usbatm.c
+++ b/drivers/usb/atm/usbatm.c
@@ -960,7 +960,7 @@ int usbatm_usb_probe(struct usb_interface *intf, const struct usb_device_id *id,
960 intf->altsetting->desc.bInterfaceNumber); 960 intf->altsetting->desc.bInterfaceNumber);
961 961
962 /* instance init */ 962 /* instance init */
963 instance = kcalloc(1, sizeof(*instance) + sizeof(struct urb *) * (num_rcv_urbs + num_snd_urbs), GFP_KERNEL); 963 instance = kzalloc(sizeof(*instance) + sizeof(struct urb *) * (num_rcv_urbs + num_snd_urbs), GFP_KERNEL);
964 if (!instance) { 964 if (!instance) {
965 dev_dbg(dev, "%s: no memory for instance data!\n", __func__); 965 dev_dbg(dev, "%s: no memory for instance data!\n", __func__);
966 return -ENOMEM; 966 return -ENOMEM;
diff --git a/drivers/usb/core/hcd.c b/drivers/usb/core/hcd.c
index 9f44e83c6a69..12ecdb03ee5f 100644
--- a/drivers/usb/core/hcd.c
+++ b/drivers/usb/core/hcd.c
@@ -1669,7 +1669,7 @@ struct usb_hcd *usb_create_hcd (const struct hc_driver *driver,
1669{ 1669{
1670 struct usb_hcd *hcd; 1670 struct usb_hcd *hcd;
1671 1671
1672 hcd = kcalloc(1, sizeof(*hcd) + driver->hcd_priv_size, GFP_KERNEL); 1672 hcd = kzalloc(sizeof(*hcd) + driver->hcd_priv_size, GFP_KERNEL);
1673 if (!hcd) { 1673 if (!hcd) {
1674 dev_dbg (dev, "hcd alloc failed\n"); 1674 dev_dbg (dev, "hcd alloc failed\n");
1675 return NULL; 1675 return NULL;
diff --git a/drivers/usb/host/ehci-sched.c b/drivers/usb/host/ehci-sched.c
index b56f25864ed6..4c972b57c7c3 100644
--- a/drivers/usb/host/ehci-sched.c
+++ b/drivers/usb/host/ehci-sched.c
@@ -638,7 +638,7 @@ iso_stream_alloc (unsigned mem_flags)
638{ 638{
639 struct ehci_iso_stream *stream; 639 struct ehci_iso_stream *stream;
640 640
641 stream = kcalloc(1, sizeof *stream, mem_flags); 641 stream = kzalloc(sizeof *stream, mem_flags);
642 if (likely (stream != NULL)) { 642 if (likely (stream != NULL)) {
643 INIT_LIST_HEAD(&stream->td_list); 643 INIT_LIST_HEAD(&stream->td_list);
644 INIT_LIST_HEAD(&stream->free_list); 644 INIT_LIST_HEAD(&stream->free_list);
diff --git a/drivers/usb/host/isp116x-hcd.c b/drivers/usb/host/isp116x-hcd.c
index 76cb496c5836..75128c371800 100644
--- a/drivers/usb/host/isp116x-hcd.c
+++ b/drivers/usb/host/isp116x-hcd.c
@@ -717,7 +717,7 @@ static int isp116x_urb_enqueue(struct usb_hcd *hcd,
717 } 717 }
718 /* avoid all allocations within spinlocks: request or endpoint */ 718 /* avoid all allocations within spinlocks: request or endpoint */
719 if (!hep->hcpriv) { 719 if (!hep->hcpriv) {
720 ep = kcalloc(1, sizeof *ep, mem_flags); 720 ep = kzalloc(sizeof *ep, mem_flags);
721 if (!ep) 721 if (!ep)
722 return -ENOMEM; 722 return -ENOMEM;
723 } 723 }
diff --git a/drivers/usb/host/sl811-hcd.c b/drivers/usb/host/sl811-hcd.c
index 80eaf659c198..d2a1fd40dfcb 100644
--- a/drivers/usb/host/sl811-hcd.c
+++ b/drivers/usb/host/sl811-hcd.c
@@ -835,7 +835,7 @@ static int sl811h_urb_enqueue(
835 835
836 /* avoid all allocations within spinlocks */ 836 /* avoid all allocations within spinlocks */
837 if (!hep->hcpriv) 837 if (!hep->hcpriv)
838 ep = kcalloc(1, sizeof *ep, mem_flags); 838 ep = kzalloc(sizeof *ep, mem_flags);
839 839
840 spin_lock_irqsave(&sl811->lock, flags); 840 spin_lock_irqsave(&sl811->lock, flags);
841 841
diff --git a/drivers/usb/input/acecad.c b/drivers/usb/input/acecad.c
index 13532f3e3efc..74f8760d7c07 100644
--- a/drivers/usb/input/acecad.c
+++ b/drivers/usb/input/acecad.c
@@ -152,7 +152,7 @@ static int usb_acecad_probe(struct usb_interface *intf, const struct usb_device_
152 pipe = usb_rcvintpipe(dev, endpoint->bEndpointAddress); 152 pipe = usb_rcvintpipe(dev, endpoint->bEndpointAddress);
153 maxp = usb_maxpacket(dev, pipe, usb_pipeout(pipe)); 153 maxp = usb_maxpacket(dev, pipe, usb_pipeout(pipe));
154 154
155 acecad = kcalloc(1, sizeof(struct usb_acecad), GFP_KERNEL); 155 acecad = kzalloc(sizeof(struct usb_acecad), GFP_KERNEL);
156 if (!acecad) 156 if (!acecad)
157 return -ENOMEM; 157 return -ENOMEM;
158 158
diff --git a/drivers/usb/input/itmtouch.c b/drivers/usb/input/itmtouch.c
index 0dc439f10823..becb87efb869 100644
--- a/drivers/usb/input/itmtouch.c
+++ b/drivers/usb/input/itmtouch.c
@@ -166,7 +166,7 @@ static int itmtouch_probe(struct usb_interface *intf, const struct usb_device_id
166 interface = intf->cur_altsetting; 166 interface = intf->cur_altsetting;
167 endpoint = &interface->endpoint[0].desc; 167 endpoint = &interface->endpoint[0].desc;
168 168
169 if (!(itmtouch = kcalloc(1, sizeof(struct itmtouch_dev), GFP_KERNEL))) { 169 if (!(itmtouch = kzalloc(sizeof(struct itmtouch_dev), GFP_KERNEL))) {
170 err("%s - Out of memory.", __FUNCTION__); 170 err("%s - Out of memory.", __FUNCTION__);
171 return -ENOMEM; 171 return -ENOMEM;
172 } 172 }
diff --git a/drivers/usb/input/pid.c b/drivers/usb/input/pid.c
index 256963863478..acc71ec560e9 100644
--- a/drivers/usb/input/pid.c
+++ b/drivers/usb/input/pid.c
@@ -263,7 +263,7 @@ int hid_pid_init(struct hid_device *hid)
263 struct hid_ff_pid *private; 263 struct hid_ff_pid *private;
264 struct hid_input *hidinput = list_entry(&hid->inputs, struct hid_input, list); 264 struct hid_input *hidinput = list_entry(&hid->inputs, struct hid_input, list);
265 265
266 private = hid->ff_private = kcalloc(1, sizeof(struct hid_ff_pid), GFP_KERNEL); 266 private = hid->ff_private = kzalloc(sizeof(struct hid_ff_pid), GFP_KERNEL);
267 if (!private) 267 if (!private)
268 return -ENOMEM; 268 return -ENOMEM;
269 269
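The usbatm and USB host/input hunks above are all one mechanical conversion: allocating a single zeroed object with kcalloc(1, size, flags) becomes kzalloc(size, flags), which states the intent directly and skips the n-times-size handling that is pointless for n == 1. A minimal sketch of the equivalence (struct foo and alloc_one_foo are placeholders, not code from the patch):

    #include <linux/slab.h>

    struct foo { int x; };                  /* placeholder type */

    static struct foo *alloc_one_foo(void)
    {
            /* Before: struct foo *p = kcalloc(1, sizeof(*p), GFP_KERNEL); */
            /* After: identical zero-filled allocation, clearer intent.   */
            return kzalloc(sizeof(struct foo), GFP_KERNEL);
    }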
diff --git a/drivers/video/backlight/Makefile b/drivers/video/backlight/Makefile
index 9aae884475be..4af321fae390 100644
--- a/drivers/video/backlight/Makefile
+++ b/drivers/video/backlight/Makefile
@@ -3,3 +3,4 @@
3obj-$(CONFIG_LCD_CLASS_DEVICE) += lcd.o 3obj-$(CONFIG_LCD_CLASS_DEVICE) += lcd.o
4obj-$(CONFIG_BACKLIGHT_CLASS_DEVICE) += backlight.o 4obj-$(CONFIG_BACKLIGHT_CLASS_DEVICE) += backlight.o
5obj-$(CONFIG_BACKLIGHT_CORGI) += corgi_bl.o 5obj-$(CONFIG_BACKLIGHT_CORGI) += corgi_bl.o
6obj-$(CONFIG_SHARP_LOCOMO) += locomolcd.o
diff --git a/drivers/video/backlight/locomolcd.c b/drivers/video/backlight/locomolcd.c
new file mode 100644
index 000000000000..ada6e75eb048
--- /dev/null
+++ b/drivers/video/backlight/locomolcd.c
@@ -0,0 +1,157 @@
1/*
2 * Backlight control code for Sharp Zaurus SL-5500
3 *
4 * Copyright 2005 John Lenz <lenz@cs.wisc.edu>
5 * Maintainer: Pavel Machek <pavel@suse.cz> (unless John wants to :-)
6 * GPL v2
7 *
 8 * This driver assumes a single CPU. That's okay, because collie is
 9 * slightly old hardware, and no one is going to retrofit a second CPU
 10 * to an old PDA.
11 */
12
13/* LCD power functions */
14#include <linux/config.h>
15#include <linux/module.h>
16#include <linux/init.h>
17#include <linux/delay.h>
18#include <linux/device.h>
19#include <linux/interrupt.h>
20
21#include <asm/hardware/locomo.h>
22#include <asm/irq.h>
23
24#ifdef CONFIG_SA1100_COLLIE
25#include <asm/arch/collie.h>
26#else
27#include <asm/arch/poodle.h>
28#endif
29
30extern void (*sa1100fb_lcd_power)(int on);
31
32static struct locomo_dev *locomolcd_dev;
33
34static void locomolcd_on(int comadj)
35{
36 locomo_gpio_set_dir(locomolcd_dev, LOCOMO_GPIO_LCD_VSHA_ON, 0);
37 locomo_gpio_write(locomolcd_dev, LOCOMO_GPIO_LCD_VSHA_ON, 1);
38 mdelay(2);
39
40 locomo_gpio_set_dir(locomolcd_dev, LOCOMO_GPIO_LCD_VSHD_ON, 0);
41 locomo_gpio_write(locomolcd_dev, LOCOMO_GPIO_LCD_VSHD_ON, 1);
42 mdelay(2);
43
44 locomo_m62332_senddata(locomolcd_dev, comadj, 0);
45 mdelay(5);
46
47 locomo_gpio_set_dir(locomolcd_dev, LOCOMO_GPIO_LCD_VEE_ON, 0);
48 locomo_gpio_write(locomolcd_dev, LOCOMO_GPIO_LCD_VEE_ON, 1);
49 mdelay(10);
50
51 /* TFTCRST | CPSOUT=0 | CPSEN */
52 locomo_writel(0x01, locomolcd_dev->mapbase + LOCOMO_TC);
53
54 /* Set CPSD */
55 locomo_writel(6, locomolcd_dev->mapbase + LOCOMO_CPSD);
56
57 /* TFTCRST | CPSOUT=0 | CPSEN */
58 locomo_writel((0x04 | 0x01), locomolcd_dev->mapbase + LOCOMO_TC);
59 mdelay(10);
60
61 locomo_gpio_set_dir(locomolcd_dev, LOCOMO_GPIO_LCD_MOD, 0);
62 locomo_gpio_write(locomolcd_dev, LOCOMO_GPIO_LCD_MOD, 1);
63}
64
65static void locomolcd_off(int comadj)
66{
67 /* TFTCRST=1 | CPSOUT=1 | CPSEN = 0 */
68 locomo_writel(0x06, locomolcd_dev->mapbase + LOCOMO_TC);
69 mdelay(1);
70
71 locomo_gpio_write(locomolcd_dev, LOCOMO_GPIO_LCD_VSHA_ON, 0);
72 mdelay(110);
73
74 locomo_gpio_write(locomolcd_dev, LOCOMO_GPIO_LCD_VEE_ON, 0);
75 mdelay(700);
76
77 /* TFTCRST=0 | CPSOUT=0 | CPSEN = 0 */
78 locomo_writel(0, locomolcd_dev->mapbase + LOCOMO_TC);
79 locomo_gpio_write(locomolcd_dev, LOCOMO_GPIO_LCD_MOD, 0);
80 locomo_gpio_write(locomolcd_dev, LOCOMO_GPIO_LCD_VSHD_ON, 0);
81}
82
83void locomolcd_power(int on)
84{
85 int comadj = 118;
86 unsigned long flags;
87
88 local_irq_save(flags);
89
90 if (!locomolcd_dev) {
91 local_irq_restore(flags);
92 return;
93 }
94
95 /* read comadj */
96#ifdef CONFIG_MACH_POODLE
97 comadj = 118;
98#else
99 comadj = 128;
100#endif
101
102 if (on)
103 locomolcd_on(comadj);
104 else
105 locomolcd_off(comadj);
106
107 local_irq_restore(flags);
108}
109EXPORT_SYMBOL(locomolcd_power);
110
111static int poodle_lcd_probe(struct locomo_dev *dev)
112{
113 unsigned long flags;
114
115 local_irq_save(flags);
116 locomolcd_dev = dev;
117
 118 /* the locomolcd_power function is called for the first time
 119 * from fs_initcall, which is before locomo is activated.
 120 * We need to recall locomolcd_power here. */
121#ifdef CONFIG_MACH_POODLE
122 locomolcd_power(1);
123#endif
124 local_irq_restore(flags);
125 return 0;
126}
127
128static int poodle_lcd_remove(struct locomo_dev *dev)
129{
130 unsigned long flags;
131 local_irq_save(flags);
132 locomolcd_dev = NULL;
133 local_irq_restore(flags);
134 return 0;
135}
136
137static struct locomo_driver poodle_lcd_driver = {
138 .drv = {
139 .name = "locomo-backlight",
140 },
141 .devid = LOCOMO_DEVID_BACKLIGHT,
142 .probe = poodle_lcd_probe,
143 .remove = poodle_lcd_remove,
144};
145
146static int __init poodle_lcd_init(void)
147{
148 int ret = locomo_driver_register(&poodle_lcd_driver);
149 if (ret) return ret;
150
151#ifdef CONFIG_SA1100_COLLIE
152 sa1100fb_lcd_power = locomolcd_power;
153#endif
154 return 0;
155}
156device_initcall(poodle_lcd_init);
157
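locomolcd_power() above is exported so code outside this file can switch the panel; on collie the driver also installs it as the sa1100fb power hook. A hedged sketch of an external caller, assuming only the exported symbol (the helper name is hypothetical, not from the patch):

    extern void locomolcd_power(int on);

    /* Hypothetical platform helper: blank or unblank the LOCOMO panel. */
    static void example_panel_blank(int blank)
    {
            locomolcd_power(blank ? 0 : 1);
    }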
diff --git a/drivers/video/q40fb.c b/drivers/video/q40fb.c
index 71b69da0c40d..162012bb9264 100644
--- a/drivers/video/q40fb.c
+++ b/drivers/video/q40fb.c
@@ -21,7 +21,6 @@
21 21
22#include <asm/uaccess.h> 22#include <asm/uaccess.h>
23#include <asm/setup.h> 23#include <asm/setup.h>
24#include <asm/segment.h>
25#include <asm/system.h> 24#include <asm/system.h>
26#include <asm/q40_master.h> 25#include <asm/q40_master.h>
27#include <linux/fb.h> 26#include <linux/fb.h>
diff --git a/drivers/video/w100fb.c b/drivers/video/w100fb.c
index adcda697ea60..0030c071da8f 100644
--- a/drivers/video/w100fb.c
+++ b/drivers/video/w100fb.c
@@ -5,9 +5,15 @@
5 * 5 *
6 * Copyright (C) 2002, ATI Corp. 6 * Copyright (C) 2002, ATI Corp.
7 * Copyright (C) 2004-2005 Richard Purdie 7 * Copyright (C) 2004-2005 Richard Purdie
8 * Copyright (c) 2005 Ian Molton
8 * 9 *
9 * Rewritten for 2.6 by Richard Purdie <rpurdie@rpsys.net> 10 * Rewritten for 2.6 by Richard Purdie <rpurdie@rpsys.net>
10 * 11 *
12 * Generic platform support by Ian Molton <spyro@f2s.com>
13 * and Richard Purdie <rpurdie@rpsys.net>
14 *
15 * w32xx support by Ian Molton
16 *
11 * This program is free software; you can redistribute it and/or modify 17 * This program is free software; you can redistribute it and/or modify
12 * it under the terms of the GNU General Public License version 2 as 18 * it under the terms of the GNU General Public License version 2 as
13 * published by the Free Software Foundation. 19 * published by the Free Software Foundation.
@@ -21,7 +27,7 @@
21#include <linux/mm.h> 27#include <linux/mm.h>
22#include <linux/device.h> 28#include <linux/device.h>
23#include <linux/string.h> 29#include <linux/string.h>
24#include <linux/proc_fs.h> 30#include <linux/vmalloc.h>
25#include <asm/io.h> 31#include <asm/io.h>
26#include <asm/uaccess.h> 32#include <asm/uaccess.h>
27#include <video/w100fb.h> 33#include <video/w100fb.h>
@@ -30,114 +36,78 @@
30/* 36/*
31 * Prototypes 37 * Prototypes
32 */ 38 */
33static void w100fb_save_buffer(void);
34static void w100fb_clear_buffer(void);
35static void w100fb_restore_buffer(void);
36static void w100fb_clear_screen(u32 mode, long int offset);
37static void w100_resume(void);
38static void w100_suspend(u32 mode); 39static void w100_suspend(u32 mode);
39static void w100_init_qvga_rotation(u16 deg);
40static void w100_init_vga_rotation(u16 deg);
41static void w100_vsync(void); 40static void w100_vsync(void);
42static void w100_init_sharp_lcd(u32 mode); 41static void w100_hw_init(struct w100fb_par*);
43static void w100_pwm_setup(void); 42static void w100_pwm_setup(struct w100fb_par*);
44static void w100_InitExtMem(u32 mode); 43static void w100_init_clocks(struct w100fb_par*);
45static void w100_hw_init(void); 44static void w100_setup_memory(struct w100fb_par*);
46static u16 w100_set_fastsysclk(u16 Freq); 45static void w100_init_lcd(struct w100fb_par*);
47 46static void w100_set_dispregs(struct w100fb_par*);
48static void lcdtg_hw_init(u32 mode); 47static void w100_update_enable(void);
49static void lcdtg_lcd_change(u32 mode); 48static void w100_update_disable(void);
50static void lcdtg_resume(void); 49static void calc_hsync(struct w100fb_par *par);
51static void lcdtg_suspend(void); 50struct w100_pll_info *w100_get_xtal_table(unsigned int freq);
52
53
54/* Register offsets & lengths */
55#define REMAPPED_FB_LEN 0x15ffff
56
57#define BITS_PER_PIXEL 16
58 51
59/* Pseudo palette size */ 52/* Pseudo palette size */
60#define MAX_PALETTES 16 53#define MAX_PALETTES 16
61 54
62/* for resolution change */
63#define LCD_MODE_INIT (-1)
64#define LCD_MODE_480 0
65#define LCD_MODE_320 1
66#define LCD_MODE_240 2
67#define LCD_MODE_640 3
68
69#define LCD_SHARP_QVGA 0
70#define LCD_SHARP_VGA 1
71
72#define LCD_MODE_PORTRAIT 0
73#define LCD_MODE_LANDSCAPE 1
74
75#define W100_SUSPEND_EXTMEM 0 55#define W100_SUSPEND_EXTMEM 0
76#define W100_SUSPEND_ALL 1 56#define W100_SUSPEND_ALL 1
77 57
78/* General frame buffer data structures */ 58#define BITS_PER_PIXEL 16
79struct w100fb_par {
80 u32 xres;
81 u32 yres;
82 int fastsysclk_mode;
83 int lcdMode;
84 int rotation_flag;
85 int blanking_flag;
86 int comadj;
87 int phadadj;
88};
89
90static struct w100fb_par *current_par;
91 59
92/* Remapped addresses for base cfg, memmapped regs and the frame buffer itself */ 60/* Remapped addresses for base cfg, memmapped regs and the frame buffer itself */
93static void *remapped_base; 61static void *remapped_base;
94static void *remapped_regs; 62static void *remapped_regs;
95static void *remapped_fbuf; 63static void *remapped_fbuf;
96 64
97/* External Function */ 65#define REMAPPED_FB_LEN 0x15ffff
98static void(*w100fb_ssp_send)(u8 adrs, u8 data); 66
 67/* This is the offset in the w100's address space that we map the current
 68 framebuffer memory to. We use the position of external memory, since
 69 we can remap internal memory there if external memory isn't present. */
70#define W100_FB_BASE MEM_EXT_BASE_VALUE
71
99 72
100/* 73/*
101 * Sysfs functions 74 * Sysfs functions
102 */ 75 */
103 76static ssize_t flip_show(struct device *dev, struct device_attribute *attr, char *buf)
104static ssize_t rotation_show(struct device *dev, struct device_attribute *attr, char *buf)
105{ 77{
106 struct fb_info *info = dev_get_drvdata(dev); 78 struct fb_info *info = dev_get_drvdata(dev);
107 struct w100fb_par *par=info->par; 79 struct w100fb_par *par=info->par;
108 80
109 return sprintf(buf, "%d\n",par->rotation_flag); 81 return sprintf(buf, "%d\n",par->flip);
110} 82}
111 83
112static ssize_t rotation_store(struct device *dev, struct device_attribute *attr, const char *buf, size_t count) 84static ssize_t flip_store(struct device *dev, struct device_attribute *attr, const char *buf, size_t count)
113{ 85{
114 unsigned int rotate; 86 unsigned int flip;
115 struct fb_info *info = dev_get_drvdata(dev); 87 struct fb_info *info = dev_get_drvdata(dev);
116 struct w100fb_par *par=info->par; 88 struct w100fb_par *par=info->par;
117 89
118 rotate = simple_strtoul(buf, NULL, 10); 90 flip = simple_strtoul(buf, NULL, 10);
91
92 if (flip > 0)
93 par->flip = 1;
94 else
95 par->flip = 0;
119 96
120 if (rotate > 0) par->rotation_flag = 1; 97 w100_update_disable();
121 else par->rotation_flag = 0; 98 w100_set_dispregs(par);
99 w100_update_enable();
122 100
123 if (par->lcdMode == LCD_MODE_320) 101 calc_hsync(par);
124 w100_init_qvga_rotation(par->rotation_flag ? 270 : 90);
125 else if (par->lcdMode == LCD_MODE_240)
126 w100_init_qvga_rotation(par->rotation_flag ? 180 : 0);
127 else if (par->lcdMode == LCD_MODE_640)
128 w100_init_vga_rotation(par->rotation_flag ? 270 : 90);
129 else if (par->lcdMode == LCD_MODE_480)
130 w100_init_vga_rotation(par->rotation_flag ? 180 : 0);
131 102
132 return count; 103 return count;
133} 104}
134 105
135static DEVICE_ATTR(rotation, 0644, rotation_show, rotation_store); 106static DEVICE_ATTR(flip, 0644, flip_show, flip_store);
136 107
137static ssize_t w100fb_reg_read(struct device *dev, struct device_attribute *attr, const char *buf, size_t count) 108static ssize_t w100fb_reg_read(struct device *dev, struct device_attribute *attr, const char *buf, size_t count)
138{ 109{
139 unsigned long param; 110 unsigned long regs, param;
140 unsigned long regs;
141 regs = simple_strtoul(buf, NULL, 16); 111 regs = simple_strtoul(buf, NULL, 16);
142 param = readl(remapped_regs + regs); 112 param = readl(remapped_regs + regs);
143 printk("Read Register 0x%08lX: 0x%08lX\n", regs, param); 113 printk("Read Register 0x%08lX: 0x%08lX\n", regs, param);
@@ -148,8 +118,7 @@ static DEVICE_ATTR(reg_read, 0200, NULL, w100fb_reg_read);
148 118
149static ssize_t w100fb_reg_write(struct device *dev, struct device_attribute *attr, const char *buf, size_t count) 119static ssize_t w100fb_reg_write(struct device *dev, struct device_attribute *attr, const char *buf, size_t count)
150{ 120{
151 unsigned long regs; 121 unsigned long regs, param;
152 unsigned long param;
153 sscanf(buf, "%lx %lx", &regs, &param); 122 sscanf(buf, "%lx %lx", &regs, &param);
154 123
155 if (regs <= 0x2000) { 124 if (regs <= 0x2000) {
@@ -163,54 +132,56 @@ static ssize_t w100fb_reg_write(struct device *dev, struct device_attribute *att
163static DEVICE_ATTR(reg_write, 0200, NULL, w100fb_reg_write); 132static DEVICE_ATTR(reg_write, 0200, NULL, w100fb_reg_write);
164 133
165 134
166static ssize_t fastsysclk_show(struct device *dev, struct device_attribute *attr, char *buf) 135static ssize_t fastpllclk_show(struct device *dev, struct device_attribute *attr, char *buf)
167{ 136{
168 struct fb_info *info = dev_get_drvdata(dev); 137 struct fb_info *info = dev_get_drvdata(dev);
169 struct w100fb_par *par=info->par; 138 struct w100fb_par *par=info->par;
170 139
171 return sprintf(buf, "%d\n",par->fastsysclk_mode); 140 return sprintf(buf, "%d\n",par->fastpll_mode);
172} 141}
173 142
174static ssize_t fastsysclk_store(struct device *dev, struct device_attribute *attr, const char *buf, size_t count) 143static ssize_t fastpllclk_store(struct device *dev, struct device_attribute *attr, const char *buf, size_t count)
175{ 144{
176 int param;
177 struct fb_info *info = dev_get_drvdata(dev); 145 struct fb_info *info = dev_get_drvdata(dev);
178 struct w100fb_par *par=info->par; 146 struct w100fb_par *par=info->par;
179 147
180 param = simple_strtoul(buf, NULL, 10); 148 if (simple_strtoul(buf, NULL, 10) > 0) {
181 149 par->fastpll_mode=1;
182 if (param == 75) { 150 printk("w100fb: Using fast system clock (if possible)\n");
183 printk("Set fastsysclk %d\n", param); 151 } else {
184 par->fastsysclk_mode = param; 152 par->fastpll_mode=0;
185 w100_set_fastsysclk(par->fastsysclk_mode); 153 printk("w100fb: Using normal system clock\n");
186 } else if (param == 100) {
187 printk("Set fastsysclk %d\n", param);
188 par->fastsysclk_mode = param;
189 w100_set_fastsysclk(par->fastsysclk_mode);
190 } 154 }
155
156 w100_init_clocks(par);
157 calc_hsync(par);
158
191 return count; 159 return count;
192} 160}
193 161
194static DEVICE_ATTR(fastsysclk, 0644, fastsysclk_show, fastsysclk_store); 162static DEVICE_ATTR(fastpllclk, 0644, fastpllclk_show, fastpllclk_store);
195 163
196/* 164/*
197 * The touchscreen on this device needs certain information 165 * Some touchscreens need hsync information from the video driver to
198 * from the video driver to function correctly. We export it here. 166 * function correctly. We export it here.
199 */ 167 */
200int w100fb_get_xres(void) { 168unsigned long w100fb_get_hsynclen(struct device *dev)
201 return current_par->xres; 169{
202} 170 struct fb_info *info = dev_get_drvdata(dev);
171 struct w100fb_par *par=info->par;
203 172
204int w100fb_get_blanking(void) { 173 /* If display is blanked/suspended, hsync isn't active */
205 return current_par->blanking_flag; 174 if (par->blanked)
175 return 0;
176 else
177 return par->hsync_len;
206} 178}
179EXPORT_SYMBOL(w100fb_get_hsynclen);
207 180
208int w100fb_get_fastsysclk(void) { 181static void w100fb_clear_screen(struct w100fb_par *par)
209 return current_par->fastsysclk_mode; 182{
183 memset_io(remapped_fbuf + (W100_FB_BASE-MEM_WINDOW_BASE), 0, (par->xres * par->yres * BITS_PER_PIXEL/8));
210} 184}
211EXPORT_SYMBOL(w100fb_get_xres);
212EXPORT_SYMBOL(w100fb_get_blanking);
213EXPORT_SYMBOL(w100fb_get_fastsysclk);
214 185
215 186
216/* 187/*
@@ -234,7 +205,6 @@ static int w100fb_setcolreg(u_int regno, u_int red, u_int green, u_int blue,
234 * according to the RGB bitfield information. 205 * according to the RGB bitfield information.
235 */ 206 */
236 if (regno < MAX_PALETTES) { 207 if (regno < MAX_PALETTES) {
237
238 u32 *pal = info->pseudo_palette; 208 u32 *pal = info->pseudo_palette;
239 209
240 val = (red & 0xf800) | ((green & 0xfc00) >> 5) | ((blue & 0xf800) >> 11); 210 val = (red & 0xf800) | ((green & 0xfc00) >> 5) | ((blue & 0xf800) >> 11);
@@ -250,115 +220,90 @@ static int w100fb_setcolreg(u_int regno, u_int red, u_int green, u_int blue,
250 */ 220 */
251static int w100fb_blank(int blank_mode, struct fb_info *info) 221static int w100fb_blank(int blank_mode, struct fb_info *info)
252{ 222{
253 struct w100fb_par *par; 223 struct w100fb_par *par = info->par;
254 par=info->par; 224 struct w100_tg_info *tg = par->mach->tg;
255 225
256 switch(blank_mode) { 226 switch(blank_mode) {
257 227
258 case FB_BLANK_NORMAL: /* Normal blanking */ 228 case FB_BLANK_NORMAL: /* Normal blanking */
259 case FB_BLANK_VSYNC_SUSPEND: /* VESA blank (vsync off) */ 229 case FB_BLANK_VSYNC_SUSPEND: /* VESA blank (vsync off) */
260 case FB_BLANK_HSYNC_SUSPEND: /* VESA blank (hsync off) */ 230 case FB_BLANK_HSYNC_SUSPEND: /* VESA blank (hsync off) */
261 case FB_BLANK_POWERDOWN: /* Poweroff */ 231 case FB_BLANK_POWERDOWN: /* Poweroff */
262 if (par->blanking_flag == 0) { 232 if (par->blanked == 0) {
263 w100fb_save_buffer(); 233 if(tg && tg->suspend)
264 lcdtg_suspend(); 234 tg->suspend(par);
265 par->blanking_flag = 1; 235 par->blanked = 1;
266 } 236 }
267 break; 237 break;
268 238
269 case FB_BLANK_UNBLANK: /* Unblanking */ 239 case FB_BLANK_UNBLANK: /* Unblanking */
270 if (par->blanking_flag != 0) { 240 if (par->blanked != 0) {
271 w100fb_restore_buffer(); 241 if(tg && tg->resume)
272 lcdtg_resume(); 242 tg->resume(par);
273 par->blanking_flag = 0; 243 par->blanked = 0;
274 } 244 }
275 break; 245 break;
276 } 246 }
277 return 0; 247 return 0;
278} 248}
279 249
250
280/* 251/*
281 * Change the resolution by calling the appropriate hardware functions 252 * Change the resolution by calling the appropriate hardware functions
282 */ 253 */
283static void w100fb_changeres(int rotate_mode, u32 mode) 254static void w100fb_activate_var(struct w100fb_par *par)
284{ 255{
285 u16 rotation=0; 256 struct w100_tg_info *tg = par->mach->tg;
286
287 switch(rotate_mode) {
288 case LCD_MODE_LANDSCAPE:
289 rotation=(current_par->rotation_flag ? 270 : 90);
290 break;
291 case LCD_MODE_PORTRAIT:
292 rotation=(current_par->rotation_flag ? 180 : 0);
293 break;
294 }
295 257
296 w100_pwm_setup(); 258 w100_pwm_setup(par);
297 switch(mode) { 259 w100_setup_memory(par);
298 case LCD_SHARP_QVGA: 260 w100_init_clocks(par);
299 w100_vsync(); 261 w100fb_clear_screen(par);
300 w100_suspend(W100_SUSPEND_EXTMEM); 262 w100_vsync();
301 w100_init_sharp_lcd(LCD_SHARP_QVGA); 263
302 w100_init_qvga_rotation(rotation); 264 w100_update_disable();
303 w100_InitExtMem(LCD_SHARP_QVGA); 265 w100_init_lcd(par);
304 w100fb_clear_screen(LCD_SHARP_QVGA, 0); 266 w100_set_dispregs(par);
305 lcdtg_lcd_change(LCD_SHARP_QVGA); 267 w100_update_enable();
306 break; 268
307 case LCD_SHARP_VGA: 269 calc_hsync(par);
308 w100fb_clear_screen(LCD_SHARP_QVGA, 0); 270
309 writel(0xBFFFA000, remapped_regs + mmMC_EXT_MEM_LOCATION); 271 if (!par->blanked && tg && tg->change)
310 w100_InitExtMem(LCD_SHARP_VGA); 272 tg->change(par);
311 w100fb_clear_screen(LCD_SHARP_VGA, 0x200000);
312 w100_vsync();
313 w100_init_sharp_lcd(LCD_SHARP_VGA);
314 if (rotation != 0)
315 w100_init_vga_rotation(rotation);
316 lcdtg_lcd_change(LCD_SHARP_VGA);
317 break;
318 }
319} 273}
320 274
321/* 275
322 * Set up the display for the fb subsystem 276/* Select the smallest mode that allows the desired resolution to be
277 * displayed. If desired, the x and y parameters can be rounded up to
278 * match the selected mode.
323 */ 279 */
324static void w100fb_activate_var(struct fb_info *info) 280static struct w100_mode *w100fb_get_mode(struct w100fb_par *par, unsigned int *x, unsigned int *y, int saveval)
325{ 281{
326 u32 temp32; 282 struct w100_mode *mode = NULL;
327 struct w100fb_par *par=info->par; 283 struct w100_mode *modelist = par->mach->modelist;
328 struct fb_var_screeninfo *var = &info->var; 284 unsigned int best_x = 0xffffffff, best_y = 0xffffffff;
285 unsigned int i;
286
287 for (i = 0 ; i < par->mach->num_modes ; i++) {
288 if (modelist[i].xres >= *x && modelist[i].yres >= *y &&
289 modelist[i].xres < best_x && modelist[i].yres < best_y) {
290 best_x = modelist[i].xres;
291 best_y = modelist[i].yres;
292 mode = &modelist[i];
293 } else if(modelist[i].xres >= *y && modelist[i].yres >= *x &&
294 modelist[i].xres < best_y && modelist[i].yres < best_x) {
295 best_x = modelist[i].yres;
296 best_y = modelist[i].xres;
297 mode = &modelist[i];
298 }
299 }
329 300
330 /* Set the hardware to 565 */ 301 if (mode && saveval) {
331 temp32 = readl(remapped_regs + mmDISP_DEBUG2); 302 *x = best_x;
332 temp32 &= 0xff7fffff; 303 *y = best_y;
333 temp32 |= 0x00800000; 304 }
334 writel(temp32, remapped_regs + mmDISP_DEBUG2);
335 305
336 if (par->lcdMode == LCD_MODE_INIT) { 306 return mode;
337 w100_init_sharp_lcd(LCD_SHARP_VGA);
338 w100_init_vga_rotation(par->rotation_flag ? 270 : 90);
339 par->lcdMode = LCD_MODE_640;
340 lcdtg_hw_init(LCD_SHARP_VGA);
341 } else if (var->xres == 320 && var->yres == 240) {
342 if (par->lcdMode != LCD_MODE_320) {
343 w100fb_changeres(LCD_MODE_LANDSCAPE, LCD_SHARP_QVGA);
344 par->lcdMode = LCD_MODE_320;
345 }
346 } else if (var->xres == 240 && var->yres == 320) {
347 if (par->lcdMode != LCD_MODE_240) {
348 w100fb_changeres(LCD_MODE_PORTRAIT, LCD_SHARP_QVGA);
349 par->lcdMode = LCD_MODE_240;
350 }
351 } else if (var->xres == 640 && var->yres == 480) {
352 if (par->lcdMode != LCD_MODE_640) {
353 w100fb_changeres(LCD_MODE_LANDSCAPE, LCD_SHARP_VGA);
354 par->lcdMode = LCD_MODE_640;
355 }
356 } else if (var->xres == 480 && var->yres == 640) {
357 if (par->lcdMode != LCD_MODE_480) {
358 w100fb_changeres(LCD_MODE_PORTRAIT, LCD_SHARP_VGA);
359 par->lcdMode = LCD_MODE_480;
360 }
361 } else printk(KERN_ERR "W100FB: Resolution error!\n");
362} 307}
363 308
364 309
@@ -366,31 +311,19 @@ static void w100fb_activate_var(struct fb_info *info)
366 * w100fb_check_var(): 311 * w100fb_check_var():
367 * Get the video params out of 'var'. If a value doesn't fit, round it up, 312 * Get the video params out of 'var'. If a value doesn't fit, round it up,
368 * if it's too big, return -EINVAL. 313 * if it's too big, return -EINVAL.
369 *
370 */ 314 */
371static int w100fb_check_var(struct fb_var_screeninfo *var, struct fb_info *info) 315static int w100fb_check_var(struct fb_var_screeninfo *var, struct fb_info *info)
372{ 316{
373 if (var->xres < var->yres) { /* Portrait mode */ 317 struct w100fb_par *par=info->par;
374 if ((var->xres > 480) || (var->yres > 640)) { 318
375 return -EINVAL; 319 if(!w100fb_get_mode(par, &var->xres, &var->yres, 1))
376 } else if ((var->xres > 240) || (var->yres > 320)) { 320 return -EINVAL;
377 var->xres = 480; 321
378 var->yres = 640; 322 if (par->mach->mem && ((var->xres*var->yres*BITS_PER_PIXEL/8) > (par->mach->mem->size+1)))
379 } else { 323 return -EINVAL;
380 var->xres = 240; 324
381 var->yres = 320; 325 if (!par->mach->mem && ((var->xres*var->yres*BITS_PER_PIXEL/8) > (MEM_INT_SIZE+1)))
382 } 326 return -EINVAL;
383 } else { /* Landscape mode */
384 if ((var->xres > 640) || (var->yres > 480)) {
385 return -EINVAL;
386 } else if ((var->xres > 320) || (var->yres > 240)) {
387 var->xres = 640;
388 var->yres = 480;
389 } else {
390 var->xres = 320;
391 var->yres = 240;
392 }
393 }
394 327
395 var->xres_virtual = max(var->xres_virtual, var->xres); 328 var->xres_virtual = max(var->xres_virtual, var->xres);
396 var->yres_virtual = max(var->yres_virtual, var->yres); 329 var->yres_virtual = max(var->yres_virtual, var->yres);
@@ -409,13 +342,11 @@ static int w100fb_check_var(struct fb_var_screeninfo *var, struct fb_info *info)
409 var->transp.offset = var->transp.length = 0; 342 var->transp.offset = var->transp.length = 0;
410 343
411 var->nonstd = 0; 344 var->nonstd = 0;
412
413 var->height = -1; 345 var->height = -1;
414 var->width = -1; 346 var->width = -1;
415 var->vmode = FB_VMODE_NONINTERLACED; 347 var->vmode = FB_VMODE_NONINTERLACED;
416
417 var->sync = 0; 348 var->sync = 0;
418 var->pixclock = 0x04; /* 171521; */ 349 var->pixclock = 0x04; /* 171521; */
419 350
420 return 0; 351 return 0;
421} 352}
@@ -430,274 +361,286 @@ static int w100fb_set_par(struct fb_info *info)
430{ 361{
431 struct w100fb_par *par=info->par; 362 struct w100fb_par *par=info->par;
432 363
433 par->xres = info->var.xres; 364 if (par->xres != info->var.xres || par->yres != info->var.yres) {
434 par->yres = info->var.yres; 365 par->xres = info->var.xres;
435 366 par->yres = info->var.yres;
436 info->fix.visual = FB_VISUAL_TRUECOLOR; 367 par->mode = w100fb_get_mode(par, &par->xres, &par->yres, 0);
437
438 info->fix.ypanstep = 0;
439 info->fix.ywrapstep = 0;
440 368
441 if (par->blanking_flag) 369 info->fix.visual = FB_VISUAL_TRUECOLOR;
442 w100fb_clear_buffer(); 370 info->fix.ypanstep = 0;
371 info->fix.ywrapstep = 0;
372 info->fix.line_length = par->xres * BITS_PER_PIXEL / 8;
443 373
444 w100fb_activate_var(info); 374 if ((par->xres*par->yres*BITS_PER_PIXEL/8) > (MEM_INT_SIZE+1)) {
375 par->extmem_active = 1;
376 info->fix.smem_len = par->mach->mem->size+1;
377 } else {
378 par->extmem_active = 0;
379 info->fix.smem_len = MEM_INT_SIZE+1;
380 }
445 381
446 if (par->lcdMode == LCD_MODE_480) { 382 w100fb_activate_var(par);
447 info->fix.line_length = (480 * BITS_PER_PIXEL) / 8;
448 info->fix.smem_len = 0x200000;
449 } else if (par->lcdMode == LCD_MODE_320) {
450 info->fix.line_length = (320 * BITS_PER_PIXEL) / 8;
451 info->fix.smem_len = 0x60000;
452 } else if (par->lcdMode == LCD_MODE_240) {
453 info->fix.line_length = (240 * BITS_PER_PIXEL) / 8;
454 info->fix.smem_len = 0x60000;
455 } else if (par->lcdMode == LCD_MODE_INIT || par->lcdMode == LCD_MODE_640) {
456 info->fix.line_length = (640 * BITS_PER_PIXEL) / 8;
457 info->fix.smem_len = 0x200000;
458 } 383 }
459
460 return 0; 384 return 0;
461} 385}
462 386
463 387
464/* 388/*
465 * Frame buffer operations 389 * Frame buffer operations
466 */ 390 */
467static struct fb_ops w100fb_ops = { 391static struct fb_ops w100fb_ops = {
468 .owner = THIS_MODULE, 392 .owner = THIS_MODULE,
469 .fb_check_var = w100fb_check_var, 393 .fb_check_var = w100fb_check_var,
470 .fb_set_par = w100fb_set_par, 394 .fb_set_par = w100fb_set_par,
471 .fb_setcolreg = w100fb_setcolreg, 395 .fb_setcolreg = w100fb_setcolreg,
472 .fb_blank = w100fb_blank, 396 .fb_blank = w100fb_blank,
473 .fb_fillrect = cfb_fillrect, 397 .fb_fillrect = cfb_fillrect,
474 .fb_copyarea = cfb_copyarea, 398 .fb_copyarea = cfb_copyarea,
475 .fb_imageblit = cfb_imageblit, 399 .fb_imageblit = cfb_imageblit,
476 .fb_cursor = soft_cursor, 400 .fb_cursor = soft_cursor,
477}; 401};
478 402
479 403#ifdef CONFIG_PM
480static void w100fb_clear_screen(u32 mode, long int offset) 404static void w100fb_save_vidmem(struct w100fb_par *par)
481{ 405{
482 int i, numPix = 0; 406 int memsize;
483
484 if (mode == LCD_SHARP_VGA)
485 numPix = 640 * 480;
486 else if (mode == LCD_SHARP_QVGA)
487 numPix = 320 * 240;
488 407
489 for (i = 0; i < numPix; i++) 408 if (par->extmem_active) {
490 writew(0xffff, remapped_fbuf + offset + (2*i)); 409 memsize=par->mach->mem->size;
491} 410 par->saved_extmem = vmalloc(memsize);
492 411 if (par->saved_extmem)
493 412 memcpy_fromio(par->saved_extmem, remapped_fbuf + (W100_FB_BASE-MEM_WINDOW_BASE), memsize);
494/* Need to split up the buffers to stay within the limits of kmalloc */
495#define W100_BUF_NUM 6
496static uint32_t *gSaveImagePtr[W100_BUF_NUM] = { NULL };
497
498static void w100fb_save_buffer(void)
499{
500 int i, j, bufsize;
501
502 bufsize=(current_par->xres * current_par->yres * BITS_PER_PIXEL / 8) / W100_BUF_NUM;
503 for (i = 0; i < W100_BUF_NUM; i++) {
504 if (gSaveImagePtr[i] == NULL)
505 gSaveImagePtr[i] = kmalloc(bufsize, GFP_KERNEL);
506 if (gSaveImagePtr[i] == NULL) {
507 w100fb_clear_buffer();
508 printk(KERN_WARNING "can't alloc pre-off image buffer %d\n", i);
509 break;
510 }
511 for (j = 0; j < bufsize/4; j++)
512 *(gSaveImagePtr[i] + j) = readl(remapped_fbuf + (bufsize*i) + j*4);
513 } 413 }
414 memsize=MEM_INT_SIZE;
415 par->saved_intmem = vmalloc(memsize);
416 if (par->saved_intmem && par->extmem_active)
417 memcpy_fromio(par->saved_intmem, remapped_fbuf + (W100_FB_BASE-MEM_INT_BASE_VALUE), memsize);
418 else if (par->saved_intmem)
419 memcpy_fromio(par->saved_intmem, remapped_fbuf + (W100_FB_BASE-MEM_WINDOW_BASE), memsize);
514} 420}
515 421
516 422static void w100fb_restore_vidmem(struct w100fb_par *par)
517static void w100fb_restore_buffer(void)
518{ 423{
519 int i, j, bufsize; 424 int memsize;
520 425
521 bufsize=(current_par->xres * current_par->yres * BITS_PER_PIXEL / 8) / W100_BUF_NUM; 426 if (par->extmem_active && par->saved_extmem) {
522 for (i = 0; i < W100_BUF_NUM; i++) { 427 memsize=par->mach->mem->size;
523 if (gSaveImagePtr[i] == NULL) { 428 memcpy_toio(remapped_fbuf + (W100_FB_BASE-MEM_WINDOW_BASE), par->saved_extmem, memsize);
524 printk(KERN_WARNING "can't find pre-off image buffer %d\n", i); 429 vfree(par->saved_extmem);
525 w100fb_clear_buffer();
526 break;
527 }
528 for (j = 0; j < (bufsize/4); j++)
529 writel(*(gSaveImagePtr[i] + j),remapped_fbuf + (bufsize*i) + (j*4));
530 kfree(gSaveImagePtr[i]);
531 gSaveImagePtr[i] = NULL;
532 } 430 }
533} 431 if (par->saved_intmem) {
534 432 memsize=MEM_INT_SIZE;
535 433 if (par->extmem_active)
536static void w100fb_clear_buffer(void) 434 memcpy_toio(remapped_fbuf + (W100_FB_BASE-MEM_INT_BASE_VALUE), par->saved_intmem, memsize);
537{ 435 else
538 int i; 436 memcpy_toio(remapped_fbuf + (W100_FB_BASE-MEM_WINDOW_BASE), par->saved_intmem, memsize);
539 for (i = 0; i < W100_BUF_NUM; i++) { 437 vfree(par->saved_intmem);
540 kfree(gSaveImagePtr[i]);
541 gSaveImagePtr[i] = NULL;
542 } 438 }
543} 439}
544 440
545 441static int w100fb_suspend(struct device *dev, pm_message_t state, uint32_t level)
546#ifdef CONFIG_PM
547static int w100fb_suspend(struct device *dev, pm_message_t state, u32 level)
548{ 442{
549 if (level == SUSPEND_POWER_DOWN) { 443 if (level == SUSPEND_POWER_DOWN) {
550 struct fb_info *info = dev_get_drvdata(dev); 444 struct fb_info *info = dev_get_drvdata(dev);
551 struct w100fb_par *par=info->par; 445 struct w100fb_par *par=info->par;
446 struct w100_tg_info *tg = par->mach->tg;
552 447
553 w100fb_save_buffer(); 448 w100fb_save_vidmem(par);
554 lcdtg_suspend(); 449 if(tg && tg->suspend)
450 tg->suspend(par);
555 w100_suspend(W100_SUSPEND_ALL); 451 w100_suspend(W100_SUSPEND_ALL);
556 par->blanking_flag = 1; 452 par->blanked = 1;
557 } 453 }
558 return 0; 454 return 0;
559} 455}
560 456
561static int w100fb_resume(struct device *dev, u32 level) 457static int w100fb_resume(struct device *dev, uint32_t level)
562{ 458{
563 if (level == RESUME_POWER_ON) { 459 if (level == RESUME_POWER_ON) {
564 struct fb_info *info = dev_get_drvdata(dev); 460 struct fb_info *info = dev_get_drvdata(dev);
565 struct w100fb_par *par=info->par; 461 struct w100fb_par *par=info->par;
566 462 struct w100_tg_info *tg = par->mach->tg;
567 w100_resume(); 463
568 w100fb_restore_buffer(); 464 w100_hw_init(par);
569 lcdtg_resume(); 465 w100fb_activate_var(par);
570 par->blanking_flag = 0; 466 w100fb_restore_vidmem(par);
467 if(tg && tg->resume)
468 tg->resume(par);
469 par->blanked = 0;
571 } 470 }
572 return 0; 471 return 0;
573} 472}
574#else 473#else
575#define w100fb_suspend NULL 474#define w100fb_suspend NULL
576#define w100fb_resume NULL 475#define w100fb_resume NULL
577#endif 476#endif
578 477
579 478
580int __init w100fb_probe(struct device *dev) 479int __init w100fb_probe(struct device *dev)
581{ 480{
481 int err = -EIO;
582 struct w100fb_mach_info *inf; 482 struct w100fb_mach_info *inf;
583 struct fb_info *info; 483 struct fb_info *info = NULL;
584 struct w100fb_par *par; 484 struct w100fb_par *par;
585 struct platform_device *pdev = to_platform_device(dev); 485 struct platform_device *pdev = to_platform_device(dev);
586 struct resource *mem = platform_get_resource(pdev, IORESOURCE_MEM, 0); 486 struct resource *mem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
487 unsigned int chip_id;
587 488
588 if (!mem) 489 if (!mem)
589 return -EINVAL; 490 return -EINVAL;
590 491
591 /* remap the areas we're going to use */ 492 /* Remap the chip base address */
592 remapped_base = ioremap_nocache(mem->start+W100_CFG_BASE, W100_CFG_LEN); 493 remapped_base = ioremap_nocache(mem->start+W100_CFG_BASE, W100_CFG_LEN);
593 if (remapped_base == NULL) 494 if (remapped_base == NULL)
594 return -EIO; 495 goto out;
595 496
497 /* Map the register space */
596 remapped_regs = ioremap_nocache(mem->start+W100_REG_BASE, W100_REG_LEN); 498 remapped_regs = ioremap_nocache(mem->start+W100_REG_BASE, W100_REG_LEN);
597 if (remapped_regs == NULL) { 499 if (remapped_regs == NULL)
598 iounmap(remapped_base); 500 goto out;
599 return -EIO; 501
502 /* Identify the chip */
503 printk("Found ");
504 chip_id = readl(remapped_regs + mmCHIP_ID);
505 switch(chip_id) {
506 case CHIP_ID_W100: printk("w100"); break;
507 case CHIP_ID_W3200: printk("w3200"); break;
508 case CHIP_ID_W3220: printk("w3220"); break;
509 default:
510 printk("Unknown imageon chip ID\n");
511 err = -ENODEV;
512 goto out;
600 } 513 }
514 printk(" at 0x%08lx.\n", mem->start+W100_CFG_BASE);
601 515
602 remapped_fbuf = ioremap_nocache(mem->start+MEM_EXT_BASE_VALUE, REMAPPED_FB_LEN); 516 /* Remap the framebuffer */
603 if (remapped_fbuf == NULL) { 517 remapped_fbuf = ioremap_nocache(mem->start+MEM_WINDOW_BASE, MEM_WINDOW_SIZE);
604 iounmap(remapped_base); 518 if (remapped_fbuf == NULL)
605 iounmap(remapped_regs); 519 goto out;
606 return -EIO;
607 }
608 520
609 info=framebuffer_alloc(sizeof(struct w100fb_par), dev); 521 info=framebuffer_alloc(sizeof(struct w100fb_par), dev);
610 if (!info) { 522 if (!info) {
611 iounmap(remapped_base); 523 err = -ENOMEM;
612 iounmap(remapped_regs); 524 goto out;
613 iounmap(remapped_fbuf);
614 return -ENOMEM;
615 } 525 }
616 526
617 info->device=dev;
618 par = info->par; 527 par = info->par;
619 current_par=info->par;
620 dev_set_drvdata(dev, info); 528 dev_set_drvdata(dev, info);
621 529
622 inf = dev->platform_data; 530 inf = dev->platform_data;
623 par->phadadj = inf->phadadj; 531 par->chip_id = chip_id;
624 par->comadj = inf->comadj; 532 par->mach = inf;
625 par->fastsysclk_mode = 75; 533 par->fastpll_mode = 0;
626 par->lcdMode = LCD_MODE_INIT; 534 par->blanked = 0;
627 par->rotation_flag=0; 535
628 par->blanking_flag=0; 536 par->pll_table=w100_get_xtal_table(inf->xtal_freq);
629 w100fb_ssp_send = inf->w100fb_ssp_send; 537 if (!par->pll_table) {
630 538 printk(KERN_ERR "No matching Xtal definition found\n");
631 w100_hw_init(); 539 err = -EINVAL;
632 w100_pwm_setup(); 540 goto out;
541 }
633 542
634 info->pseudo_palette = kmalloc(sizeof (u32) * MAX_PALETTES, GFP_KERNEL); 543 info->pseudo_palette = kmalloc(sizeof (u32) * MAX_PALETTES, GFP_KERNEL);
635 if (!info->pseudo_palette) { 544 if (!info->pseudo_palette) {
636 iounmap(remapped_base); 545 err = -ENOMEM;
637 iounmap(remapped_regs); 546 goto out;
638 iounmap(remapped_fbuf);
639 return -ENOMEM;
640 } 547 }
641 548
642 info->fbops = &w100fb_ops; 549 info->fbops = &w100fb_ops;
643 info->flags = FBINFO_DEFAULT; 550 info->flags = FBINFO_DEFAULT;
644 info->node = -1; 551 info->node = -1;
645 info->screen_base = remapped_fbuf; 552 info->screen_base = remapped_fbuf + (W100_FB_BASE-MEM_WINDOW_BASE);
646 info->screen_size = REMAPPED_FB_LEN; 553 info->screen_size = REMAPPED_FB_LEN;
647 554
648 info->var.xres = 640; 555 strcpy(info->fix.id, "w100fb");
556 info->fix.type = FB_TYPE_PACKED_PIXELS;
557 info->fix.type_aux = 0;
558 info->fix.accel = FB_ACCEL_NONE;
559 info->fix.smem_start = mem->start+W100_FB_BASE;
560 info->fix.mmio_start = mem->start+W100_REG_BASE;
561 info->fix.mmio_len = W100_REG_LEN;
562
563 if (fb_alloc_cmap(&info->cmap, 256, 0) < 0) {
564 err = -ENOMEM;
565 goto out;
566 }
567
568 par->mode = &inf->modelist[0];
569 if(inf->init_mode & INIT_MODE_ROTATED) {
570 info->var.xres = par->mode->yres;
571 info->var.yres = par->mode->xres;
572 }
573 else {
574 info->var.xres = par->mode->xres;
575 info->var.yres = par->mode->yres;
576 }
577
578 if(inf->init_mode &= INIT_MODE_FLIPPED)
579 par->flip = 1;
580 else
581 par->flip = 0;
582
649 info->var.xres_virtual = info->var.xres; 583 info->var.xres_virtual = info->var.xres;
650 info->var.yres = 480;
651 info->var.yres_virtual = info->var.yres; 584 info->var.yres_virtual = info->var.yres;
652 info->var.pixclock = 0x04; /* 171521; */ 585 info->var.pixclock = 0x04; /* 171521; */
653 info->var.sync = 0; 586 info->var.sync = 0;
654 info->var.grayscale = 0; 587 info->var.grayscale = 0;
655 info->var.xoffset = info->var.yoffset = 0; 588 info->var.xoffset = info->var.yoffset = 0;
656 info->var.accel_flags = 0; 589 info->var.accel_flags = 0;
657 info->var.activate = FB_ACTIVATE_NOW; 590 info->var.activate = FB_ACTIVATE_NOW;
658 591
659 strcpy(info->fix.id, "w100fb"); 592 w100_hw_init(par);
660 info->fix.type = FB_TYPE_PACKED_PIXELS; 593
661 info->fix.type_aux = 0; 594 if (w100fb_check_var(&info->var, info) < 0) {
662 info->fix.accel = FB_ACCEL_NONE; 595 err = -EINVAL;
663 info->fix.smem_start = mem->start+MEM_EXT_BASE_VALUE; 596 goto out;
664 info->fix.mmio_start = mem->start+W100_REG_BASE; 597 }
665 info->fix.mmio_len = W100_REG_LEN;
666 598
667 w100fb_check_var(&info->var, info);
668 w100fb_set_par(info); 599 w100fb_set_par(info);
669 600
670 if (register_framebuffer(info) < 0) { 601 if (register_framebuffer(info) < 0) {
671 kfree(info->pseudo_palette); 602 err = -EINVAL;
672 iounmap(remapped_base); 603 goto out;
673 iounmap(remapped_regs);
674 iounmap(remapped_fbuf);
675 return -EINVAL;
676 } 604 }
677 605
678 device_create_file(dev, &dev_attr_fastsysclk); 606 device_create_file(dev, &dev_attr_fastpllclk);
679 device_create_file(dev, &dev_attr_reg_read); 607 device_create_file(dev, &dev_attr_reg_read);
680 device_create_file(dev, &dev_attr_reg_write); 608 device_create_file(dev, &dev_attr_reg_write);
681 device_create_file(dev, &dev_attr_rotation); 609 device_create_file(dev, &dev_attr_flip);
682 610
683 printk(KERN_INFO "fb%d: %s frame buffer device\n", info->node, info->fix.id); 611 printk(KERN_INFO "fb%d: %s frame buffer device\n", info->node, info->fix.id);
684 return 0; 612 return 0;
613out:
614 fb_dealloc_cmap(&info->cmap);
615 kfree(info->pseudo_palette);
616 if (remapped_fbuf != NULL)
617 iounmap(remapped_fbuf);
618 if (remapped_regs != NULL)
619 iounmap(remapped_regs);
620 if (remapped_base != NULL)
621 iounmap(remapped_base);
622 if (info)
623 framebuffer_release(info);
624 return err;
685} 625}
686 626
687 627
688static int w100fb_remove(struct device *dev) 628static int w100fb_remove(struct device *dev)
689{ 629{
690 struct fb_info *info = dev_get_drvdata(dev); 630 struct fb_info *info = dev_get_drvdata(dev);
631 struct w100fb_par *par=info->par;
691 632
692 device_remove_file(dev, &dev_attr_fastsysclk); 633 device_remove_file(dev, &dev_attr_fastpllclk);
693 device_remove_file(dev, &dev_attr_reg_read); 634 device_remove_file(dev, &dev_attr_reg_read);
694 device_remove_file(dev, &dev_attr_reg_write); 635 device_remove_file(dev, &dev_attr_reg_write);
695 device_remove_file(dev, &dev_attr_rotation); 636 device_remove_file(dev, &dev_attr_flip);
696 637
697 unregister_framebuffer(info); 638 unregister_framebuffer(info);
698 639
699 w100fb_clear_buffer(); 640 vfree(par->saved_intmem);
641 vfree(par->saved_extmem);
700 kfree(info->pseudo_palette); 642 kfree(info->pseudo_palette);
643 fb_dealloc_cmap(&info->cmap);
701 644
702 iounmap(remapped_base); 645 iounmap(remapped_base);
703 iounmap(remapped_regs); 646 iounmap(remapped_regs);
@@ -721,10 +664,54 @@ static void w100_soft_reset(void)
721 udelay(100); 664 udelay(100);
722} 665}
723 666
667static void w100_update_disable(void)
668{
669 union disp_db_buf_cntl_wr_u disp_db_buf_wr_cntl;
670
671 /* Prevent display updates */
672 disp_db_buf_wr_cntl.f.db_buf_cntl = 0x1e;
673 disp_db_buf_wr_cntl.f.update_db_buf = 0;
674 disp_db_buf_wr_cntl.f.en_db_buf = 0;
675 writel((u32) (disp_db_buf_wr_cntl.val), remapped_regs + mmDISP_DB_BUF_CNTL);
676}
677
678static void w100_update_enable(void)
679{
680 union disp_db_buf_cntl_wr_u disp_db_buf_wr_cntl;
681
682 /* Enable display updates */
683 disp_db_buf_wr_cntl.f.db_buf_cntl = 0x1e;
684 disp_db_buf_wr_cntl.f.update_db_buf = 1;
685 disp_db_buf_wr_cntl.f.en_db_buf = 1;
686 writel((u32) (disp_db_buf_wr_cntl.val), remapped_regs + mmDISP_DB_BUF_CNTL);
687}
688
689unsigned long w100fb_gpio_read(int port)
690{
691 unsigned long value;
692
693 if (port==W100_GPIO_PORT_A)
694 value = readl(remapped_regs + mmGPIO_DATA);
695 else
696 value = readl(remapped_regs + mmGPIO_DATA2);
697
698 return value;
699}
700
701void w100fb_gpio_write(int port, unsigned long value)
702{
703 if (port==W100_GPIO_PORT_A)
704 value = writel(value, remapped_regs + mmGPIO_DATA);
705 else
706 value = writel(value, remapped_regs + mmGPIO_DATA2);
707}
708EXPORT_SYMBOL(w100fb_gpio_read);
709EXPORT_SYMBOL(w100fb_gpio_write);
710
724/* 711/*
725 * Initialization of critical w100 hardware 712 * Initialization of critical w100 hardware
726 */ 713 */
727static void w100_hw_init(void) 714static void w100_hw_init(struct w100fb_par *par)
728{ 715{
729 u32 temp32; 716 u32 temp32;
730 union cif_cntl_u cif_cntl; 717 union cif_cntl_u cif_cntl;
@@ -735,8 +722,8 @@ static void w100_hw_init(void)
735 union cpu_defaults_u cpu_default; 722 union cpu_defaults_u cpu_default;
736 union cif_write_dbg_u cif_write_dbg; 723 union cif_write_dbg_u cif_write_dbg;
737 union wrap_start_dir_u wrap_start_dir; 724 union wrap_start_dir_u wrap_start_dir;
738 union mc_ext_mem_location_u mc_ext_mem_loc;
739 union cif_io_u cif_io; 725 union cif_io_u cif_io;
726 struct w100_gpio_regs *gpio = par->mach->gpio;
740 727
741 w100_soft_reset(); 728 w100_soft_reset();
742 729
@@ -791,19 +778,6 @@ static void w100_hw_init(void)
791 cfgreg_base.f.cfgreg_base = W100_CFG_BASE; 778 cfgreg_base.f.cfgreg_base = W100_CFG_BASE;
792 writel((u32) (cfgreg_base.val), remapped_regs + mmCFGREG_BASE); 779 writel((u32) (cfgreg_base.val), remapped_regs + mmCFGREG_BASE);
793 780
794 /* This location is relative to internal w100 addresses */
795 writel(0x15FF1000, remapped_regs + mmMC_FB_LOCATION);
796
797 mc_ext_mem_loc.val = defMC_EXT_MEM_LOCATION;
798 mc_ext_mem_loc.f.mc_ext_mem_start = MEM_EXT_BASE_VALUE >> 8;
799 mc_ext_mem_loc.f.mc_ext_mem_top = MEM_EXT_TOP_VALUE >> 8;
800 writel((u32) (mc_ext_mem_loc.val), remapped_regs + mmMC_EXT_MEM_LOCATION);
801
802 if ((current_par->lcdMode == LCD_MODE_240) || (current_par->lcdMode == LCD_MODE_320))
803 w100_InitExtMem(LCD_SHARP_QVGA);
804 else
805 w100_InitExtMem(LCD_SHARP_VGA);
806
807 wrap_start_dir.val = defWRAP_START_DIR; 781 wrap_start_dir.val = defWRAP_START_DIR;
808 wrap_start_dir.f.start_addr = WRAP_BUF_BASE_VALUE >> 1; 782 wrap_start_dir.f.start_addr = WRAP_BUF_BASE_VALUE >> 1;
809 writel((u32) (wrap_start_dir.val), remapped_regs + mmWRAP_START_DIR); 783 writel((u32) (wrap_start_dir.val), remapped_regs + mmWRAP_START_DIR);
@@ -813,21 +787,24 @@ static void w100_hw_init(void)
813 writel((u32) (wrap_top_dir.val), remapped_regs + mmWRAP_TOP_DIR); 787 writel((u32) (wrap_top_dir.val), remapped_regs + mmWRAP_TOP_DIR);
814 788
815 writel((u32) 0x2440, remapped_regs + mmRBBM_CNTL); 789 writel((u32) 0x2440, remapped_regs + mmRBBM_CNTL);
816}
817 790
791 /* Set the hardware to 565 colour */
792 temp32 = readl(remapped_regs + mmDISP_DEBUG2);
793 temp32 &= 0xff7fffff;
794 temp32 |= 0x00800000;
795 writel(temp32, remapped_regs + mmDISP_DEBUG2);
818 796
819/* 797 /* Initialise the GPIO lines */
820 * Types 798 if (gpio) {
821 */ 799 writel(gpio->init_data1, remapped_regs + mmGPIO_DATA);
800 writel(gpio->init_data2, remapped_regs + mmGPIO_DATA2);
801 writel(gpio->gpio_dir1, remapped_regs + mmGPIO_CNTL1);
802 writel(gpio->gpio_oe1, remapped_regs + mmGPIO_CNTL2);
803 writel(gpio->gpio_dir2, remapped_regs + mmGPIO_CNTL3);
804 writel(gpio->gpio_oe2, remapped_regs + mmGPIO_CNTL4);
805 }
806}
822 807
823struct pll_parm {
824 u16 freq; /* desired Fout for PLL */
825 u8 M;
826 u8 N_int;
827 u8 N_fac;
828 u8 tfgoal;
829 u8 lock_time;
830};
831 808
832struct power_state { 809struct power_state {
833 union clk_pin_cntl_u clk_pin_cntl; 810 union clk_pin_cntl_u clk_pin_cntl;
@@ -835,317 +812,275 @@ struct power_state {
835 union pll_cntl_u pll_cntl; 812 union pll_cntl_u pll_cntl;
836 union sclk_cntl_u sclk_cntl; 813 union sclk_cntl_u sclk_cntl;
837 union pclk_cntl_u pclk_cntl; 814 union pclk_cntl_u pclk_cntl;
838 union clk_test_cntl_u clk_test_cntl;
839 union pwrmgt_cntl_u pwrmgt_cntl; 815 union pwrmgt_cntl_u pwrmgt_cntl;
840 u32 freq; /* Fout for PLL calibration */ 816 int auto_mode; /* system clock auto changing? */
841 u8 tf100; /* for pll calibration */
842 u8 tf80; /* for pll calibration */
843 u8 tf20; /* for pll calibration */
844 u8 M; /* for pll calibration */
845 u8 N_int; /* for pll calibration */
846 u8 N_fac; /* for pll calibration */
847 u8 lock_time; /* for pll calibration */
848 u8 tfgoal; /* for pll calibration */
849 u8 auto_mode; /* hardware auto switch? */
850 u8 pwm_mode; /* 0 fast, 1 normal/slow */
851 u16 fast_sclk; /* fast clk freq */
852 u16 norm_sclk; /* slow clk freq */
853}; 817};
854 818
855 819
856/*
857 * Global state variables
858 */
859
860static struct power_state w100_pwr_state; 820static struct power_state w100_pwr_state;
861 821
862/* This table is specific for 12.5MHz ref crystal. */ 822/* The PLL Fout is determined by (XtalFreq/(M+1)) * ((N_int+1) + (N_fac/8)) */
863static struct pll_parm gPLLTable[] = { 823
864 /*freq M N_int N_fac tfgoal lock_time */ 824/* 12.5MHz Crystal PLL Table */
865 { 50, 0, 1, 0, 0xE0, 56}, /* 50.00 MHz */ 825static struct w100_pll_info xtal_12500000[] = {
866 { 75, 0, 5, 0, 0xDE, 37}, /* 75.00 MHz */ 826 /*freq M N_int N_fac tfgoal lock_time */
867 {100, 0, 7, 0, 0xE0, 28}, /* 100.00 MHz */ 827 { 50, 0, 1, 0, 0xe0, 56}, /* 50.00 MHz */
868 {125, 0, 9, 0, 0xE0, 22}, /* 125.00 MHz */ 828 { 75, 0, 5, 0, 0xde, 37}, /* 75.00 MHz */
869 {150, 0, 11, 0, 0xE0, 17}, /* 150.00 MHz */ 829 {100, 0, 7, 0, 0xe0, 28}, /* 100.00 MHz */
870 { 0, 0, 0, 0, 0, 0} /* Terminator */ 830 {125, 0, 9, 0, 0xe0, 22}, /* 125.00 MHz */
831 {150, 0, 11, 0, 0xe0, 17}, /* 150.00 MHz */
832 { 0, 0, 0, 0, 0, 0}, /* Terminator */
871}; 833};
872 834
835/* 14.318MHz Crystal PLL Table */
836static struct w100_pll_info xtal_14318000[] = {
837 /*freq M N_int N_fac tfgoal lock_time */
838 { 40, 4, 13, 0, 0xe0, 80}, /* tfgoal guessed */
839 { 50, 1, 6, 0, 0xe0, 64}, /* 50.05 MHz */
840 { 57, 2, 11, 0, 0xe0, 53}, /* tfgoal guessed */
841 { 75, 0, 4, 3, 0xe0, 43}, /* 75.08 MHz */
842 {100, 0, 6, 0, 0xe0, 32}, /* 100.10 MHz */
843 { 0, 0, 0, 0, 0, 0},
844};
873 845
874static u8 w100_pll_get_testcount(u8 testclk_sel) 846/* 16MHz Crystal PLL Table */
847static struct w100_pll_info xtal_16000000[] = {
848 /*freq M N_int N_fac tfgoal lock_time */
849 { 72, 1, 8, 0, 0xe0, 48}, /* tfgoal guessed */
850 { 95, 1, 10, 7, 0xe0, 38}, /* tfgoal guessed */
851 { 96, 1, 11, 0, 0xe0, 36}, /* tfgoal guessed */
852 { 0, 0, 0, 0, 0, 0},
853};
854
855static struct pll_entries {
856 int xtal_freq;
857 struct w100_pll_info *pll_table;
858} w100_pll_tables[] = {
859 { 12500000, &xtal_12500000[0] },
860 { 14318000, &xtal_14318000[0] },
861 { 16000000, &xtal_16000000[0] },
862 { 0 },
863};
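A quick check of the Fout formula quoted above against the 12.5 MHz table (a worked example, not part of the patch): for the {100, 0, 7, 0, 0xe0, 28} entry,

    Fout = (xtal / (M + 1)) * ((N_int + 1) + N_fac/8)
         = (12.5 MHz / 1) * (8 + 0)
         = 100 MHz

which matches the 100.00 MHz annotation in the table.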
864
865struct w100_pll_info *w100_get_xtal_table(unsigned int freq)
875{ 866{
867 struct pll_entries *pll_entry = w100_pll_tables;
868
869 do {
870 if (freq == pll_entry->xtal_freq)
871 return pll_entry->pll_table;
872 pll_entry++;
873 } while (pll_entry->xtal_freq);
874 return 0;
875}
876
877
878static unsigned int w100_get_testcount(unsigned int testclk_sel)
879{
880 union clk_test_cntl_u clk_test_cntl;
881
876 udelay(5); 882 udelay(5);
877 883
878 w100_pwr_state.clk_test_cntl.f.start_check_freq = 0x0; 884 /* Select the test clock source and reset */
879 w100_pwr_state.clk_test_cntl.f.testclk_sel = testclk_sel; 885 clk_test_cntl.f.start_check_freq = 0x0;
880 w100_pwr_state.clk_test_cntl.f.tstcount_rst = 0x1; /*reset test count */ 886 clk_test_cntl.f.testclk_sel = testclk_sel;
881 writel((u32) (w100_pwr_state.clk_test_cntl.val), remapped_regs + mmCLK_TEST_CNTL); 887 clk_test_cntl.f.tstcount_rst = 0x1; /* set reset */
882 w100_pwr_state.clk_test_cntl.f.tstcount_rst = 0x0; 888 writel((u32) (clk_test_cntl.val), remapped_regs + mmCLK_TEST_CNTL);
883 writel((u32) (w100_pwr_state.clk_test_cntl.val), remapped_regs + mmCLK_TEST_CNTL);
884 889
885 w100_pwr_state.clk_test_cntl.f.start_check_freq = 0x1; 890 clk_test_cntl.f.tstcount_rst = 0x0; /* clear reset */
886 writel((u32) (w100_pwr_state.clk_test_cntl.val), remapped_regs + mmCLK_TEST_CNTL); 891 writel((u32) (clk_test_cntl.val), remapped_regs + mmCLK_TEST_CNTL);
887 892
893 /* Run clock test */
894 clk_test_cntl.f.start_check_freq = 0x1;
895 writel((u32) (clk_test_cntl.val), remapped_regs + mmCLK_TEST_CNTL);
896
897 /* Give the test time to complete */
888 udelay(20); 898 udelay(20);
889 899
890 w100_pwr_state.clk_test_cntl.val = readl(remapped_regs + mmCLK_TEST_CNTL); 900 /* Return the result */
891 w100_pwr_state.clk_test_cntl.f.start_check_freq = 0x0; 901 clk_test_cntl.val = readl(remapped_regs + mmCLK_TEST_CNTL);
892 writel((u32) (w100_pwr_state.clk_test_cntl.val), remapped_regs + mmCLK_TEST_CNTL); 902 clk_test_cntl.f.start_check_freq = 0x0;
903 writel((u32) (clk_test_cntl.val), remapped_regs + mmCLK_TEST_CNTL);
893 904
894 return w100_pwr_state.clk_test_cntl.f.test_count; 905 return clk_test_cntl.f.test_count;
895} 906}
896 907
897 908
898static u8 w100_pll_adjust(void) 909static int w100_pll_adjust(struct w100_pll_info *pll)
899{ 910{
911 unsigned int tf80;
912 unsigned int tf20;
913
914 /* Initial Settings */
915 w100_pwr_state.pll_cntl.f.pll_pwdn = 0x0; /* power down */
916 w100_pwr_state.pll_cntl.f.pll_reset = 0x0; /* not reset */
917 w100_pwr_state.pll_cntl.f.pll_tcpoff = 0x1; /* Hi-Z */
918 w100_pwr_state.pll_cntl.f.pll_pvg = 0x0; /* VCO gain = 0 */
919 w100_pwr_state.pll_cntl.f.pll_vcofr = 0x0; /* VCO frequency range control = off */
920 w100_pwr_state.pll_cntl.f.pll_ioffset = 0x0; /* current offset inside VCO = 0 */
921 w100_pwr_state.pll_cntl.f.pll_ring_off = 0x0;
922
 923 /* Wai Ming: 80 percent of VDD (1.3V) gives 1.04V; the minimum
 924 * operating voltage is 1.08V, therefore the following lines were
 925 * commented out (tf80 meant tf100).
 926 */
900 do { 927 do {
901 /* Wai Ming 80 percent of VDD 1.3V gives 1.04V, minimum operating voltage is 1.08V 928 /* set VCO input = 0.8 * VDD */
902 * therefore, commented out the following lines
903 * tf80 meant tf100
904 * set VCO input = 0.8 * VDD
905 */
906 w100_pwr_state.pll_cntl.f.pll_dactal = 0xd; 929 w100_pwr_state.pll_cntl.f.pll_dactal = 0xd;
907 writel((u32) (w100_pwr_state.pll_cntl.val), remapped_regs + mmPLL_CNTL); 930 writel((u32) (w100_pwr_state.pll_cntl.val), remapped_regs + mmPLL_CNTL);
908 931
909 w100_pwr_state.tf80 = w100_pll_get_testcount(0x1); /* PLLCLK */ 932 tf80 = w100_get_testcount(TESTCLK_SRC_PLL);
910 if (w100_pwr_state.tf80 >= (w100_pwr_state.tfgoal)) { 933 if (tf80 >= (pll->tfgoal)) {
911 /* set VCO input = 0.2 * VDD */ 934 /* set VCO input = 0.2 * VDD */
912 w100_pwr_state.pll_cntl.f.pll_dactal = 0x7; 935 w100_pwr_state.pll_cntl.f.pll_dactal = 0x7;
913 writel((u32) (w100_pwr_state.pll_cntl.val), remapped_regs + mmPLL_CNTL); 936 writel((u32) (w100_pwr_state.pll_cntl.val), remapped_regs + mmPLL_CNTL);
914 937
915 w100_pwr_state.tf20 = w100_pll_get_testcount(0x1); /* PLLCLK */ 938 tf20 = w100_get_testcount(TESTCLK_SRC_PLL);
916 if (w100_pwr_state.tf20 <= (w100_pwr_state.tfgoal)) 939 if (tf20 <= (pll->tfgoal))
917 return 1; // Success 940 return 1; /* Success */
918 941
919 if ((w100_pwr_state.pll_cntl.f.pll_vcofr == 0x0) && 942 if ((w100_pwr_state.pll_cntl.f.pll_vcofr == 0x0) &&
920 ((w100_pwr_state.pll_cntl.f.pll_pvg == 0x7) || 943 ((w100_pwr_state.pll_cntl.f.pll_pvg == 0x7) ||
921 (w100_pwr_state.pll_cntl.f.pll_ioffset == 0x0))) { 944 (w100_pwr_state.pll_cntl.f.pll_ioffset == 0x0))) {
922 /* slow VCO config */ 945 /* slow VCO config */
923 w100_pwr_state.pll_cntl.f.pll_vcofr = 0x1; 946 w100_pwr_state.pll_cntl.f.pll_vcofr = 0x1;
924 w100_pwr_state.pll_cntl.f.pll_pvg = 0x0; 947 w100_pwr_state.pll_cntl.f.pll_pvg = 0x0;
925 w100_pwr_state.pll_cntl.f.pll_ioffset = 0x0; 948 w100_pwr_state.pll_cntl.f.pll_ioffset = 0x0;
926 writel((u32) (w100_pwr_state.pll_cntl.val),
927 remapped_regs + mmPLL_CNTL);
928 continue; 949 continue;
929 } 950 }
930 } 951 }
931 if ((w100_pwr_state.pll_cntl.f.pll_ioffset) < 0x3) { 952 if ((w100_pwr_state.pll_cntl.f.pll_ioffset) < 0x3) {
932 w100_pwr_state.pll_cntl.f.pll_ioffset += 0x1; 953 w100_pwr_state.pll_cntl.f.pll_ioffset += 0x1;
933 writel((u32) (w100_pwr_state.pll_cntl.val), remapped_regs + mmPLL_CNTL); 954 } else if ((w100_pwr_state.pll_cntl.f.pll_pvg) < 0x7) {
934 continue;
935 }
936 if ((w100_pwr_state.pll_cntl.f.pll_pvg) < 0x7) {
937 w100_pwr_state.pll_cntl.f.pll_ioffset = 0x0; 955 w100_pwr_state.pll_cntl.f.pll_ioffset = 0x0;
938 w100_pwr_state.pll_cntl.f.pll_pvg += 0x1; 956 w100_pwr_state.pll_cntl.f.pll_pvg += 0x1;
939 writel((u32) (w100_pwr_state.pll_cntl.val), remapped_regs + mmPLL_CNTL); 957 } else {
940 continue; 958 return 0; /* Error */
941 } 959 }
942 return 0; // error
943 } while(1); 960 } while(1);
944} 961}
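
The loop above succeeds only when the two test counts bracket the goal: the count measured with the VCO input at 0.8*VDD (tf80) must reach pll->tfgoal, while the count measured at 0.2*VDD (tf20) must not exceed it. A minimal sketch of that acceptance test, as a hypothetical helper (not part of the driver) using the same meanings for tf80, tf20 and tfgoal:

static int pll_counts_bracket_goal(unsigned int tf80, unsigned int tf20,
                                   unsigned int tfgoal)
{
	/* tf80: count at VCO input 0.8*VDD, tf20: count at 0.2*VDD */
	return (tf80 >= tfgoal) && (tf20 <= tfgoal);
}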
945 962
946 963
947/* 964/*
948 * w100_pll_calibration 965 * w100_pll_calibration
949 * freq = target frequency of the PLL
950 * (note: crystal = 14.3MHz)
951 */ 966 */
952static u8 w100_pll_calibration(u32 freq) 967static int w100_pll_calibration(struct w100_pll_info *pll)
953{ 968{
954 u8 status; 969 int status;
955
956 /* initial setting */
957 w100_pwr_state.pll_cntl.f.pll_pwdn = 0x0; /* power down */
958 w100_pwr_state.pll_cntl.f.pll_reset = 0x0; /* not reset */
959 w100_pwr_state.pll_cntl.f.pll_tcpoff = 0x1; /* Hi-Z */
960 w100_pwr_state.pll_cntl.f.pll_pvg = 0x0; /* VCO gain = 0 */
961 w100_pwr_state.pll_cntl.f.pll_vcofr = 0x0; /* VCO frequency range control = off */
962 w100_pwr_state.pll_cntl.f.pll_ioffset = 0x0; /* current offset inside VCO = 0 */
963 w100_pwr_state.pll_cntl.f.pll_ring_off = 0x0;
964 writel((u32) (w100_pwr_state.pll_cntl.val), remapped_regs + mmPLL_CNTL);
965 970
966	/* check for (tf80 >= tfgoal) && (tf20 <= tfgoal) */	971	status = w100_pll_adjust(pll);
967 if ((w100_pwr_state.tf80 < w100_pwr_state.tfgoal) || (w100_pwr_state.tf20 > w100_pwr_state.tfgoal)) {
968 status=w100_pll_adjust();
969 }
970 /* PLL Reset And Lock */
971 972
973 /* PLL Reset And Lock */
972 /* set VCO input = 0.5 * VDD */ 974 /* set VCO input = 0.5 * VDD */
973 w100_pwr_state.pll_cntl.f.pll_dactal = 0xa; 975 w100_pwr_state.pll_cntl.f.pll_dactal = 0xa;
974 writel((u32) (w100_pwr_state.pll_cntl.val), remapped_regs + mmPLL_CNTL); 976 writel((u32) (w100_pwr_state.pll_cntl.val), remapped_regs + mmPLL_CNTL);
975 977
976 /* reset time */ 978 udelay(1); /* reset time */
977 udelay(1);
978 979
979 /* enable charge pump */ 980 /* enable charge pump */
980 w100_pwr_state.pll_cntl.f.pll_tcpoff = 0x0; /* normal */ 981 w100_pwr_state.pll_cntl.f.pll_tcpoff = 0x0; /* normal */
981 writel((u32) (w100_pwr_state.pll_cntl.val), remapped_regs + mmPLL_CNTL); 982 writel((u32) (w100_pwr_state.pll_cntl.val), remapped_regs + mmPLL_CNTL);
982 983
983 /* set VCO input = Hi-Z */ 984 /* set VCO input = Hi-Z, disable DAC */
984 /* disable DAC */
985 w100_pwr_state.pll_cntl.f.pll_dactal = 0x0; 985 w100_pwr_state.pll_cntl.f.pll_dactal = 0x0;
986 writel((u32) (w100_pwr_state.pll_cntl.val), remapped_regs + mmPLL_CNTL); 986 writel((u32) (w100_pwr_state.pll_cntl.val), remapped_regs + mmPLL_CNTL);
987 987
988 /* lock time */ 988 udelay(400); /* lock time */
989 udelay(400); /* delay 400 us */
990 989
991 /* PLL locked */ 990 /* PLL locked */
992 991
993 w100_pwr_state.sclk_cntl.f.sclk_src_sel = 0x1; /* PLL clock */
994 writel((u32) (w100_pwr_state.sclk_cntl.val), remapped_regs + mmSCLK_CNTL);
995
996 w100_pwr_state.tf100 = w100_pll_get_testcount(0x1); /* PLLCLK */
997
998 return status; 992 return status;
999} 993}
1000 994
1001 995
1002static u8 w100_pll_set_clk(void) 996static int w100_pll_set_clk(struct w100_pll_info *pll)
1003{ 997{
1004 u8 status; 998 int status;
1005 999
1006 if (w100_pwr_state.auto_mode == 1) /* auto mode */ 1000 if (w100_pwr_state.auto_mode == 1) /* auto mode */
1007 { 1001 {
1008 w100_pwr_state.pwrmgt_cntl.f.pwm_fast_noml_hw_en = 0x0; /* disable fast to normal */ 1002 w100_pwr_state.pwrmgt_cntl.f.pwm_fast_noml_hw_en = 0x0; /* disable fast to normal */
1009 w100_pwr_state.pwrmgt_cntl.f.pwm_noml_fast_hw_en = 0x0; /* disable normal to fast */ 1003 w100_pwr_state.pwrmgt_cntl.f.pwm_noml_fast_hw_en = 0x0; /* disable normal to fast */
1010 writel((u32) (w100_pwr_state.pwrmgt_cntl.val), remapped_regs + mmPWRMGT_CNTL); 1004 writel((u32) (w100_pwr_state.pwrmgt_cntl.val), remapped_regs + mmPWRMGT_CNTL);
1011 } 1005 }
1012 1006
1013 w100_pwr_state.sclk_cntl.f.sclk_src_sel = 0x0; /* crystal clock */ 1007 /* Set system clock source to XTAL whilst adjusting the PLL! */
1008 w100_pwr_state.sclk_cntl.f.sclk_src_sel = CLK_SRC_XTAL;
1014 writel((u32) (w100_pwr_state.sclk_cntl.val), remapped_regs + mmSCLK_CNTL); 1009 writel((u32) (w100_pwr_state.sclk_cntl.val), remapped_regs + mmSCLK_CNTL);
1015 1010
1016 w100_pwr_state.pll_ref_fb_div.f.pll_ref_div = w100_pwr_state.M; 1011 w100_pwr_state.pll_ref_fb_div.f.pll_ref_div = pll->M;
1017 w100_pwr_state.pll_ref_fb_div.f.pll_fb_div_int = w100_pwr_state.N_int; 1012 w100_pwr_state.pll_ref_fb_div.f.pll_fb_div_int = pll->N_int;
1018 w100_pwr_state.pll_ref_fb_div.f.pll_fb_div_frac = w100_pwr_state.N_fac; 1013 w100_pwr_state.pll_ref_fb_div.f.pll_fb_div_frac = pll->N_fac;
1019 w100_pwr_state.pll_ref_fb_div.f.pll_lock_time = w100_pwr_state.lock_time; 1014 w100_pwr_state.pll_ref_fb_div.f.pll_lock_time = pll->lock_time;
1020 writel((u32) (w100_pwr_state.pll_ref_fb_div.val), remapped_regs + mmPLL_REF_FB_DIV); 1015 writel((u32) (w100_pwr_state.pll_ref_fb_div.val), remapped_regs + mmPLL_REF_FB_DIV);
1021 1016
1022 w100_pwr_state.pwrmgt_cntl.f.pwm_mode_req = 0; 1017 w100_pwr_state.pwrmgt_cntl.f.pwm_mode_req = 0;
1023 writel((u32) (w100_pwr_state.pwrmgt_cntl.val), remapped_regs + mmPWRMGT_CNTL); 1018 writel((u32) (w100_pwr_state.pwrmgt_cntl.val), remapped_regs + mmPWRMGT_CNTL);
1024 1019
1025 status = w100_pll_calibration (w100_pwr_state.freq); 1020 status = w100_pll_calibration(pll);
1026 1021
1027 if (w100_pwr_state.auto_mode == 1) /* auto mode */ 1022 if (w100_pwr_state.auto_mode == 1) /* auto mode */
1028 { 1023 {
1029 w100_pwr_state.pwrmgt_cntl.f.pwm_fast_noml_hw_en = 0x1; /* reenable fast to normal */ 1024 w100_pwr_state.pwrmgt_cntl.f.pwm_fast_noml_hw_en = 0x1; /* reenable fast to normal */
1030 w100_pwr_state.pwrmgt_cntl.f.pwm_noml_fast_hw_en = 0x1; /* reenable normal to fast */ 1025 w100_pwr_state.pwrmgt_cntl.f.pwm_noml_fast_hw_en = 0x1; /* reenable normal to fast */
1031 writel((u32) (w100_pwr_state.pwrmgt_cntl.val), remapped_regs + mmPWRMGT_CNTL); 1026 writel((u32) (w100_pwr_state.pwrmgt_cntl.val), remapped_regs + mmPWRMGT_CNTL);
1032 } 1027 }
1033 return status; 1028 return status;
1034} 1029}
1035 1030
1036 1031/* freq = target frequency of the PLL */
1037/* assume reference crystal clk is 12.5MHz, 1032static int w100_set_pll_freq(struct w100fb_par *par, unsigned int freq)
1038 * and that doubling is not enabled.
1039 *
1040 * Freq = 12 == 12.5MHz.
1041 */
1042static u16 w100_set_slowsysclk(u16 freq)
1043{
1044 if (w100_pwr_state.norm_sclk == freq)
1045 return freq;
1046
1047 if (w100_pwr_state.auto_mode == 1) /* auto mode */
1048 return 0;
1049
1050 if (freq == 12) {
1051 w100_pwr_state.norm_sclk = freq;
1052 w100_pwr_state.sclk_cntl.f.sclk_post_div_slow = 0x0; /* Pslow = 1 */
1053 w100_pwr_state.sclk_cntl.f.sclk_src_sel = 0x0; /* crystal src */
1054
1055 writel((u32) (w100_pwr_state.sclk_cntl.val), remapped_regs + mmSCLK_CNTL);
1056
1057 w100_pwr_state.clk_pin_cntl.f.xtalin_pm_en = 0x1;
1058 writel((u32) (w100_pwr_state.clk_pin_cntl.val), remapped_regs + mmCLK_PIN_CNTL);
1059
1060 w100_pwr_state.pwrmgt_cntl.f.pwm_enable = 0x1;
1061 w100_pwr_state.pwrmgt_cntl.f.pwm_mode_req = 0x1;
1062 writel((u32) (w100_pwr_state.pwrmgt_cntl.val), remapped_regs + mmPWRMGT_CNTL);
1063 w100_pwr_state.pwm_mode = 1; /* normal mode */
1064 return freq;
1065 } else
1066 return 0;
1067}
1068
1069
1070static u16 w100_set_fastsysclk(u16 freq)
1071{ 1033{
1072 u16 pll_freq; 1034 struct w100_pll_info *pll = par->pll_table;
1073 int i;
1074
1075 while(1) {
1076 pll_freq = (u16) (freq * (w100_pwr_state.sclk_cntl.f.sclk_post_div_fast + 1));
1077 i = 0;
1078 do {
1079 if (pll_freq == gPLLTable[i].freq) {
1080 w100_pwr_state.freq = gPLLTable[i].freq * 1000000;
1081 w100_pwr_state.M = gPLLTable[i].M;
1082 w100_pwr_state.N_int = gPLLTable[i].N_int;
1083 w100_pwr_state.N_fac = gPLLTable[i].N_fac;
1084 w100_pwr_state.tfgoal = gPLLTable[i].tfgoal;
1085 w100_pwr_state.lock_time = gPLLTable[i].lock_time;
1086 w100_pwr_state.tf20 = 0xff; /* set highest */
1087 w100_pwr_state.tf80 = 0x00; /* set lowest */
1088
1089 w100_pll_set_clk();
1090 w100_pwr_state.pwm_mode = 0; /* fast mode */
1091 w100_pwr_state.fast_sclk = freq;
1092 return freq;
1093 }
1094 i++;
1095 } while(gPLLTable[i].freq);
1096 1035
1097 if (w100_pwr_state.auto_mode == 1) 1036 do {
1098 break; 1037 if (freq == pll->freq) {
1099 1038 return w100_pll_set_clk(pll);
1100 if (w100_pwr_state.sclk_cntl.f.sclk_post_div_fast == 0) 1039 }
1101 break; 1040 pll++;
1102 1041 } while(pll->freq);
1103 w100_pwr_state.sclk_cntl.f.sclk_post_div_fast -= 1;
1104 writel((u32) (w100_pwr_state.sclk_cntl.val), remapped_regs + mmSCLK_CNTL);
1105 }
1106 return 0; 1042 return 0;
1107} 1043}
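
w100_set_pll_freq() walks the machine's pll_table, a zero-terminated array of struct w100_pll_info entries, and programs the first entry whose freq field matches the requested frequency. A hypothetical entry might look like the sketch below; the field values are placeholders for illustration, not data from any real board file:

static struct w100_pll_info example_pll_table[] = {
	{ .freq = 75, .M = 0, .N_int = 5, .N_fac = 0, .tfgoal = 0xe0, .lock_time = 15 },
	{ .freq = 0 },	/* a zero freq terminates the table */
};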
1108 1044
1109
1110/* Set up an initial state. Some values/fields set 1045/* Set up an initial state. Some values/fields set
1111 here will be overwritten. */ 1046 here will be overwritten. */
1112static void w100_pwm_setup(void) 1047static void w100_pwm_setup(struct w100fb_par *par)
1113{ 1048{
1114 w100_pwr_state.clk_pin_cntl.f.osc_en = 0x1; 1049 w100_pwr_state.clk_pin_cntl.f.osc_en = 0x1;
1115 w100_pwr_state.clk_pin_cntl.f.osc_gain = 0x1f; 1050 w100_pwr_state.clk_pin_cntl.f.osc_gain = 0x1f;
1116 w100_pwr_state.clk_pin_cntl.f.dont_use_xtalin = 0x0; 1051 w100_pwr_state.clk_pin_cntl.f.dont_use_xtalin = 0x0;
1117 w100_pwr_state.clk_pin_cntl.f.xtalin_pm_en = 0x0; 1052 w100_pwr_state.clk_pin_cntl.f.xtalin_pm_en = 0x0;
1118 w100_pwr_state.clk_pin_cntl.f.xtalin_dbl_en = 0x0; /* no freq doubling */ 1053 w100_pwr_state.clk_pin_cntl.f.xtalin_dbl_en = par->mach->xtal_dbl ? 1 : 0;
1119 w100_pwr_state.clk_pin_cntl.f.cg_debug = 0x0; 1054 w100_pwr_state.clk_pin_cntl.f.cg_debug = 0x0;
1120 writel((u32) (w100_pwr_state.clk_pin_cntl.val), remapped_regs + mmCLK_PIN_CNTL); 1055 writel((u32) (w100_pwr_state.clk_pin_cntl.val), remapped_regs + mmCLK_PIN_CNTL);
1121 1056
1122 w100_pwr_state.sclk_cntl.f.sclk_src_sel = 0x0; /* Crystal Clk */ 1057 w100_pwr_state.sclk_cntl.f.sclk_src_sel = CLK_SRC_XTAL;
1123 w100_pwr_state.sclk_cntl.f.sclk_post_div_fast = 0x0; /* Pfast = 1 */ 1058 w100_pwr_state.sclk_cntl.f.sclk_post_div_fast = 0x0; /* Pfast = 1 */
1124 w100_pwr_state.sclk_cntl.f.sclk_clkon_hys = 0x3; 1059 w100_pwr_state.sclk_cntl.f.sclk_clkon_hys = 0x3;
1125 w100_pwr_state.sclk_cntl.f.sclk_post_div_slow = 0x0; /* Pslow = 1 */ 1060 w100_pwr_state.sclk_cntl.f.sclk_post_div_slow = 0x0; /* Pslow = 1 */
1126 w100_pwr_state.sclk_cntl.f.disp_cg_ok2switch_en = 0x0; 1061 w100_pwr_state.sclk_cntl.f.disp_cg_ok2switch_en = 0x0;
1127 w100_pwr_state.sclk_cntl.f.sclk_force_reg = 0x0; /* Dynamic */ 1062 w100_pwr_state.sclk_cntl.f.sclk_force_reg = 0x0; /* Dynamic */
1128 w100_pwr_state.sclk_cntl.f.sclk_force_disp = 0x0; /* Dynamic */ 1063 w100_pwr_state.sclk_cntl.f.sclk_force_disp = 0x0; /* Dynamic */
1129 w100_pwr_state.sclk_cntl.f.sclk_force_mc = 0x0; /* Dynamic */ 1064 w100_pwr_state.sclk_cntl.f.sclk_force_mc = 0x0; /* Dynamic */
1130 w100_pwr_state.sclk_cntl.f.sclk_force_extmc = 0x0; /* Dynamic */ 1065 w100_pwr_state.sclk_cntl.f.sclk_force_extmc = 0x0; /* Dynamic */
1131 w100_pwr_state.sclk_cntl.f.sclk_force_cp = 0x0; /* Dynamic */ 1066 w100_pwr_state.sclk_cntl.f.sclk_force_cp = 0x0; /* Dynamic */
1132 w100_pwr_state.sclk_cntl.f.sclk_force_e2 = 0x0; /* Dynamic */ 1067 w100_pwr_state.sclk_cntl.f.sclk_force_e2 = 0x0; /* Dynamic */
1133 w100_pwr_state.sclk_cntl.f.sclk_force_e3 = 0x0; /* Dynamic */ 1068 w100_pwr_state.sclk_cntl.f.sclk_force_e3 = 0x0; /* Dynamic */
1134 w100_pwr_state.sclk_cntl.f.sclk_force_idct = 0x0; /* Dynamic */ 1069 w100_pwr_state.sclk_cntl.f.sclk_force_idct = 0x0; /* Dynamic */
1135 w100_pwr_state.sclk_cntl.f.sclk_force_bist = 0x0; /* Dynamic */ 1070 w100_pwr_state.sclk_cntl.f.sclk_force_bist = 0x0; /* Dynamic */
1136 w100_pwr_state.sclk_cntl.f.busy_extend_cp = 0x0; 1071 w100_pwr_state.sclk_cntl.f.busy_extend_cp = 0x0;
1137 w100_pwr_state.sclk_cntl.f.busy_extend_e2 = 0x0; 1072 w100_pwr_state.sclk_cntl.f.busy_extend_e2 = 0x0;
1138 w100_pwr_state.sclk_cntl.f.busy_extend_e3 = 0x0; 1073 w100_pwr_state.sclk_cntl.f.busy_extend_e3 = 0x0;
1139 w100_pwr_state.sclk_cntl.f.busy_extend_idct = 0x0; 1074 w100_pwr_state.sclk_cntl.f.busy_extend_idct = 0x0;
1140 writel((u32) (w100_pwr_state.sclk_cntl.val), remapped_regs + mmSCLK_CNTL); 1075 writel((u32) (w100_pwr_state.sclk_cntl.val), remapped_regs + mmSCLK_CNTL);
1141 1076
1142 w100_pwr_state.pclk_cntl.f.pclk_src_sel = 0x0; /* Crystal Clk */ 1077 w100_pwr_state.pclk_cntl.f.pclk_src_sel = CLK_SRC_XTAL;
1143 w100_pwr_state.pclk_cntl.f.pclk_post_div = 0x1; /* P = 2 */ 1078 w100_pwr_state.pclk_cntl.f.pclk_post_div = 0x1; /* P = 2 */
1144 w100_pwr_state.pclk_cntl.f.pclk_force_disp = 0x0; /* Dynamic */ 1079 w100_pwr_state.pclk_cntl.f.pclk_force_disp = 0x0; /* Dynamic */
1145 writel((u32) (w100_pwr_state.pclk_cntl.val), remapped_regs + mmPCLK_CNTL); 1080 writel((u32) (w100_pwr_state.pclk_cntl.val), remapped_regs + mmPCLK_CNTL);
1146 1081
1147 w100_pwr_state.pll_ref_fb_div.f.pll_ref_div = 0x0; /* M = 1 */ 1082 w100_pwr_state.pll_ref_fb_div.f.pll_ref_div = 0x0; /* M = 1 */
1148 w100_pwr_state.pll_ref_fb_div.f.pll_fb_div_int = 0x0; /* N = 1.0 */ 1083 w100_pwr_state.pll_ref_fb_div.f.pll_fb_div_int = 0x0; /* N = 1.0 */
1149 w100_pwr_state.pll_ref_fb_div.f.pll_fb_div_frac = 0x0; 1084 w100_pwr_state.pll_ref_fb_div.f.pll_fb_div_frac = 0x0;
1150 w100_pwr_state.pll_ref_fb_div.f.pll_reset_time = 0x5; 1085 w100_pwr_state.pll_ref_fb_div.f.pll_reset_time = 0x5;
1151 w100_pwr_state.pll_ref_fb_div.f.pll_lock_time = 0xff; 1086 w100_pwr_state.pll_ref_fb_div.f.pll_lock_time = 0xff;
@@ -1154,7 +1089,7 @@ static void w100_pwm_setup(void)
1154 w100_pwr_state.pll_cntl.f.pll_pwdn = 0x1; 1089 w100_pwr_state.pll_cntl.f.pll_pwdn = 0x1;
1155 w100_pwr_state.pll_cntl.f.pll_reset = 0x1; 1090 w100_pwr_state.pll_cntl.f.pll_reset = 0x1;
1156 w100_pwr_state.pll_cntl.f.pll_pm_en = 0x0; 1091 w100_pwr_state.pll_cntl.f.pll_pm_en = 0x0;
1157 w100_pwr_state.pll_cntl.f.pll_mode = 0x0; /* uses VCO clock */ 1092 w100_pwr_state.pll_cntl.f.pll_mode = 0x0; /* uses VCO clock */
1158 w100_pwr_state.pll_cntl.f.pll_refclk_sel = 0x0; 1093 w100_pwr_state.pll_cntl.f.pll_refclk_sel = 0x0;
1159 w100_pwr_state.pll_cntl.f.pll_fbclk_sel = 0x0; 1094 w100_pwr_state.pll_cntl.f.pll_fbclk_sel = 0x0;
1160 w100_pwr_state.pll_cntl.f.pll_tcpoff = 0x0; 1095 w100_pwr_state.pll_cntl.f.pll_tcpoff = 0x0;
@@ -1164,220 +1099,275 @@ static void w100_pwm_setup(void)
1164 w100_pwr_state.pll_cntl.f.pll_ioffset = 0x0; 1099 w100_pwr_state.pll_cntl.f.pll_ioffset = 0x0;
1165 w100_pwr_state.pll_cntl.f.pll_pecc_mode = 0x0; 1100 w100_pwr_state.pll_cntl.f.pll_pecc_mode = 0x0;
1166 w100_pwr_state.pll_cntl.f.pll_pecc_scon = 0x0; 1101 w100_pwr_state.pll_cntl.f.pll_pecc_scon = 0x0;
1167 w100_pwr_state.pll_cntl.f.pll_dactal = 0x0; /* Hi-Z */ 1102 w100_pwr_state.pll_cntl.f.pll_dactal = 0x0; /* Hi-Z */
1168 w100_pwr_state.pll_cntl.f.pll_cp_clip = 0x3; 1103 w100_pwr_state.pll_cntl.f.pll_cp_clip = 0x3;
1169 w100_pwr_state.pll_cntl.f.pll_conf = 0x2; 1104 w100_pwr_state.pll_cntl.f.pll_conf = 0x2;
1170 w100_pwr_state.pll_cntl.f.pll_mbctrl = 0x2; 1105 w100_pwr_state.pll_cntl.f.pll_mbctrl = 0x2;
1171 w100_pwr_state.pll_cntl.f.pll_ring_off = 0x0; 1106 w100_pwr_state.pll_cntl.f.pll_ring_off = 0x0;
1172 writel((u32) (w100_pwr_state.pll_cntl.val), remapped_regs + mmPLL_CNTL); 1107 writel((u32) (w100_pwr_state.pll_cntl.val), remapped_regs + mmPLL_CNTL);
1173 1108
1174 w100_pwr_state.clk_test_cntl.f.testclk_sel = 0x1; /* PLLCLK (for testing) */
1175 w100_pwr_state.clk_test_cntl.f.start_check_freq = 0x0;
1176 w100_pwr_state.clk_test_cntl.f.tstcount_rst = 0x0;
1177 writel((u32) (w100_pwr_state.clk_test_cntl.val), remapped_regs + mmCLK_TEST_CNTL);
1178
1179 w100_pwr_state.pwrmgt_cntl.f.pwm_enable = 0x0; 1109 w100_pwr_state.pwrmgt_cntl.f.pwm_enable = 0x0;
1180 w100_pwr_state.pwrmgt_cntl.f.pwm_mode_req = 0x1; /* normal mode (0, 1, 3) */ 1110 w100_pwr_state.pwrmgt_cntl.f.pwm_mode_req = 0x1; /* normal mode (0, 1, 3) */
1181 w100_pwr_state.pwrmgt_cntl.f.pwm_wakeup_cond = 0x0; 1111 w100_pwr_state.pwrmgt_cntl.f.pwm_wakeup_cond = 0x0;
1182 w100_pwr_state.pwrmgt_cntl.f.pwm_fast_noml_hw_en = 0x0; 1112 w100_pwr_state.pwrmgt_cntl.f.pwm_fast_noml_hw_en = 0x0;
1183 w100_pwr_state.pwrmgt_cntl.f.pwm_noml_fast_hw_en = 0x0; 1113 w100_pwr_state.pwrmgt_cntl.f.pwm_noml_fast_hw_en = 0x0;
1184 w100_pwr_state.pwrmgt_cntl.f.pwm_fast_noml_cond = 0x1; /* PM4,ENG */ 1114 w100_pwr_state.pwrmgt_cntl.f.pwm_fast_noml_cond = 0x1; /* PM4,ENG */
1185 w100_pwr_state.pwrmgt_cntl.f.pwm_noml_fast_cond = 0x1; /* PM4,ENG */ 1115 w100_pwr_state.pwrmgt_cntl.f.pwm_noml_fast_cond = 0x1; /* PM4,ENG */
1186 w100_pwr_state.pwrmgt_cntl.f.pwm_idle_timer = 0xFF; 1116 w100_pwr_state.pwrmgt_cntl.f.pwm_idle_timer = 0xFF;
1187 w100_pwr_state.pwrmgt_cntl.f.pwm_busy_timer = 0xFF; 1117 w100_pwr_state.pwrmgt_cntl.f.pwm_busy_timer = 0xFF;
1188 writel((u32) (w100_pwr_state.pwrmgt_cntl.val), remapped_regs + mmPWRMGT_CNTL); 1118 writel((u32) (w100_pwr_state.pwrmgt_cntl.val), remapped_regs + mmPWRMGT_CNTL);
1189 1119
1190 w100_pwr_state.auto_mode = 0; /* manual mode */ 1120 w100_pwr_state.auto_mode = 0; /* manual mode */
1191 w100_pwr_state.pwm_mode = 1; /* normal mode (0, 1, 2) */
1192 w100_pwr_state.freq = 50000000; /* 50 MHz */
1193 w100_pwr_state.M = 3; /* M = 4 */
1194 w100_pwr_state.N_int = 6; /* N = 7.0 */
1195 w100_pwr_state.N_fac = 0;
1196 w100_pwr_state.tfgoal = 0xE0;
1197 w100_pwr_state.lock_time = 56;
1198 w100_pwr_state.tf20 = 0xff; /* set highest */
1199 w100_pwr_state.tf80 = 0x00; /* set lowest */
1200 w100_pwr_state.tf100 = 0x00; /* set lowest */
1201 w100_pwr_state.fast_sclk = 50; /* 50.0 MHz */
1202 w100_pwr_state.norm_sclk = 12; /* 12.5 MHz */
1203} 1121}
1204 1122
1205 1123
1206static void w100_init_sharp_lcd(u32 mode) 1124/*
1125 * Set up the w100 clocks for the specified mode
1126 */
1127static void w100_init_clocks(struct w100fb_par *par)
1207{ 1128{
1208 u32 temp32; 1129 struct w100_mode *mode = par->mode;
1209 union disp_db_buf_cntl_wr_u disp_db_buf_wr_cntl;
1210 1130
1211 /* Prevent display updates */ 1131 if (mode->pixclk_src == CLK_SRC_PLL || mode->sysclk_src == CLK_SRC_PLL)
1212 disp_db_buf_wr_cntl.f.db_buf_cntl = 0x1e; 1132 w100_set_pll_freq(par, (par->fastpll_mode && mode->fast_pll_freq) ? mode->fast_pll_freq : mode->pll_freq);
1213 disp_db_buf_wr_cntl.f.update_db_buf = 0;
1214 disp_db_buf_wr_cntl.f.en_db_buf = 0;
1215 writel((u32) (disp_db_buf_wr_cntl.val), remapped_regs + mmDISP_DB_BUF_CNTL);
1216 1133
1217 switch(mode) { 1134 w100_pwr_state.sclk_cntl.f.sclk_src_sel = mode->sysclk_src;
1218 case LCD_SHARP_QVGA: 1135 w100_pwr_state.sclk_cntl.f.sclk_post_div_fast = mode->sysclk_divider;
1219 w100_set_slowsysclk(12); /* use crystal -- 12.5MHz */ 1136 w100_pwr_state.sclk_cntl.f.sclk_post_div_slow = mode->sysclk_divider;
1220 /* not use PLL */ 1137 writel((u32) (w100_pwr_state.sclk_cntl.val), remapped_regs + mmSCLK_CNTL);
1221 1138}
1222 writel(0x7FFF8000, remapped_regs + mmMC_EXT_MEM_LOCATION); 1139
1223 writel(0x85FF8000, remapped_regs + mmMC_FB_LOCATION); 1140static void w100_init_lcd(struct w100fb_par *par)
1224 writel(0x00000003, remapped_regs + mmLCD_FORMAT); 1141{
1225 writel(0x00CF1C06, remapped_regs + mmGRAPHIC_CTRL); 1142 u32 temp32;
1226 writel(0x01410145, remapped_regs + mmCRTC_TOTAL); 1143 struct w100_mode *mode = par->mode;
1227 writel(0x01170027, remapped_regs + mmACTIVE_H_DISP); 1144 struct w100_gen_regs *regs = par->mach->regs;
1228 writel(0x01410001, remapped_regs + mmACTIVE_V_DISP); 1145 union active_h_disp_u active_h_disp;
1229 writel(0x01170027, remapped_regs + mmGRAPHIC_H_DISP); 1146 union active_v_disp_u active_v_disp;
1230 writel(0x01410001, remapped_regs + mmGRAPHIC_V_DISP); 1147 union graphic_h_disp_u graphic_h_disp;
1231 writel(0x81170027, remapped_regs + mmCRTC_SS); 1148 union graphic_v_disp_u graphic_v_disp;
1232 writel(0xA0140000, remapped_regs + mmCRTC_LS); 1149 union crtc_total_u crtc_total;
1233 writel(0x00400008, remapped_regs + mmCRTC_REV); 1150
1234	writel(0xA0000000, remapped_regs + mmCRTC_DCLK);	1151	/* w3200 doesn't like undefined bits being set so zero register values first */
1235 writel(0xC0140014, remapped_regs + mmCRTC_GS); 1152
1236 writel(0x00010141, remapped_regs + mmCRTC_VPOS_GS); 1153 active_h_disp.val = 0;
1237 writel(0x8015010F, remapped_regs + mmCRTC_GCLK); 1154 active_h_disp.f.active_h_start=mode->left_margin;
1238 writel(0x80100110, remapped_regs + mmCRTC_GOE); 1155 active_h_disp.f.active_h_end=mode->left_margin + mode->xres;
1239 writel(0x00000000, remapped_regs + mmCRTC_FRAME); 1156 writel(active_h_disp.val, remapped_regs + mmACTIVE_H_DISP);
1240 writel(0x00000000, remapped_regs + mmCRTC_FRAME_VPOS); 1157
1241 writel(0x01CC0000, remapped_regs + mmLCDD_CNTL1); 1158 active_v_disp.val = 0;
1242 writel(0x0003FFFF, remapped_regs + mmLCDD_CNTL2); 1159 active_v_disp.f.active_v_start=mode->upper_margin;
1243 writel(0x00FFFF0D, remapped_regs + mmGENLCD_CNTL1); 1160 active_v_disp.f.active_v_end=mode->upper_margin + mode->yres;
1244 writel(0x003F3003, remapped_regs + mmGENLCD_CNTL2); 1161 writel(active_v_disp.val, remapped_regs + mmACTIVE_V_DISP);
1245 writel(0x00000000, remapped_regs + mmCRTC_DEFAULT_COUNT); 1162
1246 writel(0x0000FF00, remapped_regs + mmLCD_BACKGROUND_COLOR); 1163 graphic_h_disp.val = 0;
1247 writel(0x000102aa, remapped_regs + mmGENLCD_CNTL3); 1164 graphic_h_disp.f.graphic_h_start=mode->left_margin;
1248 writel(0x00800000, remapped_regs + mmGRAPHIC_OFFSET); 1165 graphic_h_disp.f.graphic_h_end=mode->left_margin + mode->xres;
1249 writel(0x000001e0, remapped_regs + mmGRAPHIC_PITCH); 1166 writel(graphic_h_disp.val, remapped_regs + mmGRAPHIC_H_DISP);
1250 writel(0x000000bf, remapped_regs + mmGPIO_DATA); 1167
1251 writel(0x03c0feff, remapped_regs + mmGPIO_CNTL2); 1168 graphic_v_disp.val = 0;
1252 writel(0x00000000, remapped_regs + mmGPIO_CNTL1); 1169 graphic_v_disp.f.graphic_v_start=mode->upper_margin;
1253 writel(0x41060010, remapped_regs + mmCRTC_PS1_ACTIVE); 1170 graphic_v_disp.f.graphic_v_end=mode->upper_margin + mode->yres;
1254 break; 1171 writel(graphic_v_disp.val, remapped_regs + mmGRAPHIC_V_DISP);
1255 case LCD_SHARP_VGA: 1172
1256 w100_set_slowsysclk(12); /* use crystal -- 12.5MHz */ 1173 crtc_total.val = 0;
1257 w100_set_fastsysclk(current_par->fastsysclk_mode); /* use PLL -- 75.0MHz */ 1174 crtc_total.f.crtc_h_total=mode->left_margin + mode->xres + mode->right_margin;
1258 w100_pwr_state.pclk_cntl.f.pclk_src_sel = 0x1; 1175 crtc_total.f.crtc_v_total=mode->upper_margin + mode->yres + mode->lower_margin;
1259 w100_pwr_state.pclk_cntl.f.pclk_post_div = 0x2; 1176 writel(crtc_total.val, remapped_regs + mmCRTC_TOTAL);
1260 writel((u32) (w100_pwr_state.pclk_cntl.val), remapped_regs + mmPCLK_CNTL); 1177
1261 writel(0x15FF1000, remapped_regs + mmMC_FB_LOCATION); 1178 writel(mode->crtc_ss, remapped_regs + mmCRTC_SS);
1262 writel(0x9FFF8000, remapped_regs + mmMC_EXT_MEM_LOCATION); 1179 writel(mode->crtc_ls, remapped_regs + mmCRTC_LS);
1263 writel(0x00000003, remapped_regs + mmLCD_FORMAT); 1180 writel(mode->crtc_gs, remapped_regs + mmCRTC_GS);
1264 writel(0x00DE1D66, remapped_regs + mmGRAPHIC_CTRL); 1181 writel(mode->crtc_vpos_gs, remapped_regs + mmCRTC_VPOS_GS);
1265 1182 writel(mode->crtc_rev, remapped_regs + mmCRTC_REV);
1266 writel(0x0283028B, remapped_regs + mmCRTC_TOTAL); 1183 writel(mode->crtc_dclk, remapped_regs + mmCRTC_DCLK);
1267 writel(0x02360056, remapped_regs + mmACTIVE_H_DISP); 1184 writel(mode->crtc_gclk, remapped_regs + mmCRTC_GCLK);
1268 writel(0x02830003, remapped_regs + mmACTIVE_V_DISP); 1185 writel(mode->crtc_goe, remapped_regs + mmCRTC_GOE);
1269 writel(0x02360056, remapped_regs + mmGRAPHIC_H_DISP); 1186 writel(mode->crtc_ps1_active, remapped_regs + mmCRTC_PS1_ACTIVE);
1270 writel(0x02830003, remapped_regs + mmGRAPHIC_V_DISP); 1187
1271 writel(0x82360056, remapped_regs + mmCRTC_SS); 1188 writel(regs->lcd_format, remapped_regs + mmLCD_FORMAT);
1272 writel(0xA0280000, remapped_regs + mmCRTC_LS); 1189 writel(regs->lcdd_cntl1, remapped_regs + mmLCDD_CNTL1);
1273 writel(0x00400008, remapped_regs + mmCRTC_REV); 1190 writel(regs->lcdd_cntl2, remapped_regs + mmLCDD_CNTL2);
1274 writel(0xA0000000, remapped_regs + mmCRTC_DCLK); 1191 writel(regs->genlcd_cntl1, remapped_regs + mmGENLCD_CNTL1);
1275 writel(0x80280028, remapped_regs + mmCRTC_GS); 1192 writel(regs->genlcd_cntl2, remapped_regs + mmGENLCD_CNTL2);
1276 writel(0x02830002, remapped_regs + mmCRTC_VPOS_GS); 1193 writel(regs->genlcd_cntl3, remapped_regs + mmGENLCD_CNTL3);
1277 writel(0x8015010F, remapped_regs + mmCRTC_GCLK); 1194
1278 writel(0x80100110, remapped_regs + mmCRTC_GOE); 1195 writel(0x00000000, remapped_regs + mmCRTC_FRAME);
1279 writel(0x00000000, remapped_regs + mmCRTC_FRAME); 1196 writel(0x00000000, remapped_regs + mmCRTC_FRAME_VPOS);
1280 writel(0x00000000, remapped_regs + mmCRTC_FRAME_VPOS); 1197 writel(0x00000000, remapped_regs + mmCRTC_DEFAULT_COUNT);
1281 writel(0x01CC0000, remapped_regs + mmLCDD_CNTL1); 1198 writel(0x0000FF00, remapped_regs + mmLCD_BACKGROUND_COLOR);
1282 writel(0x0003FFFF, remapped_regs + mmLCDD_CNTL2);
1283 writel(0x00FFFF0D, remapped_regs + mmGENLCD_CNTL1);
1284 writel(0x003F3003, remapped_regs + mmGENLCD_CNTL2);
1285 writel(0x00000000, remapped_regs + mmCRTC_DEFAULT_COUNT);
1286 writel(0x0000FF00, remapped_regs + mmLCD_BACKGROUND_COLOR);
1287 writel(0x000102aa, remapped_regs + mmGENLCD_CNTL3);
1288 writel(0x00800000, remapped_regs + mmGRAPHIC_OFFSET);
1289 writel(0x000003C0, remapped_regs + mmGRAPHIC_PITCH);
1290 writel(0x000000bf, remapped_regs + mmGPIO_DATA);
1291 writel(0x03c0feff, remapped_regs + mmGPIO_CNTL2);
1292 writel(0x00000000, remapped_regs + mmGPIO_CNTL1);
1293 writel(0x41060010, remapped_regs + mmCRTC_PS1_ACTIVE);
1294 break;
1295 default:
1296 break;
1297 }
1298 1199
1299 /* Hack for overlay in ext memory */ 1200 /* Hack for overlay in ext memory */
1300 temp32 = readl(remapped_regs + mmDISP_DEBUG2); 1201 temp32 = readl(remapped_regs + mmDISP_DEBUG2);
1301 temp32 |= 0xc0000000; 1202 temp32 |= 0xc0000000;
1302 writel(temp32, remapped_regs + mmDISP_DEBUG2); 1203 writel(temp32, remapped_regs + mmDISP_DEBUG2);
1303
1304 /* Re-enable display updates */
1305 disp_db_buf_wr_cntl.f.db_buf_cntl = 0x1e;
1306 disp_db_buf_wr_cntl.f.update_db_buf = 1;
1307 disp_db_buf_wr_cntl.f.en_db_buf = 1;
1308 writel((u32) (disp_db_buf_wr_cntl.val), remapped_regs + mmDISP_DB_BUF_CNTL);
1309} 1204}
1310 1205
1311 1206
1312static void w100_set_vga_rotation_regs(u16 divider, unsigned long ctrl, unsigned long offset, unsigned long pitch) 1207static void w100_setup_memory(struct w100fb_par *par)
1313{ 1208{
1314 w100_pwr_state.pclk_cntl.f.pclk_src_sel = 0x1; 1209 union mc_ext_mem_location_u extmem_location;
1315 w100_pwr_state.pclk_cntl.f.pclk_post_div = divider; 1210 union mc_fb_location_u intmem_location;
1316 writel((u32) (w100_pwr_state.pclk_cntl.val), remapped_regs + mmPCLK_CNTL); 1211 struct w100_mem_info *mem = par->mach->mem;
1212 struct w100_bm_mem_info *bm_mem = par->mach->bm_mem;
1317 1213
1318 writel(ctrl, remapped_regs + mmGRAPHIC_CTRL); 1214 if (!par->extmem_active) {
1319 writel(offset, remapped_regs + mmGRAPHIC_OFFSET); 1215 w100_suspend(W100_SUSPEND_EXTMEM);
1320 writel(pitch, remapped_regs + mmGRAPHIC_PITCH);
1321 1216
1322 /* Re-enable display updates */ 1217 /* Map Internal Memory at FB Base */
1323 writel(0x0000007b, remapped_regs + mmDISP_DB_BUF_CNTL); 1218 intmem_location.f.mc_fb_start = W100_FB_BASE >> 8;
1324} 1219 intmem_location.f.mc_fb_top = (W100_FB_BASE+MEM_INT_SIZE) >> 8;
1220 writel((u32) (intmem_location.val), remapped_regs + mmMC_FB_LOCATION);
1325 1221
1222 /* Unmap External Memory - value is *probably* irrelevant but may have meaning
1223 to acceleration libraries */
1224 extmem_location.f.mc_ext_mem_start = MEM_EXT_BASE_VALUE >> 8;
1225 extmem_location.f.mc_ext_mem_top = (MEM_EXT_BASE_VALUE-1) >> 8;
1226 writel((u32) (extmem_location.val), remapped_regs + mmMC_EXT_MEM_LOCATION);
1227 } else {
1228 /* Map Internal Memory to its default location */
1229 intmem_location.f.mc_fb_start = MEM_INT_BASE_VALUE >> 8;
1230 intmem_location.f.mc_fb_top = (MEM_INT_BASE_VALUE+MEM_INT_SIZE) >> 8;
1231 writel((u32) (intmem_location.val), remapped_regs + mmMC_FB_LOCATION);
1326 1232
1327static void w100_init_vga_rotation(u16 deg) 1233 /* Map External Memory at FB Base */
1328{ 1234 extmem_location.f.mc_ext_mem_start = W100_FB_BASE >> 8;
1329 switch(deg) { 1235 extmem_location.f.mc_ext_mem_top = (W100_FB_BASE+par->mach->mem->size) >> 8;
1330 case 0: 1236 writel((u32) (extmem_location.val), remapped_regs + mmMC_EXT_MEM_LOCATION);
1331 w100_set_vga_rotation_regs(0x02, 0x00DE1D66, 0x00800000, 0x000003c0); 1237
1332 break; 1238 writel(0x00007800, remapped_regs + mmMC_BIST_CTRL);
1333 case 90: 1239 writel(mem->ext_cntl, remapped_regs + mmMEM_EXT_CNTL);
1334 w100_set_vga_rotation_regs(0x06, 0x00DE1D0e, 0x00895b00, 0x00000500); 1240 writel(0x00200021, remapped_regs + mmMEM_SDRAM_MODE_REG);
1335 break; 1241 udelay(100);
1336 case 180: 1242 writel(0x80200021, remapped_regs + mmMEM_SDRAM_MODE_REG);
1337 w100_set_vga_rotation_regs(0x02, 0x00DE1D7e, 0x00895ffc, 0x000003c0); 1243 udelay(100);
1338 break; 1244 writel(mem->sdram_mode_reg, remapped_regs + mmMEM_SDRAM_MODE_REG);
1339 case 270: 1245 udelay(100);
1340 w100_set_vga_rotation_regs(0x06, 0x00DE1D16, 0x008004fc, 0x00000500); 1246 writel(mem->ext_timing_cntl, remapped_regs + mmMEM_EXT_TIMING_CNTL);
1341 break; 1247 writel(mem->io_cntl, remapped_regs + mmMEM_IO_CNTL);
1342 default: 1248 if (bm_mem) {
1343	/* not supported */	1249	writel(bm_mem->ext_mem_bw, remapped_regs + mmBM_EXT_MEM_BANDWIDTH);
1344 break; 1250 writel(bm_mem->offset, remapped_regs + mmBM_OFFSET);
1251 writel(bm_mem->ext_timing_ctl, remapped_regs + mmBM_MEM_EXT_TIMING_CNTL);
1252 writel(bm_mem->ext_cntl, remapped_regs + mmBM_MEM_EXT_CNTL);
1253 writel(bm_mem->mode_reg, remapped_regs + mmBM_MEM_MODE_REG);
1254 writel(bm_mem->io_cntl, remapped_regs + mmBM_MEM_IO_CNTL);
1255 writel(bm_mem->config, remapped_regs + mmBM_CONFIG);
1256 }
1345 } 1257 }
1346} 1258}
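
The mc_fb_start/top and mc_ext_mem_start/top fields programmed above appear to take addresses in 256-byte units, which is why every base and top address is shifted right by 8. As a worked example, using MEM_INT_BASE_VALUE (0x100000) and MEM_INT_SIZE (0x05ffff) from the header further down:

	0x100000 >> 8              = 0x1000   (mc_fb_start)
	(0x100000 + 0x05ffff) >> 8 = 0x15ff   (mc_fb_top)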
1347 1259
1348 1260static void w100_set_dispregs(struct w100fb_par *par)
1349static void w100_set_qvga_rotation_regs(unsigned long ctrl, unsigned long offset, unsigned long pitch)
1350{ 1261{
1351 writel(ctrl, remapped_regs + mmGRAPHIC_CTRL); 1262 unsigned long rot=0, divider, offset=0;
1352 writel(offset, remapped_regs + mmGRAPHIC_OFFSET); 1263 union graphic_ctrl_u graphic_ctrl;
1353 writel(pitch, remapped_regs + mmGRAPHIC_PITCH); 1264
1265 /* See if the mode has been rotated */
1266 if (par->xres == par->mode->xres) {
1267 if (par->flip) {
1268 rot=3; /* 180 degree */
1269 offset=(par->xres * par->yres) - 1;
1270 } /* else 0 degree */
1271 divider = par->mode->pixclk_divider;
1272 } else {
1273 if (par->flip) {
1274 rot=2; /* 270 degree */
1275 offset=par->xres - 1;
1276 } else {
1277 rot=1; /* 90 degree */
1278 offset=par->xres * (par->yres - 1);
1279 }
1280 divider = par->mode->pixclk_divider_rotated;
1281 }
1354 1282
1355 /* Re-enable display updates */ 1283 graphic_ctrl.val = 0; /* w32xx doesn't like undefined bits */
1356 writel(0x0000007b, remapped_regs + mmDISP_DB_BUF_CNTL); 1284 switch (par->chip_id) {
1285 case CHIP_ID_W100:
1286 graphic_ctrl.f_w100.color_depth=6;
1287 graphic_ctrl.f_w100.en_crtc=1;
1288 graphic_ctrl.f_w100.en_graphic_req=1;
1289 graphic_ctrl.f_w100.en_graphic_crtc=1;
1290 graphic_ctrl.f_w100.lcd_pclk_on=1;
1291 graphic_ctrl.f_w100.lcd_sclk_on=1;
1292 graphic_ctrl.f_w100.low_power_on=0;
1293 graphic_ctrl.f_w100.req_freq=0;
1294 graphic_ctrl.f_w100.portrait_mode=rot;
1295
1296 /* Zaurus needs this */
1297 switch(par->xres) {
1298 case 240:
1299 case 320:
1300 default:
1301 graphic_ctrl.f_w100.total_req_graphic=0xa0;
1302 break;
1303 case 480:
1304 case 640:
1305 switch(rot) {
1306 case 0: /* 0 */
1307 case 3: /* 180 */
1308 graphic_ctrl.f_w100.low_power_on=1;
1309 graphic_ctrl.f_w100.req_freq=5;
1310 break;
1311 case 1: /* 90 */
1312 case 2: /* 270 */
1313 graphic_ctrl.f_w100.req_freq=4;
1314 break;
1315 default:
1316 break;
1317 }
1318 graphic_ctrl.f_w100.total_req_graphic=0xf0;
1319 break;
1320 }
1321 break;
1322 case CHIP_ID_W3200:
1323 case CHIP_ID_W3220:
1324 graphic_ctrl.f_w32xx.color_depth=6;
1325 graphic_ctrl.f_w32xx.en_crtc=1;
1326 graphic_ctrl.f_w32xx.en_graphic_req=1;
1327 graphic_ctrl.f_w32xx.en_graphic_crtc=1;
1328 graphic_ctrl.f_w32xx.lcd_pclk_on=1;
1329 graphic_ctrl.f_w32xx.lcd_sclk_on=1;
1330 graphic_ctrl.f_w32xx.low_power_on=0;
1331 graphic_ctrl.f_w32xx.req_freq=0;
1332 graphic_ctrl.f_w32xx.total_req_graphic=par->mode->xres >> 1; /* panel xres, not mode */
1333 graphic_ctrl.f_w32xx.portrait_mode=rot;
1334 break;
1335 }
1336
1337 /* Set the pixel clock source and divider */
1338 w100_pwr_state.pclk_cntl.f.pclk_src_sel = par->mode->pixclk_src;
1339 w100_pwr_state.pclk_cntl.f.pclk_post_div = divider;
1340 writel((u32) (w100_pwr_state.pclk_cntl.val), remapped_regs + mmPCLK_CNTL);
1341
1342 writel(graphic_ctrl.val, remapped_regs + mmGRAPHIC_CTRL);
1343 writel(W100_FB_BASE + ((offset * BITS_PER_PIXEL/8)&~0x03UL), remapped_regs + mmGRAPHIC_OFFSET);
1344 writel((par->xres*BITS_PER_PIXEL/8), remapped_regs + mmGRAPHIC_PITCH);
1357} 1345}
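
To make the rotation cases above concrete, the sketch below shows the start offset (in pixels) that the code computes for an assumed 320x240 panel, where the rotated modes are 240x320; the numbers are illustrative only, and the pixel offset is then scaled by BITS_PER_PIXEL/8 before being written to mmGRAPHIC_OFFSET:

static unsigned long example_start_offset(int rot)
{
	switch (rot) {
	case 0: return 0;               /*   0 degrees: first pixel        */
	case 3: return 320 * 240 - 1;   /* 180 degrees: last pixel (76799) */
	case 1: return 240 * (320 - 1); /*  90 degrees: start of last line */
	case 2: return 240 - 1;         /* 270 degrees: end of first line  */
	}
	return 0;
}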
1358 1346
1359 1347
1360static void w100_init_qvga_rotation(u16 deg) 1348/*
1349 * Work out how long the sync pulse lasts
1350 * Value is 1/(time in seconds)
1351 */
1352static void calc_hsync(struct w100fb_par *par)
1361{ 1353{
1362 switch(deg) { 1354 unsigned long hsync;
1363 case 0: 1355 struct w100_mode *mode = par->mode;
1364 w100_set_qvga_rotation_regs(0x00d41c06, 0x00800000, 0x000001e0); 1356 union crtc_ss_u crtc_ss;
1365 break; 1357
1366 case 90: 1358 if (mode->pixclk_src == CLK_SRC_XTAL)
1367 w100_set_qvga_rotation_regs(0x00d41c0E, 0x00825580, 0x00000280); 1359 hsync=par->mach->xtal_freq;
1368 break; 1360 else
1369 case 180: 1361 hsync=((par->fastpll_mode && mode->fast_pll_freq) ? mode->fast_pll_freq : mode->pll_freq)*100000;
1370 w100_set_qvga_rotation_regs(0x00d41c1e, 0x008257fc, 0x000001e0);
1371 break;
1372 case 270:
1373 w100_set_qvga_rotation_regs(0x00d41c16, 0x0080027c, 0x00000280);
1374 break;
1375 default:
1376	/* not supported */
1377 break;
1378 }
1379}
1380 1362
1363 hsync /= (w100_pwr_state.pclk_cntl.f.pclk_post_div + 1);
1364
1365 crtc_ss.val = readl(remapped_regs + mmCRTC_SS);
1366 if (crtc_ss.val)
1367 par->hsync_len = hsync / (crtc_ss.f.ss_end-crtc_ss.f.ss_start);
1368 else
1369 par->hsync_len = 0;
1370}
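
As a worked example of the arithmetic in calc_hsync(), assume a 12.5 MHz crystal as the pixel clock source, pclk_post_div = 1 and a sync pulse 16 pixels wide (all assumed values, not taken from any real mode):

static unsigned long example_hsync_len(void)
{
	unsigned long hsync = 12500000;	/* assumed xtal_freq, in Hz */

	hsync /= (1 + 1);		/* pclk_post_div = 1, so divide by 2 */
	return hsync / 16;		/* ss_end - ss_start = 16 -> 390625 */
}

The stored value is 1/t, so here the pulse would last 1/390625 s, roughly 2.56 us.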
1381 1371
1382static void w100_suspend(u32 mode) 1372static void w100_suspend(u32 mode)
1383{ 1373{
@@ -1387,30 +1377,28 @@ static void w100_suspend(u32 mode)
1387 writel(0x00FF0000, remapped_regs + mmMC_PERF_MON_CNTL); 1377 writel(0x00FF0000, remapped_regs + mmMC_PERF_MON_CNTL);
1388 1378
1389 val = readl(remapped_regs + mmMEM_EXT_TIMING_CNTL); 1379 val = readl(remapped_regs + mmMEM_EXT_TIMING_CNTL);
1390 val &= ~(0x00100000); /* bit20=0 */ 1380 val &= ~(0x00100000); /* bit20=0 */
1391 val |= 0xFF000000; /* bit31:24=0xff */ 1381 val |= 0xFF000000; /* bit31:24=0xff */
1392 writel(val, remapped_regs + mmMEM_EXT_TIMING_CNTL); 1382 writel(val, remapped_regs + mmMEM_EXT_TIMING_CNTL);
1393 1383
1394 val = readl(remapped_regs + mmMEM_EXT_CNTL); 1384 val = readl(remapped_regs + mmMEM_EXT_CNTL);
1395 val &= ~(0x00040000); /* bit18=0 */ 1385 val &= ~(0x00040000); /* bit18=0 */
1396 val |= 0x00080000; /* bit19=1 */ 1386 val |= 0x00080000; /* bit19=1 */
1397 writel(val, remapped_regs + mmMEM_EXT_CNTL); 1387 writel(val, remapped_regs + mmMEM_EXT_CNTL);
1398 1388
1399 udelay(1); /* wait 1us */ 1389 udelay(1); /* wait 1us */
1400 1390
1401 if (mode == W100_SUSPEND_EXTMEM) { 1391 if (mode == W100_SUSPEND_EXTMEM) {
1402
1403 /* CKE: Tri-State */ 1392 /* CKE: Tri-State */
1404 val = readl(remapped_regs + mmMEM_EXT_CNTL); 1393 val = readl(remapped_regs + mmMEM_EXT_CNTL);
1405 val |= 0x40000000; /* bit30=1 */ 1394 val |= 0x40000000; /* bit30=1 */
1406 writel(val, remapped_regs + mmMEM_EXT_CNTL); 1395 writel(val, remapped_regs + mmMEM_EXT_CNTL);
1407 1396
1408 /* CLK: Stop */ 1397 /* CLK: Stop */
1409 val = readl(remapped_regs + mmMEM_EXT_CNTL); 1398 val = readl(remapped_regs + mmMEM_EXT_CNTL);
1410 val &= ~(0x00000001); /* bit0=0 */ 1399 val &= ~(0x00000001); /* bit0=0 */
1411 writel(val, remapped_regs + mmMEM_EXT_CNTL); 1400 writel(val, remapped_regs + mmMEM_EXT_CNTL);
1412 } else { 1401 } else {
1413
1414 writel(0x00000000, remapped_regs + mmSCLK_CNTL); 1402 writel(0x00000000, remapped_regs + mmSCLK_CNTL);
1415 writel(0x000000BF, remapped_regs + mmCLK_PIN_CNTL); 1403 writel(0x000000BF, remapped_regs + mmCLK_PIN_CNTL);
1416 writel(0x00000015, remapped_regs + mmPWRMGT_CNTL); 1404 writel(0x00000015, remapped_regs + mmPWRMGT_CNTL);
@@ -1418,43 +1406,16 @@ static void w100_suspend(u32 mode)
1418 udelay(5); 1406 udelay(5);
1419 1407
1420 val = readl(remapped_regs + mmPLL_CNTL); 1408 val = readl(remapped_regs + mmPLL_CNTL);
1421 val |= 0x00000004; /* bit2=1 */ 1409 val |= 0x00000004; /* bit2=1 */
1422 writel(val, remapped_regs + mmPLL_CNTL); 1410 writel(val, remapped_regs + mmPLL_CNTL);
1423 writel(0x0000001d, remapped_regs + mmPWRMGT_CNTL); 1411 writel(0x0000001d, remapped_regs + mmPWRMGT_CNTL);
1424 } 1412 }
1425} 1413}
1426 1414
1427
1428static void w100_resume(void)
1429{
1430 u32 temp32;
1431
1432 w100_hw_init();
1433 w100_pwm_setup();
1434
1435 temp32 = readl(remapped_regs + mmDISP_DEBUG2);
1436 temp32 &= 0xff7fffff;
1437 temp32 |= 0x00800000;
1438 writel(temp32, remapped_regs + mmDISP_DEBUG2);
1439
1440 if (current_par->lcdMode == LCD_MODE_480 || current_par->lcdMode == LCD_MODE_640) {
1441 w100_init_sharp_lcd(LCD_SHARP_VGA);
1442 if (current_par->lcdMode == LCD_MODE_640) {
1443 w100_init_vga_rotation(current_par->rotation_flag ? 270 : 90);
1444 }
1445 } else {
1446 w100_init_sharp_lcd(LCD_SHARP_QVGA);
1447 if (current_par->lcdMode == LCD_MODE_320) {
1448 w100_init_qvga_rotation(current_par->rotation_flag ? 270 : 90);
1449 }
1450 }
1451}
1452
1453
1454static void w100_vsync(void) 1415static void w100_vsync(void)
1455{ 1416{
1456 u32 tmp; 1417 u32 tmp;
1457 int timeout = 30000; /* VSync timeout = 30[ms] > 16.8[ms] */ 1418 int timeout = 30000; /* VSync timeout = 30[ms] > 16.8[ms] */
1458 1419
1459 tmp = readl(remapped_regs + mmACTIVE_V_DISP); 1420 tmp = readl(remapped_regs + mmACTIVE_V_DISP);
1460 1421
@@ -1490,363 +1451,6 @@ static void w100_vsync(void)
1490 writel(0x00000002, remapped_regs + mmGEN_INT_STATUS); 1451 writel(0x00000002, remapped_regs + mmGEN_INT_STATUS);
1491} 1452}
1492 1453
1493
1494static void w100_InitExtMem(u32 mode)
1495{
1496 switch(mode) {
1497 case LCD_SHARP_QVGA:
1498 /* QVGA doesn't use external memory
1499 nothing to do, really. */
1500 break;
1501 case LCD_SHARP_VGA:
1502 writel(0x00007800, remapped_regs + mmMC_BIST_CTRL);
1503 writel(0x00040003, remapped_regs + mmMEM_EXT_CNTL);
1504 writel(0x00200021, remapped_regs + mmMEM_SDRAM_MODE_REG);
1505 udelay(100);
1506 writel(0x80200021, remapped_regs + mmMEM_SDRAM_MODE_REG);
1507 udelay(100);
1508 writel(0x00650021, remapped_regs + mmMEM_SDRAM_MODE_REG);
1509 udelay(100);
1510 writel(0x10002a4a, remapped_regs + mmMEM_EXT_TIMING_CNTL);
1511 writel(0x7ff87012, remapped_regs + mmMEM_IO_CNTL);
1512 break;
1513 default:
1514 break;
1515 }
1516}
1517
1518
1519#define RESCTL_ADRS 0x00
1520#define PHACTRL_ADRS 0x01
1521#define DUTYCTRL_ADRS 0x02
1522#define POWERREG0_ADRS 0x03
1523#define POWERREG1_ADRS 0x04
1524#define GPOR3_ADRS 0x05
1525#define PICTRL_ADRS 0x06
1526#define POLCTRL_ADRS 0x07
1527
1528#define RESCTL_QVGA 0x01
1529#define RESCTL_VGA 0x00
1530
1531#define POWER1_VW_ON 0x01 /* VW Supply FET ON */
1532#define POWER1_GVSS_ON 0x02 /* GVSS(-8V) Power Supply ON */
1533#define POWER1_VDD_ON 0x04 /* VDD(8V),SVSS(-4V) Power Supply ON */
1534
1535#define POWER1_VW_OFF 0x00 /* VW Supply FET OFF */
1536#define POWER1_GVSS_OFF 0x00 /* GVSS(-8V) Power Supply OFF */
1537#define POWER1_VDD_OFF 0x00 /* VDD(8V),SVSS(-4V) Power Supply OFF */
1538
1539#define POWER0_COM_DCLK 0x01 /* COM Voltage DC Bias DAC Serial Data Clock */
1540#define POWER0_COM_DOUT 0x02 /* COM Voltage DC Bias DAC Serial Data Out */
1541#define POWER0_DAC_ON 0x04 /* DAC Power Supply ON */
1542#define POWER0_COM_ON 0x08 /* COM Power Supply ON */
1543#define POWER0_VCC5_ON 0x10 /* VCC5 Power Supply ON */
1544
1545#define POWER0_DAC_OFF 0x00 /* DAC Power Supply OFF */
1546#define POWER0_COM_OFF 0x00 /* COM Power Supply OFF */
1547#define POWER0_VCC5_OFF 0x00 /* VCC5 Power Supply OFF */
1548
1549#define PICTRL_INIT_STATE 0x01
1550#define PICTRL_INIOFF 0x02
1551#define PICTRL_POWER_DOWN 0x04
1552#define PICTRL_COM_SIGNAL_OFF 0x08
1553#define PICTRL_DAC_SIGNAL_OFF 0x10
1554
1555#define PICTRL_POWER_ACTIVE (0)
1556
1557#define POLCTRL_SYNC_POL_FALL 0x01
1558#define POLCTRL_EN_POL_FALL 0x02
1559#define POLCTRL_DATA_POL_FALL 0x04
1560#define POLCTRL_SYNC_ACT_H 0x08
1561#define POLCTRL_EN_ACT_L 0x10
1562
1563#define POLCTRL_SYNC_POL_RISE 0x00
1564#define POLCTRL_EN_POL_RISE 0x00
1565#define POLCTRL_DATA_POL_RISE 0x00
1566#define POLCTRL_SYNC_ACT_L 0x00
1567#define POLCTRL_EN_ACT_H 0x00
1568
1569#define PHACTRL_PHASE_MANUAL 0x01
1570
1571#define PHAD_QVGA_DEFAULT_VAL (9)
1572#define COMADJ_DEFAULT (125)
1573
1574static void lcdtg_ssp_send(u8 adrs, u8 data)
1575{
1576 w100fb_ssp_send(adrs,data);
1577}
1578
1579/*
1580 * This is only a pseudo I2C interface. We can't use the standard kernel
1581 * routines as the interface is write only. We just assume the data is acked...
1582 */
1583static void lcdtg_ssp_i2c_send(u8 data)
1584{
1585 lcdtg_ssp_send(POWERREG0_ADRS, data);
1586 udelay(10);
1587}
1588
1589static void lcdtg_i2c_send_bit(u8 data)
1590{
1591 lcdtg_ssp_i2c_send(data);
1592 lcdtg_ssp_i2c_send(data | POWER0_COM_DCLK);
1593 lcdtg_ssp_i2c_send(data);
1594}
1595
1596static void lcdtg_i2c_send_start(u8 base)
1597{
1598 lcdtg_ssp_i2c_send(base | POWER0_COM_DCLK | POWER0_COM_DOUT);
1599 lcdtg_ssp_i2c_send(base | POWER0_COM_DCLK);
1600 lcdtg_ssp_i2c_send(base);
1601}
1602
1603static void lcdtg_i2c_send_stop(u8 base)
1604{
1605 lcdtg_ssp_i2c_send(base);
1606 lcdtg_ssp_i2c_send(base | POWER0_COM_DCLK);
1607 lcdtg_ssp_i2c_send(base | POWER0_COM_DCLK | POWER0_COM_DOUT);
1608}
1609
1610static void lcdtg_i2c_send_byte(u8 base, u8 data)
1611{
1612 int i;
1613 for (i = 0; i < 8; i++) {
1614 if (data & 0x80)
1615 lcdtg_i2c_send_bit(base | POWER0_COM_DOUT);
1616 else
1617 lcdtg_i2c_send_bit(base);
1618 data <<= 1;
1619 }
1620}
1621
1622static void lcdtg_i2c_wait_ack(u8 base)
1623{
1624 lcdtg_i2c_send_bit(base);
1625}
1626
1627static void lcdtg_set_common_voltage(u8 base_data, u8 data)
1628{
1629 /* Set Common Voltage to M62332FP via I2C */
1630 lcdtg_i2c_send_start(base_data);
1631 lcdtg_i2c_send_byte(base_data, 0x9c);
1632 lcdtg_i2c_wait_ack(base_data);
1633 lcdtg_i2c_send_byte(base_data, 0x00);
1634 lcdtg_i2c_wait_ack(base_data);
1635 lcdtg_i2c_send_byte(base_data, data);
1636 lcdtg_i2c_wait_ack(base_data);
1637 lcdtg_i2c_send_stop(base_data);
1638}
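
A typical call, mirroring what lcdtg_hw_init() does further down: the common-voltage DAC is programmed while DAC power is on but COM and VCC5 are still off. The value 125 is COMADJ_DEFAULT and is used here purely as an example:

	lcdtg_set_common_voltage(POWER0_DAC_ON | POWER0_COM_OFF | POWER0_VCC5_OFF, 125);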
1639
1640static struct lcdtg_register_setting {
1641 u8 adrs;
1642 u8 data;
1643 u32 wait;
1644} lcdtg_power_on_table[] = {
1645
1646 /* Initialize Internal Logic & Port */
1647 { PICTRL_ADRS,
1648 PICTRL_POWER_DOWN | PICTRL_INIOFF | PICTRL_INIT_STATE |
1649 PICTRL_COM_SIGNAL_OFF | PICTRL_DAC_SIGNAL_OFF,
1650 0 },
1651
1652 { POWERREG0_ADRS,
1653 POWER0_COM_DCLK | POWER0_COM_DOUT | POWER0_DAC_OFF | POWER0_COM_OFF |
1654 POWER0_VCC5_OFF,
1655 0 },
1656
1657 { POWERREG1_ADRS,
1658 POWER1_VW_OFF | POWER1_GVSS_OFF | POWER1_VDD_OFF,
1659 0 },
1660
1661 /* VDD(+8V),SVSS(-4V) ON */
1662 { POWERREG1_ADRS,
1663 POWER1_VW_OFF | POWER1_GVSS_OFF | POWER1_VDD_ON /* VDD ON */,
1664 3000 },
1665
1666 /* DAC ON */
1667 { POWERREG0_ADRS,
1668 POWER0_COM_DCLK | POWER0_COM_DOUT | POWER0_DAC_ON /* DAC ON */ |
1669 POWER0_COM_OFF | POWER0_VCC5_OFF,
1670 0 },
1671
1672 /* INIB = H, INI = L */
1673 { PICTRL_ADRS,
1674 /* PICTL[0] = H , PICTL[1] = PICTL[2] = PICTL[4] = L */
1675 PICTRL_INIT_STATE | PICTRL_COM_SIGNAL_OFF,
1676 0 },
1677
1678 /* Set Common Voltage */
1679 { 0xfe, 0, 0 },
1680
1681 /* VCC5 ON */
1682 { POWERREG0_ADRS,
1683 POWER0_COM_DCLK | POWER0_COM_DOUT | POWER0_DAC_ON /* DAC ON */ |
1684 POWER0_COM_OFF | POWER0_VCC5_ON /* VCC5 ON */,
1685 0 },
1686
1687 /* GVSS(-8V) ON */
1688 { POWERREG1_ADRS,
1689 POWER1_VW_OFF | POWER1_GVSS_ON /* GVSS ON */ |
1690 POWER1_VDD_ON /* VDD ON */,
1691 2000 },
1692
1693 /* COM SIGNAL ON (PICTL[3] = L) */
1694 { PICTRL_ADRS,
1695 PICTRL_INIT_STATE,
1696 0 },
1697
1698 /* COM ON */
1699 { POWERREG0_ADRS,
1700 POWER0_COM_DCLK | POWER0_COM_DOUT | POWER0_DAC_ON /* DAC ON */ |
1701 POWER0_COM_ON /* COM ON */ | POWER0_VCC5_ON /* VCC5_ON */,
1702 0 },
1703
1704 /* VW ON */
1705 { POWERREG1_ADRS,
1706 POWER1_VW_ON /* VW ON */ | POWER1_GVSS_ON /* GVSS ON */ |
1707 POWER1_VDD_ON /* VDD ON */,
1708 0 /* Wait 100ms */ },
1709
1710 /* Signals output enable */
1711 { PICTRL_ADRS,
1712 0 /* Signals output enable */,
1713 0 },
1714
1715 { PHACTRL_ADRS,
1716 PHACTRL_PHASE_MANUAL,
1717 0 },
1718
1719 /* Initialize for Input Signals from ATI */
1720 { POLCTRL_ADRS,
1721 POLCTRL_SYNC_POL_RISE | POLCTRL_EN_POL_RISE | POLCTRL_DATA_POL_RISE |
1722 POLCTRL_SYNC_ACT_L | POLCTRL_EN_ACT_H,
1723 1000 /*100000*/ /* Wait 100ms */ },
1724
1725 /* end mark */
1726 { 0xff, 0, 0 }
1727};
1728
1729static void lcdtg_resume(void)
1730{
1731 if (current_par->lcdMode == LCD_MODE_480 || current_par->lcdMode == LCD_MODE_640) {
1732 lcdtg_hw_init(LCD_SHARP_VGA);
1733 } else {
1734 lcdtg_hw_init(LCD_SHARP_QVGA);
1735 }
1736}
1737
1738static void lcdtg_suspend(void)
1739{
1740 int i;
1741
1742 for (i = 0; i < (current_par->xres * current_par->yres); i++) {
1743 writew(0xffff, remapped_fbuf + (2*i));
1744 }
1745
1746 /* 60Hz x 2 frame = 16.7msec x 2 = 33.4 msec */
1747 mdelay(34);
1748
1749 /* (1)VW OFF */
1750 lcdtg_ssp_send(POWERREG1_ADRS, POWER1_VW_OFF | POWER1_GVSS_ON | POWER1_VDD_ON);
1751
1752 /* (2)COM OFF */
1753 lcdtg_ssp_send(PICTRL_ADRS, PICTRL_COM_SIGNAL_OFF);
1754 lcdtg_ssp_send(POWERREG0_ADRS, POWER0_DAC_ON | POWER0_COM_OFF | POWER0_VCC5_ON);
1755
1756 /* (3)Set Common Voltage Bias 0V */
1757 lcdtg_set_common_voltage(POWER0_DAC_ON | POWER0_COM_OFF | POWER0_VCC5_ON, 0);
1758
1759 /* (4)GVSS OFF */
1760 lcdtg_ssp_send(POWERREG1_ADRS, POWER1_VW_OFF | POWER1_GVSS_OFF | POWER1_VDD_ON);
1761
1762 /* (5)VCC5 OFF */
1763 lcdtg_ssp_send(POWERREG0_ADRS, POWER0_DAC_ON | POWER0_COM_OFF | POWER0_VCC5_OFF);
1764
1765 /* (6)Set PDWN, INIOFF, DACOFF */
1766 lcdtg_ssp_send(PICTRL_ADRS, PICTRL_INIOFF | PICTRL_DAC_SIGNAL_OFF |
1767 PICTRL_POWER_DOWN | PICTRL_COM_SIGNAL_OFF);
1768
1769 /* (7)DAC OFF */
1770 lcdtg_ssp_send(POWERREG0_ADRS, POWER0_DAC_OFF | POWER0_COM_OFF | POWER0_VCC5_OFF);
1771
1772 /* (8)VDD OFF */
1773 lcdtg_ssp_send(POWERREG1_ADRS, POWER1_VW_OFF | POWER1_GVSS_OFF | POWER1_VDD_OFF);
1774
1775}
1776
1777static void lcdtg_set_phadadj(u32 mode)
1778{
1779 int adj;
1780
1781 if (mode == LCD_SHARP_VGA) {
1782 /* Setting for VGA */
1783 adj = current_par->phadadj;
1784 if (adj < 0) {
1785 adj = PHACTRL_PHASE_MANUAL;
1786 } else {
1787 adj = ((adj & 0x0f) << 1) | PHACTRL_PHASE_MANUAL;
1788 }
1789 } else {
1790 /* Setting for QVGA */
1791 adj = (PHAD_QVGA_DEFAULT_VAL << 1) | PHACTRL_PHASE_MANUAL;
1792 }
1793 lcdtg_ssp_send(PHACTRL_ADRS, adj);
1794}
1795
1796static void lcdtg_hw_init(u32 mode)
1797{
1798 int i;
1799 int comadj;
1800
1801 i = 0;
1802 while(lcdtg_power_on_table[i].adrs != 0xff) {
1803 if (lcdtg_power_on_table[i].adrs == 0xfe) {
1804 /* Set Common Voltage */
1805 comadj = current_par->comadj;
1806 if (comadj < 0) {
1807 comadj = COMADJ_DEFAULT;
1808 }
1809 lcdtg_set_common_voltage((POWER0_DAC_ON | POWER0_COM_OFF | POWER0_VCC5_OFF), comadj);
1810 } else if (lcdtg_power_on_table[i].adrs == PHACTRL_ADRS) {
1811			/* Set Phase Adjust */
1812 lcdtg_set_phadadj(mode);
1813 } else {
1814 /* Other */
1815 lcdtg_ssp_send(lcdtg_power_on_table[i].adrs, lcdtg_power_on_table[i].data);
1816 }
1817 if (lcdtg_power_on_table[i].wait != 0)
1818 udelay(lcdtg_power_on_table[i].wait);
1819 i++;
1820 }
1821
1822 switch(mode) {
1823 case LCD_SHARP_QVGA:
1824 /* Set Lcd Resolution (QVGA) */
1825 lcdtg_ssp_send(RESCTL_ADRS, RESCTL_QVGA);
1826 break;
1827 case LCD_SHARP_VGA:
1828 /* Set Lcd Resolution (VGA) */
1829 lcdtg_ssp_send(RESCTL_ADRS, RESCTL_VGA);
1830 break;
1831 default:
1832 break;
1833 }
1834}
1835
1836static void lcdtg_lcd_change(u32 mode)
1837{
1838	/* Set Phase Adjust */
1839 lcdtg_set_phadadj(mode);
1840
1841 if (mode == LCD_SHARP_VGA)
1842 /* Set Lcd Resolution (VGA) */
1843 lcdtg_ssp_send(RESCTL_ADRS, RESCTL_VGA);
1844 else if (mode == LCD_SHARP_QVGA)
1845 /* Set Lcd Resolution (QVGA) */
1846 lcdtg_ssp_send(RESCTL_ADRS, RESCTL_QVGA);
1847}
1848
1849
1850static struct device_driver w100fb_driver = { 1454static struct device_driver w100fb_driver = {
1851 .name = "w100fb", 1455 .name = "w100fb",
1852 .bus = &platform_bus_type, 1456 .bus = &platform_bus_type,
@@ -1870,4 +1474,4 @@ module_init(w100fb_init);
1870module_exit(w100fb_cleanup); 1474module_exit(w100fb_cleanup);
1871 1475
1872MODULE_DESCRIPTION("ATI Imageon w100 framebuffer driver"); 1476MODULE_DESCRIPTION("ATI Imageon w100 framebuffer driver");
1873MODULE_LICENSE("GPLv2"); 1477MODULE_LICENSE("GPL");
diff --git a/drivers/video/w100fb.h b/drivers/video/w100fb.h
index 41624f961237..7a58a1e3e427 100644
--- a/drivers/video/w100fb.h
+++ b/drivers/video/w100fb.h
@@ -5,9 +5,12 @@
5 * 5 *
6 * Copyright (C) 2002, ATI Corp. 6 * Copyright (C) 2002, ATI Corp.
7 * Copyright (C) 2004-2005 Richard Purdie 7 * Copyright (C) 2004-2005 Richard Purdie
8 * Copyright (c) 2005 Ian Molton <spyro@f2s.com>
8 * 9 *
9 * Modified to work with 2.6 by Richard Purdie <rpurdie@rpsys.net> 10 * Modified to work with 2.6 by Richard Purdie <rpurdie@rpsys.net>
10 * 11 *
12 * w32xx support by Ian Molton
13 *
11 * This program is free software; you can redistribute it and/or modify 14 * This program is free software; you can redistribute it and/or modify
12 * it under the terms of the GNU General Public License version 2 as 15 * it under the terms of the GNU General Public License version 2 as
13 * published by the Free Software Foundation. 16 * published by the Free Software Foundation.
@@ -19,7 +22,7 @@
19 22
20/* Block CIF Start: */ 23/* Block CIF Start: */
21#define mmCHIP_ID 0x0000 24#define mmCHIP_ID 0x0000
22#define mmREVISION_ID 0x0004 25#define mmREVISION_ID 0x0004
23#define mmWRAP_BUF_A 0x0008 26#define mmWRAP_BUF_A 0x0008
24#define mmWRAP_BUF_B 0x000C 27#define mmWRAP_BUF_B 0x000C
25#define mmWRAP_TOP_DIR 0x0010 28#define mmWRAP_TOP_DIR 0x0010
@@ -88,7 +91,7 @@
88#define mmDISP_DEBUG 0x04D4 91#define mmDISP_DEBUG 0x04D4
89#define mmDISP_DB_BUF_CNTL 0x04D8 92#define mmDISP_DB_BUF_CNTL 0x04D8
90#define mmDISP_CRC_SIG 0x04DC 93#define mmDISP_CRC_SIG 0x04DC
91#define mmCRTC_DEFAULT_COUNT 0x04E0 94#define mmCRTC_DEFAULT_COUNT 0x04E0
92#define mmLCD_BACKGROUND_COLOR 0x04E4 95#define mmLCD_BACKGROUND_COLOR 0x04E4
93#define mmCRTC_PS2 0x04E8 96#define mmCRTC_PS2 0x04E8
94#define mmCRTC_PS2_VPOS 0x04EC 97#define mmCRTC_PS2_VPOS 0x04EC
@@ -119,17 +122,17 @@
119/* Block DISPLAY End: */ 122/* Block DISPLAY End: */
120 123
121/* Block GFX Start: */ 124/* Block GFX Start: */
122#define mmBRUSH_OFFSET 0x108C 125#define mmBRUSH_OFFSET 0x108C
123#define mmBRUSH_Y_X 0x1074 126#define mmBRUSH_Y_X 0x1074
124#define mmDEFAULT_PITCH_OFFSET 0x10A0 127#define mmDEFAULT_PITCH_OFFSET 0x10A0
125#define mmDEFAULT_SC_BOTTOM_RIGHT 0x10A8 128#define mmDEFAULT_SC_BOTTOM_RIGHT 0x10A8
126#define mmDEFAULT2_SC_BOTTOM_RIGHT 0x10AC 129#define mmDEFAULT2_SC_BOTTOM_RIGHT 0x10AC
127#define mmGLOBAL_ALPHA 0x1210 130#define mmGLOBAL_ALPHA 0x1210
128#define mmFILTER_COEF 0x1214 131#define mmFILTER_COEF 0x1214
129#define mmMVC_CNTL_START 0x11E0 132#define mmMVC_CNTL_START 0x11E0
130#define mmE2_ARITHMETIC_CNTL 0x1220 133#define mmE2_ARITHMETIC_CNTL 0x1220
131#define mmENG_CNTL 0x13E8 134#define mmENG_CNTL 0x13E8
132#define mmENG_PERF_CNT 0x13F0 135#define mmENG_PERF_CNT 0x13F0
133/* Block GFX End: */ 136/* Block GFX End: */
134 137
135/* Block IDCT Start: */ 138/* Block IDCT Start: */
@@ -141,22 +144,38 @@
141/* Block IDCT End: */ 144/* Block IDCT End: */
142 145
143/* Block MC Start: */ 146/* Block MC Start: */
144#define mmMEM_CNTL 0x0180 147#define mmMEM_CNTL 0x0180
145#define mmMEM_ARB 0x0184 148#define mmMEM_ARB 0x0184
146#define mmMC_FB_LOCATION 0x0188 149#define mmMC_FB_LOCATION 0x0188
147#define mmMEM_EXT_CNTL 0x018C 150#define mmMEM_EXT_CNTL 0x018C
148#define mmMC_EXT_MEM_LOCATION 0x0190 151#define mmMC_EXT_MEM_LOCATION 0x0190
149#define mmMEM_EXT_TIMING_CNTL 0x0194 152#define mmMEM_EXT_TIMING_CNTL 0x0194
150#define mmMEM_SDRAM_MODE_REG 0x0198 153#define mmMEM_SDRAM_MODE_REG 0x0198
151#define mmMEM_IO_CNTL 0x019C 154#define mmMEM_IO_CNTL 0x019C
152#define mmMC_DEBUG 0x01A0 155#define mmMC_DEBUG 0x01A0
153#define mmMC_BIST_CTRL 0x01A4 156#define mmMC_BIST_CTRL 0x01A4
154#define mmMC_BIST_COLLAR_READ 0x01A8 157#define mmMC_BIST_COLLAR_READ 0x01A8
155#define mmTC_MISMATCH 0x01AC 158#define mmTC_MISMATCH 0x01AC
156#define mmMC_PERF_MON_CNTL 0x01B0 159#define mmMC_PERF_MON_CNTL 0x01B0
157#define mmMC_PERF_COUNTERS 0x01B4 160#define mmMC_PERF_COUNTERS 0x01B4
158/* Block MC End: */ 161/* Block MC End: */
159 162
163/* Block BM Start: */
164#define mmBM_EXT_MEM_BANDWIDTH 0x0A00
165#define mmBM_OFFSET 0x0A04
166#define mmBM_MEM_EXT_TIMING_CNTL 0x0A08
167#define mmBM_MEM_EXT_CNTL 0x0A0C
168#define mmBM_MEM_MODE_REG 0x0A10
169#define mmBM_MEM_IO_CNTL 0x0A18
170#define mmBM_CONFIG 0x0A1C
171#define mmBM_STATUS 0x0A20
172#define mmBM_DEBUG 0x0A24
173#define mmBM_PERF_MON_CNTL 0x0A28
174#define mmBM_PERF_COUNTERS 0x0A2C
175#define mmBM_PERF2_MON_CNTL 0x0A30
176#define mmBM_PERF2_COUNTERS 0x0A34
177/* Block BM End: */
178
160/* Block RBBM Start: */ 179/* Block RBBM Start: */
161#define mmWAIT_UNTIL 0x1400 180#define mmWAIT_UNTIL 0x1400
162#define mmISYNC_CNTL 0x1404 181#define mmISYNC_CNTL 0x1404
@@ -176,439 +195,575 @@
176/* Block CG End: */ 195/* Block CG End: */
177 196
178/* default value definitions */ 197/* default value definitions */
179#define defWRAP_TOP_DIR 0x00000000 198#define defWRAP_TOP_DIR 0x00000000
180#define defWRAP_START_DIR 0x00000000 199#define defWRAP_START_DIR 0x00000000
181#define defCFGREG_BASE 0x00000000 200#define defCFGREG_BASE 0x00000000
182#define defCIF_IO 0x000C0902 201#define defCIF_IO 0x000C0902
183#define defINTF_CNTL 0x00000011 202#define defINTF_CNTL 0x00000011
184#define defCPU_DEFAULTS 0x00000006 203#define defCPU_DEFAULTS 0x00000006
185#define defHW_INT 0x00000000 204#define defHW_INT 0x00000000
186#define defMC_EXT_MEM_LOCATION 0x07ff0000 205#define defMC_EXT_MEM_LOCATION 0x07ff0000
187#define defTC_MISMATCH 0x00000000 206#define defTC_MISMATCH 0x00000000
188 207
189#define W100_CFG_BASE 0x0 208#define W100_CFG_BASE 0x0
190#define W100_CFG_LEN 0x10 209#define W100_CFG_LEN 0x10
191#define W100_REG_BASE 0x10000 210#define W100_REG_BASE 0x10000
192#define W100_REG_LEN 0x2000 211#define W100_REG_LEN 0x2000
193#define MEM_INT_BASE_VALUE 0x100000 212#define MEM_INT_BASE_VALUE 0x100000
194#define MEM_INT_TOP_VALUE_W100 0x15ffff
195#define MEM_EXT_BASE_VALUE 0x800000 213#define MEM_EXT_BASE_VALUE 0x800000
196#define MEM_EXT_TOP_VALUE 0x9fffff 214#define MEM_INT_SIZE 0x05ffff
215#define MEM_WINDOW_BASE 0x100000
216#define MEM_WINDOW_SIZE 0xf00000
217
197#define WRAP_BUF_BASE_VALUE 0x80000 218#define WRAP_BUF_BASE_VALUE 0x80000
198#define WRAP_BUF_TOP_VALUE 0xbffff 219#define WRAP_BUF_TOP_VALUE 0xbffff
199 220
221#define CHIP_ID_W100 0x57411002
222#define CHIP_ID_W3200 0x56441002
223#define CHIP_ID_W3220 0x57441002
200 224
201/* data structure definitions */ 225/* Register structure definitions */
202 226
203struct wrap_top_dir_t { 227struct wrap_top_dir_t {
204 unsigned long top_addr : 23; 228 unsigned long top_addr : 23;
205 unsigned long : 9; 229 unsigned long : 9;
206} __attribute__((packed)); 230} __attribute__((packed));
207 231
208union wrap_top_dir_u { 232union wrap_top_dir_u {
209 unsigned long val : 32; 233 unsigned long val : 32;
210 struct wrap_top_dir_t f; 234 struct wrap_top_dir_t f;
211} __attribute__((packed)); 235} __attribute__((packed));
212 236
213struct wrap_start_dir_t { 237struct wrap_start_dir_t {
214 unsigned long start_addr : 23; 238 unsigned long start_addr : 23;
215 unsigned long : 9; 239 unsigned long : 9;
216} __attribute__((packed)); 240} __attribute__((packed));
217 241
218union wrap_start_dir_u { 242union wrap_start_dir_u {
219 unsigned long val : 32; 243 unsigned long val : 32;
220 struct wrap_start_dir_t f; 244 struct wrap_start_dir_t f;
221} __attribute__((packed)); 245} __attribute__((packed));
222 246
223struct cif_cntl_t { 247struct cif_cntl_t {
224 unsigned long swap_reg : 2; 248 unsigned long swap_reg : 2;
225 unsigned long swap_fbuf_1 : 2; 249 unsigned long swap_fbuf_1 : 2;
226 unsigned long swap_fbuf_2 : 2; 250 unsigned long swap_fbuf_2 : 2;
227 unsigned long swap_fbuf_3 : 2; 251 unsigned long swap_fbuf_3 : 2;
228 unsigned long pmi_int_disable : 1; 252 unsigned long pmi_int_disable : 1;
229 unsigned long pmi_schmen_disable : 1; 253 unsigned long pmi_schmen_disable : 1;
230 unsigned long intb_oe : 1; 254 unsigned long intb_oe : 1;
231 unsigned long en_wait_to_compensate_dq_prop_dly : 1; 255 unsigned long en_wait_to_compensate_dq_prop_dly : 1;
232 unsigned long compensate_wait_rd_size : 2; 256 unsigned long compensate_wait_rd_size : 2;
233 unsigned long wait_asserted_timeout_val : 2; 257 unsigned long wait_asserted_timeout_val : 2;
234 unsigned long wait_masked_val : 2; 258 unsigned long wait_masked_val : 2;
235 unsigned long en_wait_timeout : 1; 259 unsigned long en_wait_timeout : 1;
236 unsigned long en_one_clk_setup_before_wait : 1; 260 unsigned long en_one_clk_setup_before_wait : 1;
237 unsigned long interrupt_active_high : 1; 261 unsigned long interrupt_active_high : 1;
238 unsigned long en_overwrite_straps : 1; 262 unsigned long en_overwrite_straps : 1;
239 unsigned long strap_wait_active_hi : 1; 263 unsigned long strap_wait_active_hi : 1;
240 unsigned long lat_busy_count : 2; 264 unsigned long lat_busy_count : 2;
241 unsigned long lat_rd_pm4_sclk_busy : 1; 265 unsigned long lat_rd_pm4_sclk_busy : 1;
242 unsigned long dis_system_bits : 1; 266 unsigned long dis_system_bits : 1;
243 unsigned long dis_mr : 1; 267 unsigned long dis_mr : 1;
244 unsigned long cif_spare_1 : 4; 268 unsigned long cif_spare_1 : 4;
245} __attribute__((packed)); 269} __attribute__((packed));
246 270
247union cif_cntl_u { 271union cif_cntl_u {
248 unsigned long val : 32; 272 unsigned long val : 32;
249 struct cif_cntl_t f; 273 struct cif_cntl_t f;
250} __attribute__((packed)); 274} __attribute__((packed));
251 275
252struct cfgreg_base_t { 276struct cfgreg_base_t {
253 unsigned long cfgreg_base : 24; 277 unsigned long cfgreg_base : 24;
254 unsigned long : 8; 278 unsigned long : 8;
255} __attribute__((packed)); 279} __attribute__((packed));
256 280
257union cfgreg_base_u { 281union cfgreg_base_u {
258 unsigned long val : 32; 282 unsigned long val : 32;
259 struct cfgreg_base_t f; 283 struct cfgreg_base_t f;
260} __attribute__((packed)); 284} __attribute__((packed));
261 285
262struct cif_io_t { 286struct cif_io_t {
263 unsigned long dq_srp : 1; 287 unsigned long dq_srp : 1;
264 unsigned long dq_srn : 1; 288 unsigned long dq_srn : 1;
265 unsigned long dq_sp : 4; 289 unsigned long dq_sp : 4;
266 unsigned long dq_sn : 4; 290 unsigned long dq_sn : 4;
267 unsigned long waitb_srp : 1; 291 unsigned long waitb_srp : 1;
268 unsigned long waitb_srn : 1; 292 unsigned long waitb_srn : 1;
269 unsigned long waitb_sp : 4; 293 unsigned long waitb_sp : 4;
270 unsigned long waitb_sn : 4; 294 unsigned long waitb_sn : 4;
271 unsigned long intb_srp : 1; 295 unsigned long intb_srp : 1;
272 unsigned long intb_srn : 1; 296 unsigned long intb_srn : 1;
273 unsigned long intb_sp : 4; 297 unsigned long intb_sp : 4;
274 unsigned long intb_sn : 4; 298 unsigned long intb_sn : 4;
275 unsigned long : 2; 299 unsigned long : 2;
276} __attribute__((packed)); 300} __attribute__((packed));
277 301
278union cif_io_u { 302union cif_io_u {
279 unsigned long val : 32; 303 unsigned long val : 32;
280 struct cif_io_t f; 304 struct cif_io_t f;
281} __attribute__((packed)); 305} __attribute__((packed));
282 306
283struct cif_read_dbg_t { 307struct cif_read_dbg_t {
284 unsigned long unpacker_pre_fetch_trig_gen : 2; 308 unsigned long unpacker_pre_fetch_trig_gen : 2;
285 unsigned long dly_second_rd_fetch_trig : 1; 309 unsigned long dly_second_rd_fetch_trig : 1;
286 unsigned long rst_rd_burst_id : 1; 310 unsigned long rst_rd_burst_id : 1;
287 unsigned long dis_rd_burst_id : 1; 311 unsigned long dis_rd_burst_id : 1;
288 unsigned long en_block_rd_when_packer_is_not_emp : 1; 312 unsigned long en_block_rd_when_packer_is_not_emp : 1;
289 unsigned long dis_pre_fetch_cntl_sm : 1; 313 unsigned long dis_pre_fetch_cntl_sm : 1;
290 unsigned long rbbm_chrncy_dis : 1; 314 unsigned long rbbm_chrncy_dis : 1;
291 unsigned long rbbm_rd_after_wr_lat : 2; 315 unsigned long rbbm_rd_after_wr_lat : 2;
292 unsigned long dis_be_during_rd : 1; 316 unsigned long dis_be_during_rd : 1;
293 unsigned long one_clk_invalidate_pulse : 1; 317 unsigned long one_clk_invalidate_pulse : 1;
294 unsigned long dis_chnl_priority : 1; 318 unsigned long dis_chnl_priority : 1;
295 unsigned long rst_read_path_a_pls : 1; 319 unsigned long rst_read_path_a_pls : 1;
296 unsigned long rst_read_path_b_pls : 1; 320 unsigned long rst_read_path_b_pls : 1;
297 unsigned long dis_reg_rd_fetch_trig : 1; 321 unsigned long dis_reg_rd_fetch_trig : 1;
298 unsigned long dis_rd_fetch_trig_from_ind_addr : 1; 322 unsigned long dis_rd_fetch_trig_from_ind_addr : 1;
299 unsigned long dis_rd_same_byte_to_trig_fetch : 1; 323 unsigned long dis_rd_same_byte_to_trig_fetch : 1;
300 unsigned long dis_dir_wrap : 1; 324 unsigned long dis_dir_wrap : 1;
301 unsigned long dis_ring_buf_to_force_dec : 1; 325 unsigned long dis_ring_buf_to_force_dec : 1;
302 unsigned long dis_addr_comp_in_16bit : 1; 326 unsigned long dis_addr_comp_in_16bit : 1;
303 unsigned long clr_w : 1; 327 unsigned long clr_w : 1;
304 unsigned long err_rd_tag_is_3 : 1; 328 unsigned long err_rd_tag_is_3 : 1;
305 unsigned long err_load_when_ful_a : 1; 329 unsigned long err_load_when_ful_a : 1;
306 unsigned long err_load_when_ful_b : 1; 330 unsigned long err_load_when_ful_b : 1;
307 unsigned long : 7; 331 unsigned long : 7;
308} __attribute__((packed)); 332} __attribute__((packed));
309 333
310union cif_read_dbg_u { 334union cif_read_dbg_u {
311 unsigned long val : 32; 335 unsigned long val : 32;
312 struct cif_read_dbg_t f; 336 struct cif_read_dbg_t f;
313} __attribute__((packed)); 337} __attribute__((packed));
314 338
315struct cif_write_dbg_t { 339struct cif_write_dbg_t {
316 unsigned long packer_timeout_count : 2; 340 unsigned long packer_timeout_count : 2;
317 unsigned long en_upper_load_cond : 1; 341 unsigned long en_upper_load_cond : 1;
318 unsigned long en_chnl_change_cond : 1; 342 unsigned long en_chnl_change_cond : 1;
319 unsigned long dis_addr_comp_cond : 1; 343 unsigned long dis_addr_comp_cond : 1;
320 unsigned long dis_load_same_byte_addr_cond : 1; 344 unsigned long dis_load_same_byte_addr_cond : 1;
321 unsigned long dis_timeout_cond : 1; 345 unsigned long dis_timeout_cond : 1;
322 unsigned long dis_timeout_during_rbbm : 1; 346 unsigned long dis_timeout_during_rbbm : 1;
323 unsigned long dis_packer_ful_during_rbbm_timeout : 1; 347 unsigned long dis_packer_ful_during_rbbm_timeout : 1;
324 unsigned long en_dword_split_to_rbbm : 1; 348 unsigned long en_dword_split_to_rbbm : 1;
325 unsigned long en_dummy_val : 1; 349 unsigned long en_dummy_val : 1;
326 unsigned long dummy_val_sel : 1; 350 unsigned long dummy_val_sel : 1;
327 unsigned long mask_pm4_wrptr_dec : 1; 351 unsigned long mask_pm4_wrptr_dec : 1;
328 unsigned long dis_mc_clean_cond : 1; 352 unsigned long dis_mc_clean_cond : 1;
329 unsigned long err_two_reqi_during_ful : 1; 353 unsigned long err_two_reqi_during_ful : 1;
330 unsigned long err_reqi_during_idle_clk : 1; 354 unsigned long err_reqi_during_idle_clk : 1;
331 unsigned long err_global : 1; 355 unsigned long err_global : 1;
332 unsigned long en_wr_buf_dbg_load : 1; 356 unsigned long en_wr_buf_dbg_load : 1;
333 unsigned long en_wr_buf_dbg_path : 1; 357 unsigned long en_wr_buf_dbg_path : 1;
334 unsigned long sel_wr_buf_byte : 3; 358 unsigned long sel_wr_buf_byte : 3;
335 unsigned long dis_rd_flush_wr : 1; 359 unsigned long dis_rd_flush_wr : 1;
336 unsigned long dis_packer_ful_cond : 1; 360 unsigned long dis_packer_ful_cond : 1;
337 unsigned long dis_invalidate_by_ops_chnl : 1; 361 unsigned long dis_invalidate_by_ops_chnl : 1;
338 unsigned long en_halt_when_reqi_err : 1; 362 unsigned long en_halt_when_reqi_err : 1;
339 unsigned long cif_spare_2 : 5; 363 unsigned long cif_spare_2 : 5;
340 unsigned long : 1; 364 unsigned long : 1;
341} __attribute__((packed)); 365} __attribute__((packed));
342 366
343union cif_write_dbg_u { 367union cif_write_dbg_u {
344 unsigned long val : 32; 368 unsigned long val : 32;
345 struct cif_write_dbg_t f; 369 struct cif_write_dbg_t f;
346} __attribute__((packed)); 370} __attribute__((packed));
347 371
348 372
349struct intf_cntl_t { 373struct intf_cntl_t {
350 unsigned char ad_inc_a : 1; 374 unsigned char ad_inc_a : 1;
351 unsigned char ring_buf_a : 1; 375 unsigned char ring_buf_a : 1;
352 unsigned char rd_fetch_trigger_a : 1; 376 unsigned char rd_fetch_trigger_a : 1;
353 unsigned char rd_data_rdy_a : 1; 377 unsigned char rd_data_rdy_a : 1;
354 unsigned char ad_inc_b : 1; 378 unsigned char ad_inc_b : 1;
355 unsigned char ring_buf_b : 1; 379 unsigned char ring_buf_b : 1;
356 unsigned char rd_fetch_trigger_b : 1; 380 unsigned char rd_fetch_trigger_b : 1;
357 unsigned char rd_data_rdy_b : 1; 381 unsigned char rd_data_rdy_b : 1;
358} __attribute__((packed)); 382} __attribute__((packed));
359 383
360union intf_cntl_u { 384union intf_cntl_u {
361 unsigned char val : 8; 385 unsigned char val : 8;
362 struct intf_cntl_t f; 386 struct intf_cntl_t f;
363} __attribute__((packed)); 387} __attribute__((packed));
364 388
365struct cpu_defaults_t { 389struct cpu_defaults_t {
366 unsigned char unpack_rd_data : 1; 390 unsigned char unpack_rd_data : 1;
367 unsigned char access_ind_addr_a: 1; 391 unsigned char access_ind_addr_a : 1;
368 unsigned char access_ind_addr_b: 1; 392 unsigned char access_ind_addr_b : 1;
369 unsigned char access_scratch_reg : 1; 393 unsigned char access_scratch_reg : 1;
370 unsigned char pack_wr_data : 1; 394 unsigned char pack_wr_data : 1;
371 unsigned char transition_size : 1; 395 unsigned char transition_size : 1;
372 unsigned char en_read_buf_mode : 1; 396 unsigned char en_read_buf_mode : 1;
373 unsigned char rd_fetch_scratch : 1; 397 unsigned char rd_fetch_scratch : 1;
374} __attribute__((packed)); 398} __attribute__((packed));
375 399
376union cpu_defaults_u { 400union cpu_defaults_u {
377 unsigned char val : 8; 401 unsigned char val : 8;
378 struct cpu_defaults_t f; 402 struct cpu_defaults_t f;
403} __attribute__((packed));
404
405struct crtc_total_t {
406 unsigned long crtc_h_total : 10;
407 unsigned long : 6;
408 unsigned long crtc_v_total : 10;
409 unsigned long : 6;
410} __attribute__((packed));
411
412union crtc_total_u {
413 unsigned long val : 32;
414 struct crtc_total_t f;
415} __attribute__((packed));
416
417struct crtc_ss_t {
418 unsigned long ss_start : 10;
419 unsigned long : 6;
420 unsigned long ss_end : 10;
421 unsigned long : 2;
422 unsigned long ss_align : 1;
423 unsigned long ss_pol : 1;
424 unsigned long ss_run_mode : 1;
425 unsigned long ss_en : 1;
426} __attribute__((packed));
427
428union crtc_ss_u {
429 unsigned long val : 32;
430 struct crtc_ss_t f;
431} __attribute__((packed));
432
433struct active_h_disp_t {
434 unsigned long active_h_start : 10;
435 unsigned long : 6;
436 unsigned long active_h_end : 10;
437 unsigned long : 6;
438} __attribute__((packed));
439
440union active_h_disp_u {
441 unsigned long val : 32;
442 struct active_h_disp_t f;
443} __attribute__((packed));
444
445struct active_v_disp_t {
446 unsigned long active_v_start : 10;
447 unsigned long : 6;
448 unsigned long active_v_end : 10;
449 unsigned long : 6;
450} __attribute__((packed));
451
452union active_v_disp_u {
453 unsigned long val : 32;
454 struct active_v_disp_t f;
455} __attribute__((packed));
456
457struct graphic_h_disp_t {
458 unsigned long graphic_h_start : 10;
459 unsigned long : 6;
460 unsigned long graphic_h_end : 10;
461 unsigned long : 6;
462} __attribute__((packed));
463
464union graphic_h_disp_u {
465 unsigned long val : 32;
466 struct graphic_h_disp_t f;
467} __attribute__((packed));
468
469struct graphic_v_disp_t {
470 unsigned long graphic_v_start : 10;
471 unsigned long : 6;
472 unsigned long graphic_v_end : 10;
473 unsigned long : 6;
474} __attribute__((packed));
475
476union graphic_v_disp_u {
477 unsigned long val : 32;
478 struct graphic_v_disp_t f;
479} __attribute__((packed));
480
481struct graphic_ctrl_t_w100 {
482 unsigned long color_depth : 3;
483 unsigned long portrait_mode : 2;
484 unsigned long low_power_on : 1;
485 unsigned long req_freq : 4;
486 unsigned long en_crtc : 1;
487 unsigned long en_graphic_req : 1;
488 unsigned long en_graphic_crtc : 1;
489 unsigned long total_req_graphic : 9;
490 unsigned long lcd_pclk_on : 1;
491 unsigned long lcd_sclk_on : 1;
492 unsigned long pclk_running : 1;
493 unsigned long sclk_running : 1;
494 unsigned long : 6;
495} __attribute__((packed));
496
497struct graphic_ctrl_t_w32xx {
498 unsigned long color_depth : 3;
499 unsigned long portrait_mode : 2;
500 unsigned long low_power_on : 1;
501 unsigned long req_freq : 4;
502 unsigned long en_crtc : 1;
503 unsigned long en_graphic_req : 1;
504 unsigned long en_graphic_crtc : 1;
505 unsigned long total_req_graphic : 10;
506 unsigned long lcd_pclk_on : 1;
507 unsigned long lcd_sclk_on : 1;
508 unsigned long pclk_running : 1;
509 unsigned long sclk_running : 1;
510 unsigned long : 5;
511} __attribute__((packed));
512
513union graphic_ctrl_u {
514 unsigned long val : 32;
515 struct graphic_ctrl_t_w100 f_w100;
516 struct graphic_ctrl_t_w32xx f_w32xx;
379} __attribute__((packed)); 517} __attribute__((packed));
380 518
381struct video_ctrl_t { 519struct video_ctrl_t {
382 unsigned long video_mode : 1; 520 unsigned long video_mode : 1;
383 unsigned long keyer_en : 1; 521 unsigned long keyer_en : 1;
384 unsigned long en_video_req : 1; 522 unsigned long en_video_req : 1;
385 unsigned long en_graphic_req_video : 1; 523 unsigned long en_graphic_req_video : 1;
386 unsigned long en_video_crtc : 1; 524 unsigned long en_video_crtc : 1;
387 unsigned long video_hor_exp : 2; 525 unsigned long video_hor_exp : 2;
388 unsigned long video_ver_exp : 2; 526 unsigned long video_ver_exp : 2;
389 unsigned long uv_combine : 1; 527 unsigned long uv_combine : 1;
390 unsigned long total_req_video : 9; 528 unsigned long total_req_video : 9;
391 unsigned long video_ch_sel : 1; 529 unsigned long video_ch_sel : 1;
392 unsigned long video_portrait : 2; 530 unsigned long video_portrait : 2;
393 unsigned long yuv2rgb_en : 1; 531 unsigned long yuv2rgb_en : 1;
394 unsigned long yuv2rgb_option : 1; 532 unsigned long yuv2rgb_option : 1;
395 unsigned long video_inv_hor : 1; 533 unsigned long video_inv_hor : 1;
396 unsigned long video_inv_ver : 1; 534 unsigned long video_inv_ver : 1;
397 unsigned long gamma_sel : 2; 535 unsigned long gamma_sel : 2;
398 unsigned long dis_limit : 1; 536 unsigned long dis_limit : 1;
399 unsigned long en_uv_hblend : 1; 537 unsigned long en_uv_hblend : 1;
400 unsigned long rgb_gamma_sel : 2; 538 unsigned long rgb_gamma_sel : 2;
401} __attribute__((packed)); 539} __attribute__((packed));
402 540
403union video_ctrl_u { 541union video_ctrl_u {
404 unsigned long val : 32; 542 unsigned long val : 32;
405 struct video_ctrl_t f; 543 struct video_ctrl_t f;
406} __attribute__((packed)); 544} __attribute__((packed));
407 545
408struct disp_db_buf_cntl_rd_t { 546struct disp_db_buf_cntl_rd_t {
409 unsigned long en_db_buf : 1; 547 unsigned long en_db_buf : 1;
410 unsigned long update_db_buf_done : 1; 548 unsigned long update_db_buf_done : 1;
411 unsigned long db_buf_cntl : 6; 549 unsigned long db_buf_cntl : 6;
412 unsigned long : 24; 550 unsigned long : 24;
413} __attribute__((packed)); 551} __attribute__((packed));
414 552
415union disp_db_buf_cntl_rd_u { 553union disp_db_buf_cntl_rd_u {
416 unsigned long val : 32; 554 unsigned long val : 32;
417 struct disp_db_buf_cntl_rd_t f; 555 struct disp_db_buf_cntl_rd_t f;
418} __attribute__((packed)); 556} __attribute__((packed));
419 557
420struct disp_db_buf_cntl_wr_t { 558struct disp_db_buf_cntl_wr_t {
421 unsigned long en_db_buf : 1; 559 unsigned long en_db_buf : 1;
422 unsigned long update_db_buf : 1; 560 unsigned long update_db_buf : 1;
423 unsigned long db_buf_cntl : 6; 561 unsigned long db_buf_cntl : 6;
424 unsigned long : 24; 562 unsigned long : 24;
425} __attribute__((packed)); 563} __attribute__((packed));
426 564
427union disp_db_buf_cntl_wr_u { 565union disp_db_buf_cntl_wr_u {
428 unsigned long val : 32; 566 unsigned long val : 32;
429 struct disp_db_buf_cntl_wr_t f; 567 struct disp_db_buf_cntl_wr_t f;
430} __attribute__((packed)); 568} __attribute__((packed));
431 569
432struct gamma_value1_t { 570struct gamma_value1_t {
433 unsigned long gamma1 : 8; 571 unsigned long gamma1 : 8;
434 unsigned long gamma2 : 8; 572 unsigned long gamma2 : 8;
435 unsigned long gamma3 : 8; 573 unsigned long gamma3 : 8;
436 unsigned long gamma4 : 8; 574 unsigned long gamma4 : 8;
437} __attribute__((packed)); 575} __attribute__((packed));
438 576
439union gamma_value1_u { 577union gamma_value1_u {
440 unsigned long val : 32; 578 unsigned long val : 32;
441 struct gamma_value1_t f; 579 struct gamma_value1_t f;
442} __attribute__((packed)); 580} __attribute__((packed));
443 581
444struct gamma_value2_t { 582struct gamma_value2_t {
445 unsigned long gamma5 : 8; 583 unsigned long gamma5 : 8;
446 unsigned long gamma6 : 8; 584 unsigned long gamma6 : 8;
447 unsigned long gamma7 : 8; 585 unsigned long gamma7 : 8;
448 unsigned long gamma8 : 8; 586 unsigned long gamma8 : 8;
449} __attribute__((packed)); 587} __attribute__((packed));
450 588
451union gamma_value2_u { 589union gamma_value2_u {
452 unsigned long val : 32; 590 unsigned long val : 32;
453 struct gamma_value2_t f; 591 struct gamma_value2_t f;
454} __attribute__((packed)); 592} __attribute__((packed));
455 593
456struct gamma_slope_t { 594struct gamma_slope_t {
457 unsigned long slope1 : 3; 595 unsigned long slope1 : 3;
458 unsigned long slope2 : 3; 596 unsigned long slope2 : 3;
459 unsigned long slope3 : 3; 597 unsigned long slope3 : 3;
460 unsigned long slope4 : 3; 598 unsigned long slope4 : 3;
461 unsigned long slope5 : 3; 599 unsigned long slope5 : 3;
462 unsigned long slope6 : 3; 600 unsigned long slope6 : 3;
463 unsigned long slope7 : 3; 601 unsigned long slope7 : 3;
464 unsigned long slope8 : 3; 602 unsigned long slope8 : 3;
465 unsigned long : 8; 603 unsigned long : 8;
466} __attribute__((packed)); 604} __attribute__((packed));
467 605
468union gamma_slope_u { 606union gamma_slope_u {
469 unsigned long val : 32; 607 unsigned long val : 32;
470 struct gamma_slope_t f; 608 struct gamma_slope_t f;
471} __attribute__((packed)); 609} __attribute__((packed));
472 610
473struct mc_ext_mem_location_t { 611struct mc_ext_mem_location_t {
474 unsigned long mc_ext_mem_start : 16; 612 unsigned long mc_ext_mem_start : 16;
475 unsigned long mc_ext_mem_top : 16; 613 unsigned long mc_ext_mem_top : 16;
476} __attribute__((packed)); 614} __attribute__((packed));
477 615
478union mc_ext_mem_location_u { 616union mc_ext_mem_location_u {
479 unsigned long val : 32; 617 unsigned long val : 32;
480 struct mc_ext_mem_location_t f; 618 struct mc_ext_mem_location_t f;
619} __attribute__((packed));
620
621struct mc_fb_location_t {
622 unsigned long mc_fb_start : 16;
623 unsigned long mc_fb_top : 16;
624} __attribute__((packed));
625
626union mc_fb_location_u {
627 unsigned long val : 32;
628 struct mc_fb_location_t f;
481} __attribute__((packed)); 629} __attribute__((packed));
482 630
483struct clk_pin_cntl_t { 631struct clk_pin_cntl_t {
484 unsigned long osc_en : 1; 632 unsigned long osc_en : 1;
485 unsigned long osc_gain : 5; 633 unsigned long osc_gain : 5;
486 unsigned long dont_use_xtalin : 1; 634 unsigned long dont_use_xtalin : 1;
487 unsigned long xtalin_pm_en : 1; 635 unsigned long xtalin_pm_en : 1;
488 unsigned long xtalin_dbl_en : 1; 636 unsigned long xtalin_dbl_en : 1;
489 unsigned long : 7; 637 unsigned long : 7;
490 unsigned long cg_debug : 16; 638 unsigned long cg_debug : 16;
491} __attribute__((packed)); 639} __attribute__((packed));
492 640
493union clk_pin_cntl_u { 641union clk_pin_cntl_u {
494 unsigned long val : 32; 642 unsigned long val : 32;
495 struct clk_pin_cntl_t f; 643 struct clk_pin_cntl_t f;
496} __attribute__((packed)); 644} __attribute__((packed));
497 645
498struct pll_ref_fb_div_t { 646struct pll_ref_fb_div_t {
499 unsigned long pll_ref_div : 4; 647 unsigned long pll_ref_div : 4;
500 unsigned long : 4; 648 unsigned long : 4;
501 unsigned long pll_fb_div_int : 6; 649 unsigned long pll_fb_div_int : 6;
502 unsigned long : 2; 650 unsigned long : 2;
503 unsigned long pll_fb_div_frac : 3; 651 unsigned long pll_fb_div_frac : 3;
504 unsigned long : 1; 652 unsigned long : 1;
505 unsigned long pll_reset_time : 4; 653 unsigned long pll_reset_time : 4;
506 unsigned long pll_lock_time : 8; 654 unsigned long pll_lock_time : 8;
507} __attribute__((packed)); 655} __attribute__((packed));
508 656
509union pll_ref_fb_div_u { 657union pll_ref_fb_div_u {
510 unsigned long val : 32; 658 unsigned long val : 32;
511 struct pll_ref_fb_div_t f; 659 struct pll_ref_fb_div_t f;
512} __attribute__((packed)); 660} __attribute__((packed));
513 661
514struct pll_cntl_t { 662struct pll_cntl_t {
515 unsigned long pll_pwdn : 1; 663 unsigned long pll_pwdn : 1;
516 unsigned long pll_reset : 1; 664 unsigned long pll_reset : 1;
517 unsigned long pll_pm_en : 1; 665 unsigned long pll_pm_en : 1;
518 unsigned long pll_mode : 1; 666 unsigned long pll_mode : 1;
519 unsigned long pll_refclk_sel : 1; 667 unsigned long pll_refclk_sel : 1;
520 unsigned long pll_fbclk_sel : 1; 668 unsigned long pll_fbclk_sel : 1;
521 unsigned long pll_tcpoff : 1; 669 unsigned long pll_tcpoff : 1;
522 unsigned long pll_pcp : 3; 670 unsigned long pll_pcp : 3;
523 unsigned long pll_pvg : 3; 671 unsigned long pll_pvg : 3;
524 unsigned long pll_vcofr : 1; 672 unsigned long pll_vcofr : 1;
525 unsigned long pll_ioffset : 2; 673 unsigned long pll_ioffset : 2;
526 unsigned long pll_pecc_mode : 2; 674 unsigned long pll_pecc_mode : 2;
527 unsigned long pll_pecc_scon : 2; 675 unsigned long pll_pecc_scon : 2;
528 unsigned long pll_dactal : 4; 676 unsigned long pll_dactal : 4;
529 unsigned long pll_cp_clip : 2; 677 unsigned long pll_cp_clip : 2;
530 unsigned long pll_conf : 3; 678 unsigned long pll_conf : 3;
531 unsigned long pll_mbctrl : 2; 679 unsigned long pll_mbctrl : 2;
532 unsigned long pll_ring_off : 1; 680 unsigned long pll_ring_off : 1;
533} __attribute__((packed)); 681} __attribute__((packed));
534 682
535union pll_cntl_u { 683union pll_cntl_u {
536 unsigned long val : 32; 684 unsigned long val : 32;
537 struct pll_cntl_t f; 685 struct pll_cntl_t f;
538} __attribute__((packed)); 686} __attribute__((packed));
539 687
540struct sclk_cntl_t { 688struct sclk_cntl_t {
541 unsigned long sclk_src_sel : 2; 689 unsigned long sclk_src_sel : 2;
542 unsigned long : 2; 690 unsigned long : 2;
543 unsigned long sclk_post_div_fast : 4; 691 unsigned long sclk_post_div_fast : 4;
544 unsigned long sclk_clkon_hys : 3; 692 unsigned long sclk_clkon_hys : 3;
545 unsigned long sclk_post_div_slow : 4; 693 unsigned long sclk_post_div_slow : 4;
546 unsigned long disp_cg_ok2switch_en : 1; 694 unsigned long disp_cg_ok2switch_en : 1;
547 unsigned long sclk_force_reg : 1; 695 unsigned long sclk_force_reg : 1;
548 unsigned long sclk_force_disp : 1; 696 unsigned long sclk_force_disp : 1;
549 unsigned long sclk_force_mc : 1; 697 unsigned long sclk_force_mc : 1;
550 unsigned long sclk_force_extmc : 1; 698 unsigned long sclk_force_extmc : 1;
551 unsigned long sclk_force_cp : 1; 699 unsigned long sclk_force_cp : 1;
552 unsigned long sclk_force_e2 : 1; 700 unsigned long sclk_force_e2 : 1;
553 unsigned long sclk_force_e3 : 1; 701 unsigned long sclk_force_e3 : 1;
554 unsigned long sclk_force_idct : 1; 702 unsigned long sclk_force_idct : 1;
555 unsigned long sclk_force_bist : 1; 703 unsigned long sclk_force_bist : 1;
556 unsigned long busy_extend_cp : 1; 704 unsigned long busy_extend_cp : 1;
557 unsigned long busy_extend_e2 : 1; 705 unsigned long busy_extend_e2 : 1;
558 unsigned long busy_extend_e3 : 1; 706 unsigned long busy_extend_e3 : 1;
559 unsigned long busy_extend_idct : 1; 707 unsigned long busy_extend_idct : 1;
560 unsigned long : 3; 708 unsigned long : 3;
561} __attribute__((packed)); 709} __attribute__((packed));
562 710
563union sclk_cntl_u { 711union sclk_cntl_u {
564 unsigned long val : 32; 712 unsigned long val : 32;
565 struct sclk_cntl_t f; 713 struct sclk_cntl_t f;
566} __attribute__((packed)); 714} __attribute__((packed));
567 715
568struct pclk_cntl_t { 716struct pclk_cntl_t {
569 unsigned long pclk_src_sel : 2; 717 unsigned long pclk_src_sel : 2;
570 unsigned long : 2; 718 unsigned long : 2;
571 unsigned long pclk_post_div : 4; 719 unsigned long pclk_post_div : 4;
572 unsigned long : 8; 720 unsigned long : 8;
573 unsigned long pclk_force_disp : 1; 721 unsigned long pclk_force_disp : 1;
574 unsigned long : 15; 722 unsigned long : 15;
575} __attribute__((packed)); 723} __attribute__((packed));
576 724
577union pclk_cntl_u { 725union pclk_cntl_u {
578 unsigned long val : 32; 726 unsigned long val : 32;
579 struct pclk_cntl_t f; 727 struct pclk_cntl_t f;
580} __attribute__((packed)); 728} __attribute__((packed));
581 729
730
731#define TESTCLK_SRC_PLL 0x01
732#define TESTCLK_SRC_SCLK 0x02
733#define TESTCLK_SRC_PCLK 0x03
734/* 4 and 5 seem to be XTAL/M */
735#define TESTCLK_SRC_XTAL 0x06
736
582struct clk_test_cntl_t { 737struct clk_test_cntl_t {
583 unsigned long testclk_sel : 4; 738 unsigned long testclk_sel : 4;
584 unsigned long : 3; 739 unsigned long : 3;
585 unsigned long start_check_freq : 1; 740 unsigned long start_check_freq : 1;
586 unsigned long tstcount_rst : 1; 741 unsigned long tstcount_rst : 1;
587 unsigned long : 15; 742 unsigned long : 15;
588 unsigned long test_count : 8; 743 unsigned long test_count : 8;
589} __attribute__((packed)); 744} __attribute__((packed));
590 745
591union clk_test_cntl_u { 746union clk_test_cntl_u {
592 unsigned long val : 32; 747 unsigned long val : 32;
593 struct clk_test_cntl_t f; 748 struct clk_test_cntl_t f;
594} __attribute__((packed)); 749} __attribute__((packed));
595 750
596struct pwrmgt_cntl_t { 751struct pwrmgt_cntl_t {
597 unsigned long pwm_enable : 1; 752 unsigned long pwm_enable : 1;
598 unsigned long : 1; 753 unsigned long : 1;
599 unsigned long pwm_mode_req : 2; 754 unsigned long pwm_mode_req : 2;
600 unsigned long pwm_wakeup_cond : 2; 755 unsigned long pwm_wakeup_cond : 2;
601 unsigned long pwm_fast_noml_hw_en : 1; 756 unsigned long pwm_fast_noml_hw_en : 1;
602 unsigned long pwm_noml_fast_hw_en : 1; 757 unsigned long pwm_noml_fast_hw_en : 1;
603 unsigned long pwm_fast_noml_cond : 4; 758 unsigned long pwm_fast_noml_cond : 4;
604 unsigned long pwm_noml_fast_cond : 4; 759 unsigned long pwm_noml_fast_cond : 4;
605 unsigned long pwm_idle_timer : 8; 760 unsigned long pwm_idle_timer : 8;
606 unsigned long pwm_busy_timer : 8; 761 unsigned long pwm_busy_timer : 8;
607} __attribute__((packed)); 762} __attribute__((packed));
608 763
609union pwrmgt_cntl_u { 764union pwrmgt_cntl_u {
610 unsigned long val : 32; 765 unsigned long val : 32;
611 struct pwrmgt_cntl_t f; 766 struct pwrmgt_cntl_t f;
612} __attribute__((packed)); 767} __attribute__((packed));
613 768
614#endif 769#endif
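The register layout above follows one pattern throughout: each 32-bit register gets a packed bitfield struct naming its fields plus a union overlaying a plain 32-bit val, so code can update a single field and still write the register back as one word. Below is a minimal sketch of that read-modify-write idiom; remap_base, the readl()/writel() accessors and the mmPCLK_CNTL offset name are assumptions for illustration only and are not taken from this header excerpt.

static void __iomem *remap_base;	/* assumed ioremap() of the register window */

static void w100_set_pclk_post_div(unsigned int div)
{
	union pclk_cntl_u pclk;

	/* Read the whole register, patch one named field, write it back. */
	pclk.val = readl(remap_base + mmPCLK_CNTL);	/* mmPCLK_CNTL: assumed name */
	pclk.f.pclk_post_div = div;
	writel(pclk.val, remap_base + mmPCLK_CNTL);
}

The same idiom applies to every *_u union in the header; graphic_ctrl_u additionally carries two field layouts (f_w100 and f_w32xx) so the W100 and W32xx parts can share a single register image.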
diff --git a/fs/Kconfig b/fs/Kconfig
index ed78d24ee426..5e817902cb3b 100644
--- a/fs/Kconfig
+++ b/fs/Kconfig
@@ -382,10 +382,8 @@ config QUOTA
382 usage (also called disk quotas). Currently, it works for the 382 usage (also called disk quotas). Currently, it works for the
383 ext2, ext3, and reiserfs file system. ext3 also supports journalled 383 ext2, ext3, and reiserfs file system. ext3 also supports journalled
384 quotas for which you don't need to run quotacheck(8) after an unclean 384 quotas for which you don't need to run quotacheck(8) after an unclean
385 shutdown. You need additional software in order to use quota support 385 shutdown.
386 (you can download sources from 386 For further details, read the Quota mini-HOWTO, available from
387 <http://www.sf.net/projects/linuxquota/>). For further details, read
388 the Quota mini-HOWTO, available from
389 <http://www.tldp.org/docs.html#howto>, or the documentation provided 387 <http://www.tldp.org/docs.html#howto>, or the documentation provided
390 with the quota tools. Probably the quota support is only useful for 388 with the quota tools. Probably the quota support is only useful for
391 multi user systems. If unsure, say N. 389 multi user systems. If unsure, say N.
@@ -403,8 +401,7 @@ config QFMT_V2
403 depends on QUOTA 401 depends on QUOTA
404 help 402 help
405 This quota format allows using quotas with 32-bit UIDs/GIDs. If you 403 This quota format allows using quotas with 32-bit UIDs/GIDs. If you
406 need this functionality say Y here. Note that you will need recent 404 need this functionality say Y here.
407 quota utilities (>= 3.01) for new quota format with this kernel.
408 405
409config QUOTACTL 406config QUOTACTL
410 bool 407 bool
@@ -816,6 +813,18 @@ config RAMFS
816 To compile this as a module, choose M here: the module will be called 813 To compile this as a module, choose M here: the module will be called
817 ramfs. 814 ramfs.
818 815
816config RELAYFS_FS
817 tristate "Relayfs file system support"
818 ---help---
819 Relayfs is a high-speed data relay filesystem designed to provide
820 an efficient mechanism for tools and facilities to relay large
821 amounts of data from kernel space to user space.
822
823 To compile this code as a module, choose M here: the module will be
824 called relayfs.
825
826 If unsure, say N.
827
819endmenu 828endmenu
820 829
821menu "Miscellaneous filesystems" 830menu "Miscellaneous filesystems"
diff --git a/fs/Makefile b/fs/Makefile
index cf95eb894fd5..15158309dee4 100644
--- a/fs/Makefile
+++ b/fs/Makefile
@@ -90,6 +90,7 @@ obj-$(CONFIG_AUTOFS_FS) += autofs/
90obj-$(CONFIG_AUTOFS4_FS) += autofs4/ 90obj-$(CONFIG_AUTOFS4_FS) += autofs4/
91obj-$(CONFIG_ADFS_FS) += adfs/ 91obj-$(CONFIG_ADFS_FS) += adfs/
92obj-$(CONFIG_UDF_FS) += udf/ 92obj-$(CONFIG_UDF_FS) += udf/
93obj-$(CONFIG_RELAYFS_FS) += relayfs/
93obj-$(CONFIG_SUN_OPENPROMFS) += openpromfs/ 94obj-$(CONFIG_SUN_OPENPROMFS) += openpromfs/
94obj-$(CONFIG_JFS_FS) += jfs/ 95obj-$(CONFIG_JFS_FS) += jfs/
95obj-$(CONFIG_XFS_FS) += xfs/ 96obj-$(CONFIG_XFS_FS) += xfs/
diff --git a/fs/bio.c b/fs/bio.c
index 1f2d4649b188..bf3ec9d2b54c 100644
--- a/fs/bio.c
+++ b/fs/bio.c
@@ -104,18 +104,22 @@ static inline struct bio_vec *bvec_alloc_bs(unsigned int __nocast gfp_mask, int
104 return bvl; 104 return bvl;
105} 105}
106 106
107/* 107void bio_free(struct bio *bio, struct bio_set *bio_set)
108 * default destructor for a bio allocated with bio_alloc_bioset()
109 */
110static void bio_destructor(struct bio *bio)
111{ 108{
112 const int pool_idx = BIO_POOL_IDX(bio); 109 const int pool_idx = BIO_POOL_IDX(bio);
113 struct bio_set *bs = bio->bi_set;
114 110
115 BIO_BUG_ON(pool_idx >= BIOVEC_NR_POOLS); 111 BIO_BUG_ON(pool_idx >= BIOVEC_NR_POOLS);
116 112
117 mempool_free(bio->bi_io_vec, bs->bvec_pools[pool_idx]); 113 mempool_free(bio->bi_io_vec, bio_set->bvec_pools[pool_idx]);
118 mempool_free(bio, bs->bio_pool); 114 mempool_free(bio, bio_set->bio_pool);
115}
116
117/*
118 * default destructor for a bio allocated with bio_alloc_bioset()
119 */
120static void bio_fs_destructor(struct bio *bio)
121{
122 bio_free(bio, fs_bio_set);
119} 123}
120 124
121inline void bio_init(struct bio *bio) 125inline void bio_init(struct bio *bio)
@@ -171,8 +175,6 @@ struct bio *bio_alloc_bioset(unsigned int __nocast gfp_mask, int nr_iovecs, stru
171 bio->bi_max_vecs = bvec_slabs[idx].nr_vecs; 175 bio->bi_max_vecs = bvec_slabs[idx].nr_vecs;
172 } 176 }
173 bio->bi_io_vec = bvl; 177 bio->bi_io_vec = bvl;
174 bio->bi_destructor = bio_destructor;
175 bio->bi_set = bs;
176 } 178 }
177out: 179out:
178 return bio; 180 return bio;
@@ -180,7 +182,12 @@ out:
180 182
181struct bio *bio_alloc(unsigned int __nocast gfp_mask, int nr_iovecs) 183struct bio *bio_alloc(unsigned int __nocast gfp_mask, int nr_iovecs)
182{ 184{
183 return bio_alloc_bioset(gfp_mask, nr_iovecs, fs_bio_set); 185 struct bio *bio = bio_alloc_bioset(gfp_mask, nr_iovecs, fs_bio_set);
186
187 if (bio)
188 bio->bi_destructor = bio_fs_destructor;
189
190 return bio;
184} 191}
185 192
186void zero_fill_bio(struct bio *bio) 193void zero_fill_bio(struct bio *bio)
@@ -273,8 +280,10 @@ struct bio *bio_clone(struct bio *bio, unsigned int __nocast gfp_mask)
273{ 280{
274 struct bio *b = bio_alloc_bioset(gfp_mask, bio->bi_max_vecs, fs_bio_set); 281 struct bio *b = bio_alloc_bioset(gfp_mask, bio->bi_max_vecs, fs_bio_set);
275 282
276 if (b) 283 if (b) {
284 b->bi_destructor = bio_fs_destructor;
277 __bio_clone(b, bio); 285 __bio_clone(b, bio);
286 }
278 287
279 return b; 288 return b;
280} 289}
@@ -1075,6 +1084,7 @@ subsys_initcall(init_bio);
1075 1084
1076EXPORT_SYMBOL(bio_alloc); 1085EXPORT_SYMBOL(bio_alloc);
1077EXPORT_SYMBOL(bio_put); 1086EXPORT_SYMBOL(bio_put);
1087EXPORT_SYMBOL(bio_free);
1078EXPORT_SYMBOL(bio_endio); 1088EXPORT_SYMBOL(bio_endio);
1079EXPORT_SYMBOL(bio_init); 1089EXPORT_SYMBOL(bio_init);
1080EXPORT_SYMBOL(__bio_clone); 1090EXPORT_SYMBOL(__bio_clone);
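With bio_free() exported and the destructor no longer installed inside bio_alloc_bioset(), callers that allocate from their own bio_set are expected to set a destructor themselves, exactly as bio_alloc() and bio_clone() now do for fs_bio_set. A hedged sketch of that pattern for a driver-private pool follows; my_bio_set, my_bio_destructor and my_bio_alloc are illustrative names, and creation of the bio_set itself is omitted.

static struct bio_set *my_bio_set;	/* assumed to be created elsewhere */

static void my_bio_destructor(struct bio *bio)
{
	/* Return the iovec array and the bio to this driver's own pools. */
	bio_free(bio, my_bio_set);
}

static struct bio *my_bio_alloc(unsigned int gfp_mask, int nr_iovecs)
{
	struct bio *bio = bio_alloc_bioset(gfp_mask, nr_iovecs, my_bio_set);

	if (bio)
		bio->bi_destructor = my_bio_destructor;
	return bio;
}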
diff --git a/fs/buffer.c b/fs/buffer.c
index 6a25d7df89b1..1c62203a4906 100644
--- a/fs/buffer.c
+++ b/fs/buffer.c
@@ -917,8 +917,7 @@ static int fsync_buffers_list(spinlock_t *lock, struct list_head *list)
917 * contents - it is a noop if I/O is still in 917 * contents - it is a noop if I/O is still in
918 * flight on potentially older contents. 918 * flight on potentially older contents.
919 */ 919 */
920 wait_on_buffer(bh); 920 ll_rw_block(SWRITE, 1, &bh);
921 ll_rw_block(WRITE, 1, &bh);
922 brelse(bh); 921 brelse(bh);
923 spin_lock(lock); 922 spin_lock(lock);
924 } 923 }
@@ -2793,21 +2792,22 @@ int submit_bh(int rw, struct buffer_head * bh)
2793 2792
2794/** 2793/**
2795 * ll_rw_block: low-level access to block devices (DEPRECATED) 2794 * ll_rw_block: low-level access to block devices (DEPRECATED)
2796 * @rw: whether to %READ or %WRITE or maybe %READA (readahead) 2795 * @rw: whether to %READ or %WRITE or %SWRITE or maybe %READA (readahead)
2797 * @nr: number of &struct buffer_heads in the array 2796 * @nr: number of &struct buffer_heads in the array
2798 * @bhs: array of pointers to &struct buffer_head 2797 * @bhs: array of pointers to &struct buffer_head
2799 * 2798 *
2800 * ll_rw_block() takes an array of pointers to &struct buffer_heads, 2799 * ll_rw_block() takes an array of pointers to &struct buffer_heads, and
2801 * and requests an I/O operation on them, either a %READ or a %WRITE. 2800 * requests an I/O operation on them, either a %READ or a %WRITE. The third
2802 * The third %READA option is described in the documentation for 2801 * %SWRITE is like %WRITE only we make sure that the *current* data in buffers
2803 * generic_make_request() which ll_rw_block() calls. 2802 * are sent to disk. The fourth %READA option is described in the documentation
2803 * for generic_make_request() which ll_rw_block() calls.
2804 * 2804 *
2805 * This function drops any buffer that it cannot get a lock on (with the 2805 * This function drops any buffer that it cannot get a lock on (with the
2806 * BH_Lock state bit), any buffer that appears to be clean when doing a 2806 * BH_Lock state bit) unless SWRITE is required, any buffer that appears to be
2807 * write request, and any buffer that appears to be up-to-date when doing 2807 * clean when doing a write request, and any buffer that appears to be
2808 * read request. Further it marks as clean buffers that are processed for 2808 * up-to-date when doing read request. Further it marks as clean buffers that
2809 * writing (the buffer cache won't assume that they are actually clean until 2809 * are processed for writing (the buffer cache won't assume that they are
2810 * the buffer gets unlocked). 2810 * actually clean until the buffer gets unlocked).
2811 * 2811 *
2812 * ll_rw_block sets b_end_io to simple completion handler that marks 2812 * ll_rw_block sets b_end_io to simple completion handler that marks
2813 * the buffer up-to-date (if appropriate), unlocks the buffer and wakes 2813 * the buffer up-to-date (if appropriate), unlocks the buffer and wakes
@@ -2823,11 +2823,13 @@ void ll_rw_block(int rw, int nr, struct buffer_head *bhs[])
2823 for (i = 0; i < nr; i++) { 2823 for (i = 0; i < nr; i++) {
2824 struct buffer_head *bh = bhs[i]; 2824 struct buffer_head *bh = bhs[i];
2825 2825
2826 if (test_set_buffer_locked(bh)) 2826 if (rw == SWRITE)
2827 lock_buffer(bh);
2828 else if (test_set_buffer_locked(bh))
2827 continue; 2829 continue;
2828 2830
2829 get_bh(bh); 2831 get_bh(bh);
2830 if (rw == WRITE) { 2832 if (rw == WRITE || rw == SWRITE) {
2831 if (test_clear_buffer_dirty(bh)) { 2833 if (test_clear_buffer_dirty(bh)) {
2832 bh->b_end_io = end_buffer_write_sync; 2834 bh->b_end_io = end_buffer_write_sync;
2833 submit_bh(WRITE, bh); 2835 submit_bh(WRITE, bh);
@@ -3046,10 +3048,9 @@ struct buffer_head *alloc_buffer_head(unsigned int __nocast gfp_flags)
3046{ 3048{
3047 struct buffer_head *ret = kmem_cache_alloc(bh_cachep, gfp_flags); 3049 struct buffer_head *ret = kmem_cache_alloc(bh_cachep, gfp_flags);
3048 if (ret) { 3050 if (ret) {
3049 preempt_disable(); 3051 get_cpu_var(bh_accounting).nr++;
3050 __get_cpu_var(bh_accounting).nr++;
3051 recalc_bh_state(); 3052 recalc_bh_state();
3052 preempt_enable(); 3053 put_cpu_var(bh_accounting);
3053 } 3054 }
3054 return ret; 3055 return ret;
3055} 3056}
@@ -3059,10 +3060,9 @@ void free_buffer_head(struct buffer_head *bh)
3059{ 3060{
3060 BUG_ON(!list_empty(&bh->b_assoc_buffers)); 3061 BUG_ON(!list_empty(&bh->b_assoc_buffers));
3061 kmem_cache_free(bh_cachep, bh); 3062 kmem_cache_free(bh_cachep, bh);
3062 preempt_disable(); 3063 get_cpu_var(bh_accounting).nr--;
3063 __get_cpu_var(bh_accounting).nr--;
3064 recalc_bh_state(); 3064 recalc_bh_state();
3065 preempt_enable(); 3065 put_cpu_var(bh_accounting);
3066} 3066}
3067EXPORT_SYMBOL(free_buffer_head); 3067EXPORT_SYMBOL(free_buffer_head);
3068 3068
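Two idioms change in this file: the fsync path now issues ll_rw_block(SWRITE, ...) so the buffer is locked unconditionally and its current contents forced out, replacing the wait_on_buffer()+WRITE pair; and the buffer-head accounting swaps an open-coded preempt_disable()/__get_cpu_var()/preempt_enable() sequence for get_cpu_var()/put_cpu_var(), which bundle the preemption handling with the per-CPU access. A minimal sketch of the latter idiom, using an illustrative counter rather than bh_accounting:

#include <linux/percpu.h>

static DEFINE_PER_CPU(long, demo_nr);	/* illustrative per-CPU counter */

static void demo_inc(void)
{
	/* get_cpu_var() disables preemption and yields this CPU's copy;
	 * put_cpu_var() re-enables preemption afterwards. */
	get_cpu_var(demo_nr)++;
	put_cpu_var(demo_nr);
}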
diff --git a/fs/cifs/connect.c b/fs/cifs/connect.c
index e568cc47a7f9..3217ac5f6bd7 100644
--- a/fs/cifs/connect.c
+++ b/fs/cifs/connect.c
@@ -836,7 +836,7 @@ cifs_parse_mount_options(char *options, const char *devname,struct smb_vol *vol)
836 /* go from value to value + temp_len condensing 836 /* go from value to value + temp_len condensing
837 double commas to singles. Note that this ends up 837 double commas to singles. Note that this ends up
838 allocating a few bytes too many, which is ok */ 838 allocating a few bytes too many, which is ok */
839 vol->password = kcalloc(1, temp_len, GFP_KERNEL); 839 vol->password = kzalloc(temp_len, GFP_KERNEL);
840 if(vol->password == NULL) { 840 if(vol->password == NULL) {
841 printk("CIFS: no memory for pass\n"); 841 printk("CIFS: no memory for pass\n");
842 return 1; 842 return 1;
@@ -851,7 +851,7 @@ cifs_parse_mount_options(char *options, const char *devname,struct smb_vol *vol)
851 } 851 }
852 vol->password[j] = 0; 852 vol->password[j] = 0;
853 } else { 853 } else {
854 vol->password = kcalloc(1, temp_len+1, GFP_KERNEL); 854 vol->password = kzalloc(temp_len+1, GFP_KERNEL);
855 if(vol->password == NULL) { 855 if(vol->password == NULL) {
856 printk("CIFS: no memory for pass\n"); 856 printk("CIFS: no memory for pass\n");
857 return 1; 857 return 1;
@@ -1317,7 +1317,7 @@ ipv4_connect(struct sockaddr_in *psin_server, struct socket **csocket,
1317 sessinit is sent but no second negprot */ 1317 sessinit is sent but no second negprot */
1318 struct rfc1002_session_packet * ses_init_buf; 1318 struct rfc1002_session_packet * ses_init_buf;
1319 struct smb_hdr * smb_buf; 1319 struct smb_hdr * smb_buf;
1320 ses_init_buf = kcalloc(1, sizeof(struct rfc1002_session_packet), GFP_KERNEL); 1320 ses_init_buf = kzalloc(sizeof(struct rfc1002_session_packet), GFP_KERNEL);
1321 if(ses_init_buf) { 1321 if(ses_init_buf) {
1322 ses_init_buf->trailer.session_req.called_len = 32; 1322 ses_init_buf->trailer.session_req.called_len = 32;
1323 rfc1002mangle(ses_init_buf->trailer.session_req.called_name, 1323 rfc1002mangle(ses_init_buf->trailer.session_req.called_name,
@@ -1964,7 +1964,7 @@ CIFSSessSetup(unsigned int xid, struct cifsSesInfo *ses,
1964/* We look for obvious messed up bcc or strings in response so we do not go off 1964/* We look for obvious messed up bcc or strings in response so we do not go off
1965 the end since (at least) WIN2K and Windows XP have a major bug in not null 1965 the end since (at least) WIN2K and Windows XP have a major bug in not null
1966 terminating last Unicode string in response */ 1966 terminating last Unicode string in response */
1967 ses->serverOS = kcalloc(1, 2 * (len + 1), GFP_KERNEL); 1967 ses->serverOS = kzalloc(2 * (len + 1), GFP_KERNEL);
1968 if(ses->serverOS == NULL) 1968 if(ses->serverOS == NULL)
1969 goto sesssetup_nomem; 1969 goto sesssetup_nomem;
1970 cifs_strfromUCS_le(ses->serverOS, 1970 cifs_strfromUCS_le(ses->serverOS,
@@ -1976,7 +1976,7 @@ CIFSSessSetup(unsigned int xid, struct cifsSesInfo *ses,
1976 if (remaining_words > 0) { 1976 if (remaining_words > 0) {
1977 len = UniStrnlen((wchar_t *)bcc_ptr, 1977 len = UniStrnlen((wchar_t *)bcc_ptr,
1978 remaining_words-1); 1978 remaining_words-1);
1979 ses->serverNOS = kcalloc(1, 2 * (len + 1),GFP_KERNEL); 1979 ses->serverNOS = kzalloc(2 * (len + 1),GFP_KERNEL);
1980 if(ses->serverNOS == NULL) 1980 if(ses->serverNOS == NULL)
1981 goto sesssetup_nomem; 1981 goto sesssetup_nomem;
1982 cifs_strfromUCS_le(ses->serverNOS, 1982 cifs_strfromUCS_le(ses->serverNOS,
@@ -1994,7 +1994,7 @@ CIFSSessSetup(unsigned int xid, struct cifsSesInfo *ses,
1994 len = UniStrnlen((wchar_t *) bcc_ptr, remaining_words); 1994 len = UniStrnlen((wchar_t *) bcc_ptr, remaining_words);
1995 /* last string is not always null terminated (for e.g. for Windows XP & 2000) */ 1995 /* last string is not always null terminated (for e.g. for Windows XP & 2000) */
1996 ses->serverDomain = 1996 ses->serverDomain =
1997 kcalloc(1, 2*(len+1),GFP_KERNEL); 1997 kzalloc(2*(len+1),GFP_KERNEL);
1998 if(ses->serverDomain == NULL) 1998 if(ses->serverDomain == NULL)
1999 goto sesssetup_nomem; 1999 goto sesssetup_nomem;
2000 cifs_strfromUCS_le(ses->serverDomain, 2000 cifs_strfromUCS_le(ses->serverDomain,
@@ -2005,22 +2005,22 @@ CIFSSessSetup(unsigned int xid, struct cifsSesInfo *ses,
2005 } /* else no more room so create dummy domain string */ 2005 } /* else no more room so create dummy domain string */
2006 else 2006 else
2007 ses->serverDomain = 2007 ses->serverDomain =
2008 kcalloc(1, 2, GFP_KERNEL); 2008 kzalloc(2, GFP_KERNEL);
2009 } else { /* no room so create dummy domain and NOS string */ 2009 } else { /* no room so create dummy domain and NOS string */
2010 /* if these kcallocs fail not much we 2010 /* if these kcallocs fail not much we
2011 can do, but better to not fail the 2011 can do, but better to not fail the
2012 sesssetup itself */ 2012 sesssetup itself */
2013 ses->serverDomain = 2013 ses->serverDomain =
2014 kcalloc(1, 2, GFP_KERNEL); 2014 kzalloc(2, GFP_KERNEL);
2015 ses->serverNOS = 2015 ses->serverNOS =
2016 kcalloc(1, 2, GFP_KERNEL); 2016 kzalloc(2, GFP_KERNEL);
2017 } 2017 }
2018 } else { /* ASCII */ 2018 } else { /* ASCII */
2019 len = strnlen(bcc_ptr, 1024); 2019 len = strnlen(bcc_ptr, 1024);
2020 if (((long) bcc_ptr + len) - (long) 2020 if (((long) bcc_ptr + len) - (long)
2021 pByteArea(smb_buffer_response) 2021 pByteArea(smb_buffer_response)
2022 <= BCC(smb_buffer_response)) { 2022 <= BCC(smb_buffer_response)) {
2023 ses->serverOS = kcalloc(1, len + 1,GFP_KERNEL); 2023 ses->serverOS = kzalloc(len + 1,GFP_KERNEL);
2024 if(ses->serverOS == NULL) 2024 if(ses->serverOS == NULL)
2025 goto sesssetup_nomem; 2025 goto sesssetup_nomem;
2026 strncpy(ses->serverOS,bcc_ptr, len); 2026 strncpy(ses->serverOS,bcc_ptr, len);
@@ -2030,7 +2030,7 @@ CIFSSessSetup(unsigned int xid, struct cifsSesInfo *ses,
2030 bcc_ptr++; 2030 bcc_ptr++;
2031 2031
2032 len = strnlen(bcc_ptr, 1024); 2032 len = strnlen(bcc_ptr, 1024);
2033 ses->serverNOS = kcalloc(1, len + 1,GFP_KERNEL); 2033 ses->serverNOS = kzalloc(len + 1,GFP_KERNEL);
2034 if(ses->serverNOS == NULL) 2034 if(ses->serverNOS == NULL)
2035 goto sesssetup_nomem; 2035 goto sesssetup_nomem;
2036 strncpy(ses->serverNOS, bcc_ptr, len); 2036 strncpy(ses->serverNOS, bcc_ptr, len);
@@ -2039,7 +2039,7 @@ CIFSSessSetup(unsigned int xid, struct cifsSesInfo *ses,
2039 bcc_ptr++; 2039 bcc_ptr++;
2040 2040
2041 len = strnlen(bcc_ptr, 1024); 2041 len = strnlen(bcc_ptr, 1024);
2042 ses->serverDomain = kcalloc(1, len + 1,GFP_KERNEL); 2042 ses->serverDomain = kzalloc(len + 1,GFP_KERNEL);
2043 if(ses->serverDomain == NULL) 2043 if(ses->serverDomain == NULL)
2044 goto sesssetup_nomem; 2044 goto sesssetup_nomem;
2045 strncpy(ses->serverDomain, bcc_ptr, len); 2045 strncpy(ses->serverDomain, bcc_ptr, len);
@@ -2240,7 +2240,7 @@ CIFSSpnegoSessSetup(unsigned int xid, struct cifsSesInfo *ses,
2240 the end since (at least) WIN2K and Windows XP have a major bug in not null 2240 the end since (at least) WIN2K and Windows XP have a major bug in not null
2241 terminating last Unicode string in response */ 2241 terminating last Unicode string in response */
2242 ses->serverOS = 2242 ses->serverOS =
2243 kcalloc(1, 2 * (len + 1), GFP_KERNEL); 2243 kzalloc(2 * (len + 1), GFP_KERNEL);
2244 cifs_strfromUCS_le(ses->serverOS, 2244 cifs_strfromUCS_le(ses->serverOS,
2245 (wchar_t *) 2245 (wchar_t *)
2246 bcc_ptr, len, 2246 bcc_ptr, len,
@@ -2254,7 +2254,7 @@ CIFSSpnegoSessSetup(unsigned int xid, struct cifsSesInfo *ses,
2254 remaining_words 2254 remaining_words
2255 - 1); 2255 - 1);
2256 ses->serverNOS = 2256 ses->serverNOS =
2257 kcalloc(1, 2 * (len + 1), 2257 kzalloc(2 * (len + 1),
2258 GFP_KERNEL); 2258 GFP_KERNEL);
2259 cifs_strfromUCS_le(ses->serverNOS, 2259 cifs_strfromUCS_le(ses->serverNOS,
2260 (wchar_t *)bcc_ptr, 2260 (wchar_t *)bcc_ptr,
@@ -2267,7 +2267,7 @@ CIFSSpnegoSessSetup(unsigned int xid, struct cifsSesInfo *ses,
2267 if (remaining_words > 0) { 2267 if (remaining_words > 0) {
2268 len = UniStrnlen((wchar_t *) bcc_ptr, remaining_words); 2268 len = UniStrnlen((wchar_t *) bcc_ptr, remaining_words);
2269 /* last string is not always null terminated (for e.g. for Windows XP & 2000) */ 2269 /* last string is not always null terminated (for e.g. for Windows XP & 2000) */
2270 ses->serverDomain = kcalloc(1, 2*(len+1),GFP_KERNEL); 2270 ses->serverDomain = kzalloc(2*(len+1),GFP_KERNEL);
2271 cifs_strfromUCS_le(ses->serverDomain, 2271 cifs_strfromUCS_le(ses->serverDomain,
2272 (wchar_t *)bcc_ptr, 2272 (wchar_t *)bcc_ptr,
2273 len, 2273 len,
@@ -2278,10 +2278,10 @@ CIFSSpnegoSessSetup(unsigned int xid, struct cifsSesInfo *ses,
2278 } /* else no more room so create dummy domain string */ 2278 } /* else no more room so create dummy domain string */
2279 else 2279 else
2280 ses->serverDomain = 2280 ses->serverDomain =
2281 kcalloc(1, 2,GFP_KERNEL); 2281 kzalloc(2,GFP_KERNEL);
2282 } else { /* no room so create dummy domain and NOS string */ 2282 } else { /* no room so create dummy domain and NOS string */
2283 ses->serverDomain = kcalloc(1, 2, GFP_KERNEL); 2283 ses->serverDomain = kzalloc(2, GFP_KERNEL);
2284 ses->serverNOS = kcalloc(1, 2, GFP_KERNEL); 2284 ses->serverNOS = kzalloc(2, GFP_KERNEL);
2285 } 2285 }
2286 } else { /* ASCII */ 2286 } else { /* ASCII */
2287 2287
@@ -2289,7 +2289,7 @@ CIFSSpnegoSessSetup(unsigned int xid, struct cifsSesInfo *ses,
2289 if (((long) bcc_ptr + len) - (long) 2289 if (((long) bcc_ptr + len) - (long)
2290 pByteArea(smb_buffer_response) 2290 pByteArea(smb_buffer_response)
2291 <= BCC(smb_buffer_response)) { 2291 <= BCC(smb_buffer_response)) {
2292 ses->serverOS = kcalloc(1, len + 1, GFP_KERNEL); 2292 ses->serverOS = kzalloc(len + 1, GFP_KERNEL);
2293 strncpy(ses->serverOS, bcc_ptr, len); 2293 strncpy(ses->serverOS, bcc_ptr, len);
2294 2294
2295 bcc_ptr += len; 2295 bcc_ptr += len;
@@ -2297,14 +2297,14 @@ CIFSSpnegoSessSetup(unsigned int xid, struct cifsSesInfo *ses,
2297 bcc_ptr++; 2297 bcc_ptr++;
2298 2298
2299 len = strnlen(bcc_ptr, 1024); 2299 len = strnlen(bcc_ptr, 1024);
2300 ses->serverNOS = kcalloc(1, len + 1,GFP_KERNEL); 2300 ses->serverNOS = kzalloc(len + 1,GFP_KERNEL);
2301 strncpy(ses->serverNOS, bcc_ptr, len); 2301 strncpy(ses->serverNOS, bcc_ptr, len);
2302 bcc_ptr += len; 2302 bcc_ptr += len;
2303 bcc_ptr[0] = 0; 2303 bcc_ptr[0] = 0;
2304 bcc_ptr++; 2304 bcc_ptr++;
2305 2305
2306 len = strnlen(bcc_ptr, 1024); 2306 len = strnlen(bcc_ptr, 1024);
2307 ses->serverDomain = kcalloc(1, len + 1, GFP_KERNEL); 2307 ses->serverDomain = kzalloc(len + 1, GFP_KERNEL);
2308 strncpy(ses->serverDomain, bcc_ptr, len); 2308 strncpy(ses->serverDomain, bcc_ptr, len);
2309 bcc_ptr += len; 2309 bcc_ptr += len;
2310 bcc_ptr[0] = 0; 2310 bcc_ptr[0] = 0;
@@ -2554,7 +2554,7 @@ CIFSNTLMSSPNegotiateSessSetup(unsigned int xid,
2554 the end since (at least) WIN2K and Windows XP have a major bug in not null 2554 the end since (at least) WIN2K and Windows XP have a major bug in not null
2555 terminating last Unicode string in response */ 2555 terminating last Unicode string in response */
2556 ses->serverOS = 2556 ses->serverOS =
2557 kcalloc(1, 2 * (len + 1), GFP_KERNEL); 2557 kzalloc(2 * (len + 1), GFP_KERNEL);
2558 cifs_strfromUCS_le(ses->serverOS, 2558 cifs_strfromUCS_le(ses->serverOS,
2559 (wchar_t *) 2559 (wchar_t *)
2560 bcc_ptr, len, 2560 bcc_ptr, len,
@@ -2569,7 +2569,7 @@ CIFSNTLMSSPNegotiateSessSetup(unsigned int xid,
2569 remaining_words 2569 remaining_words
2570 - 1); 2570 - 1);
2571 ses->serverNOS = 2571 ses->serverNOS =
2572 kcalloc(1, 2 * (len + 1), 2572 kzalloc(2 * (len + 1),
2573 GFP_KERNEL); 2573 GFP_KERNEL);
2574 cifs_strfromUCS_le(ses-> 2574 cifs_strfromUCS_le(ses->
2575 serverNOS, 2575 serverNOS,
@@ -2586,7 +2586,7 @@ CIFSNTLMSSPNegotiateSessSetup(unsigned int xid,
2586 len = UniStrnlen((wchar_t *) bcc_ptr, remaining_words); 2586 len = UniStrnlen((wchar_t *) bcc_ptr, remaining_words);
2587 /* last string is not always null terminated (for e.g. for Windows XP & 2000) */ 2587 /* last string is not always null terminated (for e.g. for Windows XP & 2000) */
2588 ses->serverDomain = 2588 ses->serverDomain =
2589 kcalloc(1, 2 * 2589 kzalloc(2 *
2590 (len + 2590 (len +
2591 1), 2591 1),
2592 GFP_KERNEL); 2592 GFP_KERNEL);
@@ -2612,13 +2612,13 @@ CIFSNTLMSSPNegotiateSessSetup(unsigned int xid,
2612 } /* else no more room so create dummy domain string */ 2612 } /* else no more room so create dummy domain string */
2613 else 2613 else
2614 ses->serverDomain = 2614 ses->serverDomain =
2615 kcalloc(1, 2, 2615 kzalloc(2,
2616 GFP_KERNEL); 2616 GFP_KERNEL);
2617 } else { /* no room so create dummy domain and NOS string */ 2617 } else { /* no room so create dummy domain and NOS string */
2618 ses->serverDomain = 2618 ses->serverDomain =
2619 kcalloc(1, 2, GFP_KERNEL); 2619 kzalloc(2, GFP_KERNEL);
2620 ses->serverNOS = 2620 ses->serverNOS =
2621 kcalloc(1, 2, GFP_KERNEL); 2621 kzalloc(2, GFP_KERNEL);
2622 } 2622 }
2623 } else { /* ASCII */ 2623 } else { /* ASCII */
2624 len = strnlen(bcc_ptr, 1024); 2624 len = strnlen(bcc_ptr, 1024);
@@ -2626,7 +2626,7 @@ CIFSNTLMSSPNegotiateSessSetup(unsigned int xid,
2626 pByteArea(smb_buffer_response) 2626 pByteArea(smb_buffer_response)
2627 <= BCC(smb_buffer_response)) { 2627 <= BCC(smb_buffer_response)) {
2628 ses->serverOS = 2628 ses->serverOS =
2629 kcalloc(1, len + 1, 2629 kzalloc(len + 1,
2630 GFP_KERNEL); 2630 GFP_KERNEL);
2631 strncpy(ses->serverOS, 2631 strncpy(ses->serverOS,
2632 bcc_ptr, len); 2632 bcc_ptr, len);
@@ -2637,7 +2637,7 @@ CIFSNTLMSSPNegotiateSessSetup(unsigned int xid,
2637 2637
2638 len = strnlen(bcc_ptr, 1024); 2638 len = strnlen(bcc_ptr, 1024);
2639 ses->serverNOS = 2639 ses->serverNOS =
2640 kcalloc(1, len + 1, 2640 kzalloc(len + 1,
2641 GFP_KERNEL); 2641 GFP_KERNEL);
2642 strncpy(ses->serverNOS, bcc_ptr, len); 2642 strncpy(ses->serverNOS, bcc_ptr, len);
2643 bcc_ptr += len; 2643 bcc_ptr += len;
@@ -2646,7 +2646,7 @@ CIFSNTLMSSPNegotiateSessSetup(unsigned int xid,
2646 2646
2647 len = strnlen(bcc_ptr, 1024); 2647 len = strnlen(bcc_ptr, 1024);
2648 ses->serverDomain = 2648 ses->serverDomain =
2649 kcalloc(1, len + 1, 2649 kzalloc(len + 1,
2650 GFP_KERNEL); 2650 GFP_KERNEL);
2651 strncpy(ses->serverDomain, bcc_ptr, len); 2651 strncpy(ses->serverDomain, bcc_ptr, len);
2652 bcc_ptr += len; 2652 bcc_ptr += len;
@@ -2948,7 +2948,7 @@ CIFSNTLMSSPAuthSessSetup(unsigned int xid, struct cifsSesInfo *ses,
2948 the end since (at least) WIN2K and Windows XP have a major bug in not null 2948 the end since (at least) WIN2K and Windows XP have a major bug in not null
2949 terminating last Unicode string in response */ 2949 terminating last Unicode string in response */
2950 ses->serverOS = 2950 ses->serverOS =
2951 kcalloc(1, 2 * (len + 1), GFP_KERNEL); 2951 kzalloc(2 * (len + 1), GFP_KERNEL);
2952 cifs_strfromUCS_le(ses->serverOS, 2952 cifs_strfromUCS_le(ses->serverOS,
2953 (wchar_t *) 2953 (wchar_t *)
2954 bcc_ptr, len, 2954 bcc_ptr, len,
@@ -2963,7 +2963,7 @@ CIFSNTLMSSPAuthSessSetup(unsigned int xid, struct cifsSesInfo *ses,
2963 remaining_words 2963 remaining_words
2964 - 1); 2964 - 1);
2965 ses->serverNOS = 2965 ses->serverNOS =
2966 kcalloc(1, 2 * (len + 1), 2966 kzalloc(2 * (len + 1),
2967 GFP_KERNEL); 2967 GFP_KERNEL);
2968 cifs_strfromUCS_le(ses-> 2968 cifs_strfromUCS_le(ses->
2969 serverNOS, 2969 serverNOS,
@@ -2979,7 +2979,7 @@ CIFSNTLMSSPAuthSessSetup(unsigned int xid, struct cifsSesInfo *ses,
2979 len = UniStrnlen((wchar_t *) bcc_ptr, remaining_words); 2979 len = UniStrnlen((wchar_t *) bcc_ptr, remaining_words);
2980 /* last string not always null terminated (e.g. for Windows XP & 2000) */ 2980 /* last string not always null terminated (e.g. for Windows XP & 2000) */
2981 ses->serverDomain = 2981 ses->serverDomain =
2982 kcalloc(1, 2 * 2982 kzalloc(2 *
2983 (len + 2983 (len +
2984 1), 2984 1),
2985 GFP_KERNEL); 2985 GFP_KERNEL);
@@ -3004,17 +3004,17 @@ CIFSNTLMSSPAuthSessSetup(unsigned int xid, struct cifsSesInfo *ses,
3004 = 0; 3004 = 0;
3005 } /* else no more room so create dummy domain string */ 3005 } /* else no more room so create dummy domain string */
3006 else 3006 else
3007 ses->serverDomain = kcalloc(1, 2,GFP_KERNEL); 3007 ses->serverDomain = kzalloc(2,GFP_KERNEL);
3008 } else { /* no room so create dummy domain and NOS string */ 3008 } else { /* no room so create dummy domain and NOS string */
3009 ses->serverDomain = kcalloc(1, 2, GFP_KERNEL); 3009 ses->serverDomain = kzalloc(2, GFP_KERNEL);
3010 ses->serverNOS = kcalloc(1, 2, GFP_KERNEL); 3010 ses->serverNOS = kzalloc(2, GFP_KERNEL);
3011 } 3011 }
3012 } else { /* ASCII */ 3012 } else { /* ASCII */
3013 len = strnlen(bcc_ptr, 1024); 3013 len = strnlen(bcc_ptr, 1024);
3014 if (((long) bcc_ptr + len) - 3014 if (((long) bcc_ptr + len) -
3015 (long) pByteArea(smb_buffer_response) 3015 (long) pByteArea(smb_buffer_response)
3016 <= BCC(smb_buffer_response)) { 3016 <= BCC(smb_buffer_response)) {
3017 ses->serverOS = kcalloc(1, len + 1,GFP_KERNEL); 3017 ses->serverOS = kzalloc(len + 1,GFP_KERNEL);
3018 strncpy(ses->serverOS,bcc_ptr, len); 3018 strncpy(ses->serverOS,bcc_ptr, len);
3019 3019
3020 bcc_ptr += len; 3020 bcc_ptr += len;
@@ -3022,14 +3022,14 @@ CIFSNTLMSSPAuthSessSetup(unsigned int xid, struct cifsSesInfo *ses,
3022 bcc_ptr++; 3022 bcc_ptr++;
3023 3023
3024 len = strnlen(bcc_ptr, 1024); 3024 len = strnlen(bcc_ptr, 1024);
3025 ses->serverNOS = kcalloc(1, len+1,GFP_KERNEL); 3025 ses->serverNOS = kzalloc(len+1,GFP_KERNEL);
3026 strncpy(ses->serverNOS, bcc_ptr, len); 3026 strncpy(ses->serverNOS, bcc_ptr, len);
3027 bcc_ptr += len; 3027 bcc_ptr += len;
3028 bcc_ptr[0] = 0; 3028 bcc_ptr[0] = 0;
3029 bcc_ptr++; 3029 bcc_ptr++;
3030 3030
3031 len = strnlen(bcc_ptr, 1024); 3031 len = strnlen(bcc_ptr, 1024);
3032 ses->serverDomain = kcalloc(1, len+1,GFP_KERNEL); 3032 ses->serverDomain = kzalloc(len+1,GFP_KERNEL);
3033 strncpy(ses->serverDomain, bcc_ptr, len); 3033 strncpy(ses->serverDomain, bcc_ptr, len);
3034 bcc_ptr += len; 3034 bcc_ptr += len;
3035 bcc_ptr[0] = 0; 3035 bcc_ptr[0] = 0;
@@ -3141,7 +3141,7 @@ CIFSTCon(unsigned int xid, struct cifsSesInfo *ses,
3141 if(tcon->nativeFileSystem) 3141 if(tcon->nativeFileSystem)
3142 kfree(tcon->nativeFileSystem); 3142 kfree(tcon->nativeFileSystem);
3143 tcon->nativeFileSystem = 3143 tcon->nativeFileSystem =
3144 kcalloc(1, length + 2, GFP_KERNEL); 3144 kzalloc(length + 2, GFP_KERNEL);
3145 cifs_strfromUCS_le(tcon->nativeFileSystem, 3145 cifs_strfromUCS_le(tcon->nativeFileSystem,
3146 (wchar_t *) bcc_ptr, 3146 (wchar_t *) bcc_ptr,
3147 length, nls_codepage); 3147 length, nls_codepage);
@@ -3159,7 +3159,7 @@ CIFSTCon(unsigned int xid, struct cifsSesInfo *ses,
3159 if(tcon->nativeFileSystem) 3159 if(tcon->nativeFileSystem)
3160 kfree(tcon->nativeFileSystem); 3160 kfree(tcon->nativeFileSystem);
3161 tcon->nativeFileSystem = 3161 tcon->nativeFileSystem =
3162 kcalloc(1, length + 1, GFP_KERNEL); 3162 kzalloc(length + 1, GFP_KERNEL);
3163 strncpy(tcon->nativeFileSystem, bcc_ptr, 3163 strncpy(tcon->nativeFileSystem, bcc_ptr,
3164 length); 3164 length);
3165 } 3165 }
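
Note: the connect.c hunks above all make the same substitution: kcalloc(1, size, GFP_KERNEL) becomes kzalloc(size, GFP_KERNEL). For a single element the two calls return the same zero-filled allocation; kzalloc simply drops the pointless multiply by one. A minimal illustrative sketch (the function name is made up, not CIFS code):

#include <linux/slab.h>

/* Allocate one zero-filled, NUL-terminated string buffer.
 * kcalloc(1, len + 1, flags) and kzalloc(len + 1, flags) are
 * equivalent here; the latter is the preferred spelling. */
static char *example_alloc_string(size_t len)
{
	return kzalloc(len + 1, GFP_KERNEL);	/* may return NULL */
}
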
diff --git a/fs/cifs/dir.c b/fs/cifs/dir.c
index 3f3538d4a1fa..d335269bd91c 100644
--- a/fs/cifs/dir.c
+++ b/fs/cifs/dir.c
@@ -145,24 +145,23 @@ cifs_create(struct inode *inode, struct dentry *direntry, int mode,
145 return -ENOMEM; 145 return -ENOMEM;
146 } 146 }
147 147
148 if(nd) { 148 if(nd && (nd->flags & LOOKUP_OPEN)) {
149 if ((nd->intent.open.flags & O_ACCMODE) == O_RDONLY) 149 int oflags = nd->intent.open.flags;
150 desiredAccess = GENERIC_READ; 150
151 else if ((nd->intent.open.flags & O_ACCMODE) == O_WRONLY) { 151 desiredAccess = 0;
152 desiredAccess = GENERIC_WRITE; 152 if (oflags & FMODE_READ)
153 write_only = TRUE; 153 desiredAccess |= GENERIC_READ;
154 } else if ((nd->intent.open.flags & O_ACCMODE) == O_RDWR) { 154 if (oflags & FMODE_WRITE) {
155 /* GENERIC_ALL is too much permission to request */ 155 desiredAccess |= GENERIC_WRITE;
156 /* can cause unnecessary access denied on create */ 156 if (!(oflags & FMODE_READ))
157 /* desiredAccess = GENERIC_ALL; */ 157 write_only = TRUE;
158 desiredAccess = GENERIC_READ | GENERIC_WRITE;
159 } 158 }
160 159
161 if((nd->intent.open.flags & (O_CREAT | O_EXCL)) == (O_CREAT | O_EXCL)) 160 if((oflags & (O_CREAT | O_EXCL)) == (O_CREAT | O_EXCL))
162 disposition = FILE_CREATE; 161 disposition = FILE_CREATE;
163 else if((nd->intent.open.flags & (O_CREAT | O_TRUNC)) == (O_CREAT | O_TRUNC)) 162 else if((oflags & (O_CREAT | O_TRUNC)) == (O_CREAT | O_TRUNC))
164 disposition = FILE_OVERWRITE_IF; 163 disposition = FILE_OVERWRITE_IF;
165 else if((nd->intent.open.flags & O_CREAT) == O_CREAT) 164 else if((oflags & O_CREAT) == O_CREAT)
166 disposition = FILE_OPEN_IF; 165 disposition = FILE_OPEN_IF;
167 else { 166 else {
168 cFYI(1,("Create flag not set in create function")); 167 cFYI(1,("Create flag not set in create function"));
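
Note: the cifs_create() change above stops decoding O_ACCMODE from the open intent and instead derives the SMB access mask from the FMODE_READ/FMODE_WRITE bits, remembering the write-only case. A hedged sketch of just that mapping (the GENERIC_* values follow the usual Windows access-mask bits and are illustrative here, not taken from the CIFS headers):

#include <linux/fs.h>

#define EX_GENERIC_READ		0x80000000U	/* illustrative values */
#define EX_GENERIC_WRITE	0x40000000U

static u32 example_intent_to_access(int oflags, int *write_only)
{
	u32 access = 0;

	if (oflags & FMODE_READ)
		access |= EX_GENERIC_READ;
	if (oflags & FMODE_WRITE) {
		access |= EX_GENERIC_WRITE;
		if (!(oflags & FMODE_READ))
			*write_only = 1;	/* open for write only */
	}
	return access;
}
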
diff --git a/fs/compat.c b/fs/compat.c
index 6b06b6bae35e..8c665705c6a0 100644
--- a/fs/compat.c
+++ b/fs/compat.c
@@ -310,96 +310,6 @@ static int __init init_sys32_ioctl(void)
310 310
311__initcall(init_sys32_ioctl); 311__initcall(init_sys32_ioctl);
312 312
313int register_ioctl32_conversion(unsigned int cmd,
314 ioctl_trans_handler_t handler)
315{
316 struct ioctl_trans *t;
317 struct ioctl_trans *new_t;
318 unsigned long hash = ioctl32_hash(cmd);
319
320 new_t = kmalloc(sizeof(*new_t), GFP_KERNEL);
321 if (!new_t)
322 return -ENOMEM;
323
324 down_write(&ioctl32_sem);
325 for (t = ioctl32_hash_table[hash]; t; t = t->next) {
326 if (t->cmd == cmd) {
327 printk(KERN_ERR "Trying to register duplicated ioctl32 "
328 "handler %x\n", cmd);
329 up_write(&ioctl32_sem);
330 kfree(new_t);
331 return -EINVAL;
332 }
333 }
334 new_t->next = NULL;
335 new_t->cmd = cmd;
336 new_t->handler = handler;
337 ioctl32_insert_translation(new_t);
338
339 up_write(&ioctl32_sem);
340 return 0;
341}
342EXPORT_SYMBOL(register_ioctl32_conversion);
343
344static inline int builtin_ioctl(struct ioctl_trans *t)
345{
346 return t >= ioctl_start && t < (ioctl_start + ioctl_table_size);
347}
348
349/* Problem:
350 This function cannot unregister duplicate ioctls, because they are not
351 unique.
352 When they happen we need to extend the prototype to pass the handler too. */
353
354int unregister_ioctl32_conversion(unsigned int cmd)
355{
356 unsigned long hash = ioctl32_hash(cmd);
357 struct ioctl_trans *t, *t1;
358
359 down_write(&ioctl32_sem);
360
361 t = ioctl32_hash_table[hash];
362 if (!t) {
363 up_write(&ioctl32_sem);
364 return -EINVAL;
365 }
366
367 if (t->cmd == cmd) {
368 if (builtin_ioctl(t)) {
369 printk("%p tried to unregister builtin ioctl %x\n",
370 __builtin_return_address(0), cmd);
371 } else {
372 ioctl32_hash_table[hash] = t->next;
373 up_write(&ioctl32_sem);
374 kfree(t);
375 return 0;
376 }
377 }
378 while (t->next) {
379 t1 = t->next;
380 if (t1->cmd == cmd) {
381 if (builtin_ioctl(t1)) {
382 printk("%p tried to unregister builtin "
383 "ioctl %x\n",
384 __builtin_return_address(0), cmd);
385 goto out;
386 } else {
387 t->next = t1->next;
388 up_write(&ioctl32_sem);
389 kfree(t1);
390 return 0;
391 }
392 }
393 t = t1;
394 }
395 printk(KERN_ERR "Trying to free unknown 32bit ioctl handler %x\n",
396 cmd);
397out:
398 up_write(&ioctl32_sem);
399 return -EINVAL;
400}
401EXPORT_SYMBOL(unregister_ioctl32_conversion);
402
403static void compat_ioctl_error(struct file *filp, unsigned int fd, 313static void compat_ioctl_error(struct file *filp, unsigned int fd,
404 unsigned int cmd, unsigned long arg) 314 unsigned int cmd, unsigned long arg)
405{ 315{
@@ -720,14 +630,14 @@ compat_sys_io_submit(aio_context_t ctx_id, int nr, u32 __user *iocb)
720struct compat_ncp_mount_data { 630struct compat_ncp_mount_data {
721 compat_int_t version; 631 compat_int_t version;
722 compat_uint_t ncp_fd; 632 compat_uint_t ncp_fd;
723 compat_uid_t mounted_uid; 633 __compat_uid_t mounted_uid;
724 compat_pid_t wdog_pid; 634 compat_pid_t wdog_pid;
725 unsigned char mounted_vol[NCP_VOLNAME_LEN + 1]; 635 unsigned char mounted_vol[NCP_VOLNAME_LEN + 1];
726 compat_uint_t time_out; 636 compat_uint_t time_out;
727 compat_uint_t retry_count; 637 compat_uint_t retry_count;
728 compat_uint_t flags; 638 compat_uint_t flags;
729 compat_uid_t uid; 639 __compat_uid_t uid;
730 compat_gid_t gid; 640 __compat_gid_t gid;
731 compat_mode_t file_mode; 641 compat_mode_t file_mode;
732 compat_mode_t dir_mode; 642 compat_mode_t dir_mode;
733}; 643};
@@ -784,9 +694,9 @@ static void *do_ncp_super_data_conv(void *raw_data)
784 694
785struct compat_smb_mount_data { 695struct compat_smb_mount_data {
786 compat_int_t version; 696 compat_int_t version;
787 compat_uid_t mounted_uid; 697 __compat_uid_t mounted_uid;
788 compat_uid_t uid; 698 __compat_uid_t uid;
789 compat_gid_t gid; 699 __compat_gid_t gid;
790 compat_mode_t file_mode; 700 compat_mode_t file_mode;
791 compat_mode_t dir_mode; 701 compat_mode_t dir_mode;
792}; 702};
@@ -1365,6 +1275,16 @@ out:
1365} 1275}
1366 1276
1367/* 1277/*
1278 * Exactly like fs/open.c:sys_open(), except that it doesn't set the
1279 * O_LARGEFILE flag.
1280 */
1281asmlinkage long
1282compat_sys_open(const char __user *filename, int flags, int mode)
1283{
1284 return do_sys_open(filename, flags, mode);
1285}
1286
1287/*
1368 * compat_count() counts the number of arguments/envelopes. It is basically 1288 * compat_count() counts the number of arguments/envelopes. It is basically
1369 * a copy of count() from fs/exec.c, except that it works with 32 bit argv 1289 * a copy of count() from fs/exec.c, except that it works with 32 bit argv
1370 * and envp pointers. 1290 * and envp pointers.
@@ -1808,8 +1728,8 @@ struct compat_nfsctl_export {
1808 compat_dev_t ex32_dev; 1728 compat_dev_t ex32_dev;
1809 compat_ino_t ex32_ino; 1729 compat_ino_t ex32_ino;
1810 compat_int_t ex32_flags; 1730 compat_int_t ex32_flags;
1811 compat_uid_t ex32_anon_uid; 1731 __compat_uid_t ex32_anon_uid;
1812 compat_gid_t ex32_anon_gid; 1732 __compat_gid_t ex32_anon_gid;
1813}; 1733};
1814 1734
1815struct compat_nfsctl_fdparm { 1735struct compat_nfsctl_fdparm {
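
Note: three separate cleanups meet in this fs/compat.c hunk: the run-time register_ioctl32_conversion()/unregister_ioctl32_conversion() interface is deleted, the 32-bit mount/nfsctl mirror structures switch from compat_uid_t/compat_gid_t to the double-underscore legacy types, and compat_sys_open() is added so 32-bit callers do not inherit the implicit O_LARGEFILE of the native sys_open(). The legacy types matter because these structures must reproduce the old 32-bit userland layout exactly; on most architectures __compat_uid_t is the narrow legacy uid. Illustrative structure only, not one of the structs in the patch:

#include <linux/compat.h>

/* Hypothetical compat mirror of a userland mount-data structure.
 * The __compat_* types keep the legacy uid/gid width so the layout
 * matches what 32-bit userland actually passes in. */
struct example_compat_mount_data {
	compat_int_t	version;
	__compat_uid_t	mounted_uid;	/* legacy-width uid */
	__compat_gid_t	gid;		/* legacy-width gid */
	compat_mode_t	file_mode;
};
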
diff --git a/fs/cramfs/inode.c b/fs/cramfs/inode.c
index 6c285efa2004..7fe85415ae7c 100644
--- a/fs/cramfs/inode.c
+++ b/fs/cramfs/inode.c
@@ -39,12 +39,47 @@ static DECLARE_MUTEX(read_mutex);
39#define CRAMINO(x) ((x)->offset?(x)->offset<<2:1) 39#define CRAMINO(x) ((x)->offset?(x)->offset<<2:1)
40#define OFFSET(x) ((x)->i_ino) 40#define OFFSET(x) ((x)->i_ino)
41 41
42static struct inode *get_cramfs_inode(struct super_block *sb, struct cramfs_inode * cramfs_inode) 42
43static int cramfs_iget5_test(struct inode *inode, void *opaque)
44{
45 struct cramfs_inode *cramfs_inode = opaque;
46
47 if (inode->i_ino != CRAMINO(cramfs_inode))
48 return 0; /* does not match */
49
50 if (inode->i_ino != 1)
51 return 1;
52
53 /* all empty directories, char, block, pipe, and sock, share inode #1 */
54
55 if ((inode->i_mode != cramfs_inode->mode) ||
56 (inode->i_gid != cramfs_inode->gid) ||
57 (inode->i_uid != cramfs_inode->uid))
58 return 0; /* does not match */
59
60 if ((S_ISCHR(inode->i_mode) || S_ISBLK(inode->i_mode)) &&
61 (inode->i_rdev != old_decode_dev(cramfs_inode->size)))
62 return 0; /* does not match */
63
64 return 1; /* matches */
65}
66
67static int cramfs_iget5_set(struct inode *inode, void *opaque)
68{
69 struct cramfs_inode *cramfs_inode = opaque;
70 inode->i_ino = CRAMINO(cramfs_inode);
71 return 0;
72}
73
74static struct inode *get_cramfs_inode(struct super_block *sb,
75 struct cramfs_inode * cramfs_inode)
43{ 76{
44 struct inode * inode = new_inode(sb); 77 struct inode *inode = iget5_locked(sb, CRAMINO(cramfs_inode),
78 cramfs_iget5_test, cramfs_iget5_set,
79 cramfs_inode);
45 static struct timespec zerotime; 80 static struct timespec zerotime;
46 81
47 if (inode) { 82 if (inode && (inode->i_state & I_NEW)) {
48 inode->i_mode = cramfs_inode->mode; 83 inode->i_mode = cramfs_inode->mode;
49 inode->i_uid = cramfs_inode->uid; 84 inode->i_uid = cramfs_inode->uid;
50 inode->i_size = cramfs_inode->size; 85 inode->i_size = cramfs_inode->size;
@@ -58,7 +93,6 @@ static struct inode *get_cramfs_inode(struct super_block *sb, struct cramfs_inod
58 but it's the best we can do without reading the directory 93 but it's the best we can do without reading the directory
59 contents. 1 yields the right result in GNU find, even 94 contents. 1 yields the right result in GNU find, even
60 without -noleaf option. */ 95 without -noleaf option. */
61 insert_inode_hash(inode);
62 if (S_ISREG(inode->i_mode)) { 96 if (S_ISREG(inode->i_mode)) {
63 inode->i_fop = &generic_ro_fops; 97 inode->i_fop = &generic_ro_fops;
64 inode->i_data.a_ops = &cramfs_aops; 98 inode->i_data.a_ops = &cramfs_aops;
@@ -74,6 +108,7 @@ static struct inode *get_cramfs_inode(struct super_block *sb, struct cramfs_inod
74 init_special_inode(inode, inode->i_mode, 108 init_special_inode(inode, inode->i_mode,
75 old_decode_dev(cramfs_inode->size)); 109 old_decode_dev(cramfs_inode->size));
76 } 110 }
111 unlock_new_inode(inode);
77 } 112 }
78 return inode; 113 return inode;
79} 114}
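
Note: get_cramfs_inode() above moves from new_inode() plus insert_inode_hash() to iget5_locked(), so the lookup can use a test callback that tells apart the many objects sharing inode number 1 (by mode/uid/gid/rdev) and only initialises an inode when I_NEW is set, finishing with unlock_new_inode(). A generic sketch of that pattern, with a placeholder key and callbacks rather than the cramfs ones:

#include <linux/fs.h>

struct example_key {
	unsigned long ino;
};

/* Return 1 if the cached inode matches the key, 0 otherwise. */
static int example_iget_test(struct inode *inode, void *opaque)
{
	struct example_key *key = opaque;

	return inode->i_ino == key->ino;
}

/* Initialise the lookup fields of a freshly allocated inode. */
static int example_iget_set(struct inode *inode, void *opaque)
{
	struct example_key *key = opaque;

	inode->i_ino = key->ino;
	return 0;
}

static struct inode *example_iget(struct super_block *sb, struct example_key *key)
{
	struct inode *inode = iget5_locked(sb, key->ino, example_iget_test,
					   example_iget_set, key);

	if (inode && (inode->i_state & I_NEW)) {
		/* fill the rest of the inode from on-disk data here ... */
		unlock_new_inode(inode);	/* clears I_NEW, wakes waiters */
	}
	return inode;
}
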
diff --git a/fs/ext2/super.c b/fs/ext2/super.c
index dcfe331dc4c4..3c0c7c6a5b44 100644
--- a/fs/ext2/super.c
+++ b/fs/ext2/super.c
@@ -19,6 +19,7 @@
19#include <linux/config.h> 19#include <linux/config.h>
20#include <linux/module.h> 20#include <linux/module.h>
21#include <linux/string.h> 21#include <linux/string.h>
22#include <linux/fs.h>
22#include <linux/slab.h> 23#include <linux/slab.h>
23#include <linux/init.h> 24#include <linux/init.h>
24#include <linux/blkdev.h> 25#include <linux/blkdev.h>
@@ -27,6 +28,8 @@
27#include <linux/buffer_head.h> 28#include <linux/buffer_head.h>
28#include <linux/smp_lock.h> 29#include <linux/smp_lock.h>
29#include <linux/vfs.h> 30#include <linux/vfs.h>
31#include <linux/seq_file.h>
32#include <linux/mount.h>
30#include <asm/uaccess.h> 33#include <asm/uaccess.h>
31#include "ext2.h" 34#include "ext2.h"
32#include "xattr.h" 35#include "xattr.h"
@@ -201,6 +204,26 @@ static void ext2_clear_inode(struct inode *inode)
201#endif 204#endif
202} 205}
203 206
207static int ext2_show_options(struct seq_file *seq, struct vfsmount *vfs)
208{
209 struct ext2_sb_info *sbi = EXT2_SB(vfs->mnt_sb);
210
211 if (sbi->s_mount_opt & EXT2_MOUNT_GRPID)
212 seq_puts(seq, ",grpid");
213 else
214 seq_puts(seq, ",nogrpid");
215
216#if defined(CONFIG_QUOTA)
217 if (sbi->s_mount_opt & EXT2_MOUNT_USRQUOTA)
218 seq_puts(seq, ",usrquota");
219
220 if (sbi->s_mount_opt & EXT2_MOUNT_GRPQUOTA)
221 seq_puts(seq, ",grpquota");
222#endif
223
224 return 0;
225}
226
204#ifdef CONFIG_QUOTA 227#ifdef CONFIG_QUOTA
205static ssize_t ext2_quota_read(struct super_block *sb, int type, char *data, size_t len, loff_t off); 228static ssize_t ext2_quota_read(struct super_block *sb, int type, char *data, size_t len, loff_t off);
206static ssize_t ext2_quota_write(struct super_block *sb, int type, const char *data, size_t len, loff_t off); 229static ssize_t ext2_quota_write(struct super_block *sb, int type, const char *data, size_t len, loff_t off);
@@ -218,6 +241,7 @@ static struct super_operations ext2_sops = {
218 .statfs = ext2_statfs, 241 .statfs = ext2_statfs,
219 .remount_fs = ext2_remount, 242 .remount_fs = ext2_remount,
220 .clear_inode = ext2_clear_inode, 243 .clear_inode = ext2_clear_inode,
244 .show_options = ext2_show_options,
221#ifdef CONFIG_QUOTA 245#ifdef CONFIG_QUOTA
222 .quota_read = ext2_quota_read, 246 .quota_read = ext2_quota_read,
223 .quota_write = ext2_quota_write, 247 .quota_write = ext2_quota_write,
@@ -256,10 +280,11 @@ static unsigned long get_sb_block(void **data)
256 280
257enum { 281enum {
258 Opt_bsd_df, Opt_minix_df, Opt_grpid, Opt_nogrpid, 282 Opt_bsd_df, Opt_minix_df, Opt_grpid, Opt_nogrpid,
259 Opt_resgid, Opt_resuid, Opt_sb, Opt_err_cont, Opt_err_panic, Opt_err_ro, 283 Opt_resgid, Opt_resuid, Opt_sb, Opt_err_cont, Opt_err_panic,
260 Opt_nouid32, Opt_check, Opt_nocheck, Opt_debug, Opt_oldalloc, Opt_orlov, Opt_nobh, 284 Opt_err_ro, Opt_nouid32, Opt_check, Opt_nocheck, Opt_debug,
261 Opt_user_xattr, Opt_nouser_xattr, Opt_acl, Opt_noacl, Opt_xip, 285 Opt_oldalloc, Opt_orlov, Opt_nobh, Opt_user_xattr, Opt_nouser_xattr,
262 Opt_ignore, Opt_err, 286 Opt_acl, Opt_noacl, Opt_xip, Opt_ignore, Opt_err, Opt_quota,
287 Opt_usrquota, Opt_grpquota
263}; 288};
264 289
265static match_table_t tokens = { 290static match_table_t tokens = {
@@ -288,10 +313,10 @@ static match_table_t tokens = {
288 {Opt_acl, "acl"}, 313 {Opt_acl, "acl"},
289 {Opt_noacl, "noacl"}, 314 {Opt_noacl, "noacl"},
290 {Opt_xip, "xip"}, 315 {Opt_xip, "xip"},
291 {Opt_ignore, "grpquota"}, 316 {Opt_grpquota, "grpquota"},
292 {Opt_ignore, "noquota"}, 317 {Opt_ignore, "noquota"},
293 {Opt_ignore, "quota"}, 318 {Opt_quota, "quota"},
294 {Opt_ignore, "usrquota"}, 319 {Opt_usrquota, "usrquota"},
295 {Opt_err, NULL} 320 {Opt_err, NULL}
296}; 321};
297 322
@@ -406,6 +431,26 @@ static int parse_options (char * options,
406 printk("EXT2 xip option not supported\n"); 431 printk("EXT2 xip option not supported\n");
407#endif 432#endif
408 break; 433 break;
434
435#if defined(CONFIG_QUOTA)
436 case Opt_quota:
437 case Opt_usrquota:
438 set_opt(sbi->s_mount_opt, USRQUOTA);
439 break;
440
441 case Opt_grpquota:
442 set_opt(sbi->s_mount_opt, GRPQUOTA);
443 break;
444#else
445 case Opt_quota:
446 case Opt_usrquota:
447 case Opt_grpquota:
448 printk(KERN_ERR
449 "EXT2-fs: quota operations not supported.\n");
450
451 break;
452#endif
453
409 case Opt_ignore: 454 case Opt_ignore:
410 break; 455 break;
411 default: 456 default:
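
Note: besides promoting quota/usrquota/grpquota to real mount options, the ext2 hunk adds a ->show_options super operation, which is how the active options become visible in /proc/mounts. A minimal sketch of such a hook under assumed field names (not the ext2 ones):

#include <linux/fs.h>
#include <linux/mount.h>
#include <linux/seq_file.h>

struct example_sb_info {
	int grpid;
	unsigned int resuid;
};

/* Append ",option" strings; the VFS prints them after the generic
 * mount flags when /proc/mounts is read. */
static int example_show_options(struct seq_file *seq, struct vfsmount *vfs)
{
	struct example_sb_info *sbi = vfs->mnt_sb->s_fs_info;

	if (sbi->grpid)
		seq_puts(seq, ",grpid");
	if (sbi->resuid)
		seq_printf(seq, ",resuid=%u", sbi->resuid);
	return 0;
}
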
diff --git a/fs/ext3/super.c b/fs/ext3/super.c
index 3c3c6e399fb3..a93c3609025d 100644
--- a/fs/ext3/super.c
+++ b/fs/ext3/super.c
@@ -35,6 +35,7 @@
35#include <linux/mount.h> 35#include <linux/mount.h>
36#include <linux/namei.h> 36#include <linux/namei.h>
37#include <linux/quotaops.h> 37#include <linux/quotaops.h>
38#include <linux/seq_file.h>
38#include <asm/uaccess.h> 39#include <asm/uaccess.h>
39#include "xattr.h" 40#include "xattr.h"
40#include "acl.h" 41#include "acl.h"
@@ -509,8 +510,41 @@ static void ext3_clear_inode(struct inode *inode)
509 kfree(rsv); 510 kfree(rsv);
510} 511}
511 512
512#ifdef CONFIG_QUOTA 513static int ext3_show_options(struct seq_file *seq, struct vfsmount *vfs)
514{
515 struct ext3_sb_info *sbi = EXT3_SB(vfs->mnt_sb);
516
517 if (sbi->s_mount_opt & EXT3_MOUNT_JOURNAL_DATA)
518 seq_puts(seq, ",data=journal");
519
520 if (sbi->s_mount_opt & EXT3_MOUNT_ORDERED_DATA)
521 seq_puts(seq, ",data=ordered");
522
523 if (sbi->s_mount_opt & EXT3_MOUNT_WRITEBACK_DATA)
524 seq_puts(seq, ",data=writeback");
525
526#if defined(CONFIG_QUOTA)
527 if (sbi->s_jquota_fmt)
528 seq_printf(seq, ",jqfmt=%s",
529 (sbi->s_jquota_fmt == QFMT_VFS_OLD) ? "vfsold": "vfsv0");
530
531 if (sbi->s_qf_names[USRQUOTA])
532 seq_printf(seq, ",usrjquota=%s", sbi->s_qf_names[USRQUOTA]);
533
534 if (sbi->s_qf_names[GRPQUOTA])
535 seq_printf(seq, ",grpjquota=%s", sbi->s_qf_names[GRPQUOTA]);
513 536
537 if (sbi->s_mount_opt & EXT3_MOUNT_USRQUOTA)
538 seq_puts(seq, ",usrquota");
539
540 if (sbi->s_mount_opt & EXT3_MOUNT_GRPQUOTA)
541 seq_puts(seq, ",grpquota");
542#endif
543
544 return 0;
545}
546
547#ifdef CONFIG_QUOTA
514#define QTYPE2NAME(t) ((t)==USRQUOTA?"user":"group") 548#define QTYPE2NAME(t) ((t)==USRQUOTA?"user":"group")
515#define QTYPE2MOPT(on, t) ((t)==USRQUOTA?((on)##USRJQUOTA):((on)##GRPJQUOTA)) 549#define QTYPE2MOPT(on, t) ((t)==USRQUOTA?((on)##USRJQUOTA):((on)##GRPJQUOTA))
516 550
@@ -569,6 +603,7 @@ static struct super_operations ext3_sops = {
569 .statfs = ext3_statfs, 603 .statfs = ext3_statfs,
570 .remount_fs = ext3_remount, 604 .remount_fs = ext3_remount,
571 .clear_inode = ext3_clear_inode, 605 .clear_inode = ext3_clear_inode,
606 .show_options = ext3_show_options,
572#ifdef CONFIG_QUOTA 607#ifdef CONFIG_QUOTA
573 .quota_read = ext3_quota_read, 608 .quota_read = ext3_quota_read,
574 .quota_write = ext3_quota_write, 609 .quota_write = ext3_quota_write,
@@ -590,7 +625,8 @@ enum {
590 Opt_abort, Opt_data_journal, Opt_data_ordered, Opt_data_writeback, 625 Opt_abort, Opt_data_journal, Opt_data_ordered, Opt_data_writeback,
591 Opt_usrjquota, Opt_grpjquota, Opt_offusrjquota, Opt_offgrpjquota, 626 Opt_usrjquota, Opt_grpjquota, Opt_offusrjquota, Opt_offgrpjquota,
592 Opt_jqfmt_vfsold, Opt_jqfmt_vfsv0, Opt_quota, Opt_noquota, 627 Opt_jqfmt_vfsold, Opt_jqfmt_vfsv0, Opt_quota, Opt_noquota,
593 Opt_ignore, Opt_barrier, Opt_err, Opt_resize, 628 Opt_ignore, Opt_barrier, Opt_err, Opt_resize, Opt_usrquota,
629 Opt_grpquota
594}; 630};
595 631
596static match_table_t tokens = { 632static match_table_t tokens = {
@@ -634,10 +670,10 @@ static match_table_t tokens = {
634 {Opt_grpjquota, "grpjquota=%s"}, 670 {Opt_grpjquota, "grpjquota=%s"},
635 {Opt_jqfmt_vfsold, "jqfmt=vfsold"}, 671 {Opt_jqfmt_vfsold, "jqfmt=vfsold"},
636 {Opt_jqfmt_vfsv0, "jqfmt=vfsv0"}, 672 {Opt_jqfmt_vfsv0, "jqfmt=vfsv0"},
637 {Opt_quota, "grpquota"}, 673 {Opt_grpquota, "grpquota"},
638 {Opt_noquota, "noquota"}, 674 {Opt_noquota, "noquota"},
639 {Opt_quota, "quota"}, 675 {Opt_quota, "quota"},
640 {Opt_quota, "usrquota"}, 676 {Opt_usrquota, "usrquota"},
641 {Opt_barrier, "barrier=%u"}, 677 {Opt_barrier, "barrier=%u"},
642 {Opt_err, NULL}, 678 {Opt_err, NULL},
643 {Opt_resize, "resize"}, 679 {Opt_resize, "resize"},
@@ -903,7 +939,13 @@ clear_qf_name:
903 sbi->s_jquota_fmt = QFMT_VFS_V0; 939 sbi->s_jquota_fmt = QFMT_VFS_V0;
904 break; 940 break;
905 case Opt_quota: 941 case Opt_quota:
942 case Opt_usrquota:
906 set_opt(sbi->s_mount_opt, QUOTA); 943 set_opt(sbi->s_mount_opt, QUOTA);
944 set_opt(sbi->s_mount_opt, USRQUOTA);
945 break;
946 case Opt_grpquota:
947 set_opt(sbi->s_mount_opt, QUOTA);
948 set_opt(sbi->s_mount_opt, GRPQUOTA);
907 break; 949 break;
908 case Opt_noquota: 950 case Opt_noquota:
909 if (sb_any_quota_enabled(sb)) { 951 if (sb_any_quota_enabled(sb)) {
@@ -912,8 +954,13 @@ clear_qf_name:
912 return 0; 954 return 0;
913 } 955 }
914 clear_opt(sbi->s_mount_opt, QUOTA); 956 clear_opt(sbi->s_mount_opt, QUOTA);
957 clear_opt(sbi->s_mount_opt, USRQUOTA);
958 clear_opt(sbi->s_mount_opt, GRPQUOTA);
915 break; 959 break;
916#else 960#else
961 case Opt_quota:
962 case Opt_usrquota:
963 case Opt_grpquota:
917 case Opt_usrjquota: 964 case Opt_usrjquota:
918 case Opt_grpjquota: 965 case Opt_grpjquota:
919 case Opt_offusrjquota: 966 case Opt_offusrjquota:
@@ -924,7 +971,6 @@ clear_qf_name:
924 "EXT3-fs: journalled quota options not " 971 "EXT3-fs: journalled quota options not "
925 "supported.\n"); 972 "supported.\n");
926 break; 973 break;
927 case Opt_quota:
928 case Opt_noquota: 974 case Opt_noquota:
929 break; 975 break;
930#endif 976#endif
@@ -962,14 +1008,38 @@ clear_qf_name:
962 } 1008 }
963 } 1009 }
964#ifdef CONFIG_QUOTA 1010#ifdef CONFIG_QUOTA
965 if (!sbi->s_jquota_fmt && (sbi->s_qf_names[USRQUOTA] || 1011 if (sbi->s_qf_names[USRQUOTA] || sbi->s_qf_names[GRPQUOTA]) {
966 sbi->s_qf_names[GRPQUOTA])) { 1012 if ((sbi->s_mount_opt & EXT3_MOUNT_USRQUOTA) &&
967 printk(KERN_ERR 1013 sbi->s_qf_names[USRQUOTA])
968 "EXT3-fs: journalled quota format not specified.\n"); 1014 clear_opt(sbi->s_mount_opt, USRQUOTA);
969 return 0; 1015
1016 if ((sbi->s_mount_opt & EXT3_MOUNT_GRPQUOTA) &&
1017 sbi->s_qf_names[GRPQUOTA])
1018 clear_opt(sbi->s_mount_opt, GRPQUOTA);
1019
1020 if ((sbi->s_qf_names[USRQUOTA] &&
1021 (sbi->s_mount_opt & EXT3_MOUNT_GRPQUOTA)) ||
1022 (sbi->s_qf_names[GRPQUOTA] &&
1023 (sbi->s_mount_opt & EXT3_MOUNT_USRQUOTA))) {
1024 printk(KERN_ERR "EXT3-fs: old and new quota "
1025 "format mixing.\n");
1026 return 0;
1027 }
1028
1029 if (!sbi->s_jquota_fmt) {
1030 printk(KERN_ERR "EXT3-fs: journalled quota format "
1031 "not specified.\n");
1032 return 0;
1033 }
1034 } else {
1035 if (sbi->s_jquota_fmt) {
1036 printk(KERN_ERR "EXT3-fs: journalled quota format "
1037 "specified with no journalling "
1038 "enabled.\n");
1039 return 0;
1040 }
970 } 1041 }
971#endif 1042#endif
972
973 return 1; 1043 return 1;
974} 1044}
975 1045
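
Note: the ext3 side goes further than ext2: quota and usrquota now set USRQUOTA, grpquota sets GRPQUOTA, and parse_options() then reconciles those bits with the journalled-quota options so that old-style and journalled quota of different types cannot be mixed, and a jqfmt= format is required whenever usrjquota=/grpjquota= files are named. A condensed sketch of that validation, using placeholder fields instead of sbi->s_mount_opt and sbi->s_qf_names[]:

#include <linux/kernel.h>

struct example_quota_opts {
	int usrquota, grpquota;			/* old-style quota requested */
	const char *usrjquota, *grpjquota;	/* journalled quota file names */
	int jquota_fmt;				/* 0 = not specified */
};

/* Return 1 if the combination is acceptable, 0 to fail the mount. */
static int example_check_quota_opts(struct example_quota_opts *o)
{
	if (!o->usrjquota && !o->grpjquota) {
		if (o->jquota_fmt) {
			printk(KERN_ERR "example: jqfmt given without journalled quota files\n");
			return 0;
		}
		return 1;
	}

	/* journalled quota wins over old-style quota of the same type */
	if (o->usrjquota)
		o->usrquota = 0;
	if (o->grpjquota)
		o->grpquota = 0;

	/* but mixing the two schemes across types is refused */
	if ((o->usrjquota && o->grpquota) || (o->grpjquota && o->usrquota)) {
		printk(KERN_ERR "example: old and new quota format mixing\n");
		return 0;
	}
	if (!o->jquota_fmt) {
		printk(KERN_ERR "example: journalled quota format not specified\n");
		return 0;
	}
	return 1;
}
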
diff --git a/fs/fat/dir.c b/fs/fat/dir.c
index e5ae1b720dde..895049b2ac9c 100644
--- a/fs/fat/dir.c
+++ b/fs/fat/dir.c
@@ -30,6 +30,29 @@ static inline loff_t fat_make_i_pos(struct super_block *sb,
30 | (de - (struct msdos_dir_entry *)bh->b_data); 30 | (de - (struct msdos_dir_entry *)bh->b_data);
31} 31}
32 32
33static inline void fat_dir_readahead(struct inode *dir, sector_t iblock,
34 sector_t phys)
35{
36 struct super_block *sb = dir->i_sb;
37 struct msdos_sb_info *sbi = MSDOS_SB(sb);
38 struct buffer_head *bh;
39 int sec;
40
41 /* This is not a first sector of cluster, or sec_per_clus == 1 */
42 if ((iblock & (sbi->sec_per_clus - 1)) || sbi->sec_per_clus == 1)
43 return;
44 /* root dir of FAT12/FAT16 */
45 if ((sbi->fat_bits != 32) && (dir->i_ino == MSDOS_ROOT_INO))
46 return;
47
48 bh = sb_getblk(sb, phys);
49 if (bh && !buffer_uptodate(bh)) {
50 for (sec = 0; sec < sbi->sec_per_clus; sec++)
51 sb_breadahead(sb, phys + sec);
52 }
53 brelse(bh);
54}
55
33/* Returns the inode number of the directory entry at offset pos. If bh is 56/* Returns the inode number of the directory entry at offset pos. If bh is
34 non-NULL, it is brelse'd before. Pos is incremented. The buffer header is 57 non-NULL, it is brelse'd before. Pos is incremented. The buffer header is
35 returned in bh. 58 returned in bh.
@@ -58,6 +81,8 @@ next:
58 if (err || !phys) 81 if (err || !phys)
59 return -1; /* beyond EOF or error */ 82 return -1; /* beyond EOF or error */
60 83
84 fat_dir_readahead(dir, iblock, phys);
85
61 *bh = sb_bread(sb, phys); 86 *bh = sb_bread(sb, phys);
62 if (*bh == NULL) { 87 if (*bh == NULL) {
63 printk(KERN_ERR "FAT: Directory bread(block %llu) failed\n", 88 printk(KERN_ERR "FAT: Directory bread(block %llu) failed\n",
@@ -635,8 +660,7 @@ RecEnd:
635EODir: 660EODir:
636 filp->f_pos = cpos; 661 filp->f_pos = cpos;
637FillFailed: 662FillFailed:
638 if (bh) 663 brelse(bh);
639 brelse(bh);
640 if (unicode) 664 if (unicode)
641 free_page((unsigned long)unicode); 665 free_page((unsigned long)unicode);
642out: 666out:
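
Note: two small things in the fat/dir.c hunk: brelse() already tolerates a NULL buffer head, so the "if (bh)" guard goes away, and fat_dir_readahead() is introduced to kick off asynchronous reads for the rest of a directory cluster the first time its leading sector is touched (skipping one-sector clusters and the unaligned FAT12/16 root directory). The general shape of that readahead trick, as a sketch with made-up names:

#include <linux/fs.h>
#include <linux/buffer_head.h>

/* If the first block of a contiguous run is not already up to date,
 * start asynchronous readahead for the whole run before the caller
 * does its synchronous sb_bread(). */
static void example_readahead_run(struct super_block *sb, sector_t start,
				  unsigned int nr_blocks)
{
	struct buffer_head *bh = sb_getblk(sb, start);
	unsigned int i;

	if (bh && !buffer_uptodate(bh))
		for (i = 0; i < nr_blocks; i++)
			sb_breadahead(sb, start + i);
	brelse(bh);		/* brelse(NULL) is a no-op */
}
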
diff --git a/fs/file_table.c b/fs/file_table.c
index 1d3de78e6bc9..43e9e1737de2 100644
--- a/fs/file_table.c
+++ b/fs/file_table.c
@@ -89,7 +89,6 @@ struct file *get_empty_filp(void)
89 rwlock_init(&f->f_owner.lock); 89 rwlock_init(&f->f_owner.lock);
90 /* f->f_version: 0 */ 90 /* f->f_version: 0 */
91 INIT_LIST_HEAD(&f->f_list); 91 INIT_LIST_HEAD(&f->f_list);
92 f->f_maxcount = INT_MAX;
93 return f; 92 return f;
94 93
95over: 94over:
diff --git a/fs/freevxfs/vxfs_super.c b/fs/freevxfs/vxfs_super.c
index 27f66d3e8a04..6aa6fbe4f8ee 100644
--- a/fs/freevxfs/vxfs_super.c
+++ b/fs/freevxfs/vxfs_super.c
@@ -155,7 +155,7 @@ static int vxfs_fill_super(struct super_block *sbp, void *dp, int silent)
155 155
156 sbp->s_flags |= MS_RDONLY; 156 sbp->s_flags |= MS_RDONLY;
157 157
158 infp = kcalloc(1, sizeof(*infp), GFP_KERNEL); 158 infp = kzalloc(sizeof(*infp), GFP_KERNEL);
159 if (!infp) { 159 if (!infp) {
160 printk(KERN_WARNING "vxfs: unable to allocate incore superblock\n"); 160 printk(KERN_WARNING "vxfs: unable to allocate incore superblock\n");
161 return -ENOMEM; 161 return -ENOMEM;
diff --git a/fs/hfs/bnode.c b/fs/hfs/bnode.c
index a096c5a56664..3d5cdc6847c0 100644
--- a/fs/hfs/bnode.c
+++ b/fs/hfs/bnode.c
@@ -13,8 +13,6 @@
13 13
14#include "btree.h" 14#include "btree.h"
15 15
16#define REF_PAGES 0
17
18void hfs_bnode_read(struct hfs_bnode *node, void *buf, 16void hfs_bnode_read(struct hfs_bnode *node, void *buf,
19 int off, int len) 17 int off, int len)
20{ 18{
@@ -289,9 +287,7 @@ static struct hfs_bnode *__hfs_bnode_create(struct hfs_btree *tree, u32 cnid)
289 page_cache_release(page); 287 page_cache_release(page);
290 goto fail; 288 goto fail;
291 } 289 }
292#if !REF_PAGES
293 page_cache_release(page); 290 page_cache_release(page);
294#endif
295 node->page[i] = page; 291 node->page[i] = page;
296 } 292 }
297 293
@@ -449,13 +445,6 @@ void hfs_bnode_get(struct hfs_bnode *node)
449{ 445{
450 if (node) { 446 if (node) {
451 atomic_inc(&node->refcnt); 447 atomic_inc(&node->refcnt);
452#if REF_PAGES
453 {
454 int i;
455 for (i = 0; i < node->tree->pages_per_bnode; i++)
456 get_page(node->page[i]);
457 }
458#endif
459 dprint(DBG_BNODE_REFS, "get_node(%d:%d): %d\n", 448 dprint(DBG_BNODE_REFS, "get_node(%d:%d): %d\n",
460 node->tree->cnid, node->this, atomic_read(&node->refcnt)); 449 node->tree->cnid, node->this, atomic_read(&node->refcnt));
461 } 450 }
@@ -472,20 +461,12 @@ void hfs_bnode_put(struct hfs_bnode *node)
472 node->tree->cnid, node->this, atomic_read(&node->refcnt)); 461 node->tree->cnid, node->this, atomic_read(&node->refcnt));
473 if (!atomic_read(&node->refcnt)) 462 if (!atomic_read(&node->refcnt))
474 BUG(); 463 BUG();
475 if (!atomic_dec_and_lock(&node->refcnt, &tree->hash_lock)) { 464 if (!atomic_dec_and_lock(&node->refcnt, &tree->hash_lock))
476#if REF_PAGES
477 for (i = 0; i < tree->pages_per_bnode; i++)
478 put_page(node->page[i]);
479#endif
480 return; 465 return;
481 }
482 for (i = 0; i < tree->pages_per_bnode; i++) { 466 for (i = 0; i < tree->pages_per_bnode; i++) {
483 if (!node->page[i]) 467 if (!node->page[i])
484 continue; 468 continue;
485 mark_page_accessed(node->page[i]); 469 mark_page_accessed(node->page[i]);
486#if REF_PAGES
487 put_page(node->page[i]);
488#endif
489 } 470 }
490 471
491 if (test_bit(HFS_BNODE_DELETED, &node->flags)) { 472 if (test_bit(HFS_BNODE_DELETED, &node->flags)) {
diff --git a/fs/hfs/catalog.c b/fs/hfs/catalog.c
index 65dedefcabfc..2fcd679f0238 100644
--- a/fs/hfs/catalog.c
+++ b/fs/hfs/catalog.c
@@ -20,12 +20,12 @@
20 * 20 *
21 * Given the ID of the parent and the name build a search key. 21 * Given the ID of the parent and the name build a search key.
22 */ 22 */
23void hfs_cat_build_key(btree_key *key, u32 parent, struct qstr *name) 23void hfs_cat_build_key(struct super_block *sb, btree_key *key, u32 parent, struct qstr *name)
24{ 24{
25 key->cat.reserved = 0; 25 key->cat.reserved = 0;
26 key->cat.ParID = cpu_to_be32(parent); 26 key->cat.ParID = cpu_to_be32(parent);
27 if (name) { 27 if (name) {
28 hfs_triv2mac(&key->cat.CName, name); 28 hfs_asc2mac(sb, &key->cat.CName, name);
29 key->key_len = 6 + key->cat.CName.len; 29 key->key_len = 6 + key->cat.CName.len;
30 } else { 30 } else {
31 memset(&key->cat.CName, 0, sizeof(struct hfs_name)); 31 memset(&key->cat.CName, 0, sizeof(struct hfs_name));
@@ -62,13 +62,14 @@ static int hfs_cat_build_record(hfs_cat_rec *rec, u32 cnid, struct inode *inode)
62 } 62 }
63} 63}
64 64
65static int hfs_cat_build_thread(hfs_cat_rec *rec, int type, 65static int hfs_cat_build_thread(struct super_block *sb,
66 hfs_cat_rec *rec, int type,
66 u32 parentid, struct qstr *name) 67 u32 parentid, struct qstr *name)
67{ 68{
68 rec->type = type; 69 rec->type = type;
69 memset(rec->thread.reserved, 0, sizeof(rec->thread.reserved)); 70 memset(rec->thread.reserved, 0, sizeof(rec->thread.reserved));
70 rec->thread.ParID = cpu_to_be32(parentid); 71 rec->thread.ParID = cpu_to_be32(parentid);
71 hfs_triv2mac(&rec->thread.CName, name); 72 hfs_asc2mac(sb, &rec->thread.CName, name);
72 return sizeof(struct hfs_cat_thread); 73 return sizeof(struct hfs_cat_thread);
73} 74}
74 75
@@ -93,8 +94,8 @@ int hfs_cat_create(u32 cnid, struct inode *dir, struct qstr *str, struct inode *
93 sb = dir->i_sb; 94 sb = dir->i_sb;
94 hfs_find_init(HFS_SB(sb)->cat_tree, &fd); 95 hfs_find_init(HFS_SB(sb)->cat_tree, &fd);
95 96
96 hfs_cat_build_key(fd.search_key, cnid, NULL); 97 hfs_cat_build_key(sb, fd.search_key, cnid, NULL);
97 entry_size = hfs_cat_build_thread(&entry, S_ISDIR(inode->i_mode) ? 98 entry_size = hfs_cat_build_thread(sb, &entry, S_ISDIR(inode->i_mode) ?
98 HFS_CDR_THD : HFS_CDR_FTH, 99 HFS_CDR_THD : HFS_CDR_FTH,
99 dir->i_ino, str); 100 dir->i_ino, str);
100 err = hfs_brec_find(&fd); 101 err = hfs_brec_find(&fd);
@@ -107,7 +108,7 @@ int hfs_cat_create(u32 cnid, struct inode *dir, struct qstr *str, struct inode *
107 if (err) 108 if (err)
108 goto err2; 109 goto err2;
109 110
110 hfs_cat_build_key(fd.search_key, dir->i_ino, str); 111 hfs_cat_build_key(sb, fd.search_key, dir->i_ino, str);
111 entry_size = hfs_cat_build_record(&entry, cnid, inode); 112 entry_size = hfs_cat_build_record(&entry, cnid, inode);
112 err = hfs_brec_find(&fd); 113 err = hfs_brec_find(&fd);
113 if (err != -ENOENT) { 114 if (err != -ENOENT) {
@@ -127,7 +128,7 @@ int hfs_cat_create(u32 cnid, struct inode *dir, struct qstr *str, struct inode *
127 return 0; 128 return 0;
128 129
129err1: 130err1:
130 hfs_cat_build_key(fd.search_key, cnid, NULL); 131 hfs_cat_build_key(sb, fd.search_key, cnid, NULL);
131 if (!hfs_brec_find(&fd)) 132 if (!hfs_brec_find(&fd))
132 hfs_brec_remove(&fd); 133 hfs_brec_remove(&fd);
133err2: 134err2:
@@ -176,7 +177,7 @@ int hfs_cat_find_brec(struct super_block *sb, u32 cnid,
176 hfs_cat_rec rec; 177 hfs_cat_rec rec;
177 int res, len, type; 178 int res, len, type;
178 179
179 hfs_cat_build_key(fd->search_key, cnid, NULL); 180 hfs_cat_build_key(sb, fd->search_key, cnid, NULL);
180 res = hfs_brec_read(fd, &rec, sizeof(rec)); 181 res = hfs_brec_read(fd, &rec, sizeof(rec));
181 if (res) 182 if (res)
182 return res; 183 return res;
@@ -211,7 +212,7 @@ int hfs_cat_delete(u32 cnid, struct inode *dir, struct qstr *str)
211 sb = dir->i_sb; 212 sb = dir->i_sb;
212 hfs_find_init(HFS_SB(sb)->cat_tree, &fd); 213 hfs_find_init(HFS_SB(sb)->cat_tree, &fd);
213 214
214 hfs_cat_build_key(fd.search_key, dir->i_ino, str); 215 hfs_cat_build_key(sb, fd.search_key, dir->i_ino, str);
215 res = hfs_brec_find(&fd); 216 res = hfs_brec_find(&fd);
216 if (res) 217 if (res)
217 goto out; 218 goto out;
@@ -239,7 +240,7 @@ int hfs_cat_delete(u32 cnid, struct inode *dir, struct qstr *str)
239 if (res) 240 if (res)
240 goto out; 241 goto out;
241 242
242 hfs_cat_build_key(fd.search_key, cnid, NULL); 243 hfs_cat_build_key(sb, fd.search_key, cnid, NULL);
243 res = hfs_brec_find(&fd); 244 res = hfs_brec_find(&fd);
244 if (!res) { 245 if (!res) {
245 res = hfs_brec_remove(&fd); 246 res = hfs_brec_remove(&fd);
@@ -280,7 +281,7 @@ int hfs_cat_move(u32 cnid, struct inode *src_dir, struct qstr *src_name,
280 dst_fd = src_fd; 281 dst_fd = src_fd;
281 282
282 /* find the old dir entry and read the data */ 283 /* find the old dir entry and read the data */
283 hfs_cat_build_key(src_fd.search_key, src_dir->i_ino, src_name); 284 hfs_cat_build_key(sb, src_fd.search_key, src_dir->i_ino, src_name);
284 err = hfs_brec_find(&src_fd); 285 err = hfs_brec_find(&src_fd);
285 if (err) 286 if (err)
286 goto out; 287 goto out;
@@ -289,7 +290,7 @@ int hfs_cat_move(u32 cnid, struct inode *src_dir, struct qstr *src_name,
289 src_fd.entrylength); 290 src_fd.entrylength);
290 291
291 /* create new dir entry with the data from the old entry */ 292 /* create new dir entry with the data from the old entry */
292 hfs_cat_build_key(dst_fd.search_key, dst_dir->i_ino, dst_name); 293 hfs_cat_build_key(sb, dst_fd.search_key, dst_dir->i_ino, dst_name);
293 err = hfs_brec_find(&dst_fd); 294 err = hfs_brec_find(&dst_fd);
294 if (err != -ENOENT) { 295 if (err != -ENOENT) {
295 if (!err) 296 if (!err)
@@ -305,7 +306,7 @@ int hfs_cat_move(u32 cnid, struct inode *src_dir, struct qstr *src_name,
305 mark_inode_dirty(dst_dir); 306 mark_inode_dirty(dst_dir);
306 307
307 /* finally remove the old entry */ 308 /* finally remove the old entry */
308 hfs_cat_build_key(src_fd.search_key, src_dir->i_ino, src_name); 309 hfs_cat_build_key(sb, src_fd.search_key, src_dir->i_ino, src_name);
309 err = hfs_brec_find(&src_fd); 310 err = hfs_brec_find(&src_fd);
310 if (err) 311 if (err)
311 goto out; 312 goto out;
@@ -321,7 +322,7 @@ int hfs_cat_move(u32 cnid, struct inode *src_dir, struct qstr *src_name,
321 goto out; 322 goto out;
322 323
323 /* remove old thread entry */ 324 /* remove old thread entry */
324 hfs_cat_build_key(src_fd.search_key, cnid, NULL); 325 hfs_cat_build_key(sb, src_fd.search_key, cnid, NULL);
325 err = hfs_brec_find(&src_fd); 326 err = hfs_brec_find(&src_fd);
326 if (err) 327 if (err)
327 goto out; 328 goto out;
@@ -330,8 +331,8 @@ int hfs_cat_move(u32 cnid, struct inode *src_dir, struct qstr *src_name,
330 goto out; 331 goto out;
331 332
332 /* create new thread entry */ 333 /* create new thread entry */
333 hfs_cat_build_key(dst_fd.search_key, cnid, NULL); 334 hfs_cat_build_key(sb, dst_fd.search_key, cnid, NULL);
334 entry_size = hfs_cat_build_thread(&entry, type == HFS_CDR_FIL ? HFS_CDR_FTH : HFS_CDR_THD, 335 entry_size = hfs_cat_build_thread(sb, &entry, type == HFS_CDR_FIL ? HFS_CDR_FTH : HFS_CDR_THD,
335 dst_dir->i_ino, dst_name); 336 dst_dir->i_ino, dst_name);
336 err = hfs_brec_find(&dst_fd); 337 err = hfs_brec_find(&dst_fd);
337 if (err != -ENOENT) { 338 if (err != -ENOENT) {
diff --git a/fs/hfs/dir.c b/fs/hfs/dir.c
index c55998262aed..e1f24befba58 100644
--- a/fs/hfs/dir.c
+++ b/fs/hfs/dir.c
@@ -28,7 +28,7 @@ static struct dentry *hfs_lookup(struct inode *dir, struct dentry *dentry,
28 dentry->d_op = &hfs_dentry_operations; 28 dentry->d_op = &hfs_dentry_operations;
29 29
30 hfs_find_init(HFS_SB(dir->i_sb)->cat_tree, &fd); 30 hfs_find_init(HFS_SB(dir->i_sb)->cat_tree, &fd);
31 hfs_cat_build_key(fd.search_key, dir->i_ino, &dentry->d_name); 31 hfs_cat_build_key(dir->i_sb, fd.search_key, dir->i_ino, &dentry->d_name);
32 res = hfs_brec_read(&fd, &rec, sizeof(rec)); 32 res = hfs_brec_read(&fd, &rec, sizeof(rec));
33 if (res) { 33 if (res) {
34 hfs_find_exit(&fd); 34 hfs_find_exit(&fd);
@@ -56,7 +56,7 @@ static int hfs_readdir(struct file *filp, void *dirent, filldir_t filldir)
56 struct inode *inode = filp->f_dentry->d_inode; 56 struct inode *inode = filp->f_dentry->d_inode;
57 struct super_block *sb = inode->i_sb; 57 struct super_block *sb = inode->i_sb;
58 int len, err; 58 int len, err;
59 char strbuf[HFS_NAMELEN + 1]; 59 char strbuf[HFS_MAX_NAMELEN];
60 union hfs_cat_rec entry; 60 union hfs_cat_rec entry;
61 struct hfs_find_data fd; 61 struct hfs_find_data fd;
62 struct hfs_readdir_data *rd; 62 struct hfs_readdir_data *rd;
@@ -66,7 +66,7 @@ static int hfs_readdir(struct file *filp, void *dirent, filldir_t filldir)
66 return 0; 66 return 0;
67 67
68 hfs_find_init(HFS_SB(sb)->cat_tree, &fd); 68 hfs_find_init(HFS_SB(sb)->cat_tree, &fd);
69 hfs_cat_build_key(fd.search_key, inode->i_ino, NULL); 69 hfs_cat_build_key(sb, fd.search_key, inode->i_ino, NULL);
70 err = hfs_brec_find(&fd); 70 err = hfs_brec_find(&fd);
71 if (err) 71 if (err)
72 goto out; 72 goto out;
@@ -111,7 +111,7 @@ static int hfs_readdir(struct file *filp, void *dirent, filldir_t filldir)
111 } 111 }
112 hfs_bnode_read(fd.bnode, &entry, fd.entryoffset, fd.entrylength); 112 hfs_bnode_read(fd.bnode, &entry, fd.entryoffset, fd.entrylength);
113 type = entry.type; 113 type = entry.type;
114 len = hfs_mac2triv(strbuf, &fd.key->cat.CName); 114 len = hfs_mac2asc(sb, strbuf, &fd.key->cat.CName);
115 if (type == HFS_CDR_DIR) { 115 if (type == HFS_CDR_DIR) {
116 if (fd.entrylength < sizeof(struct hfs_cat_dir)) { 116 if (fd.entrylength < sizeof(struct hfs_cat_dir)) {
117 printk("HFS: small dir entry\n"); 117 printk("HFS: small dir entry\n");
@@ -307,7 +307,8 @@ static int hfs_rename(struct inode *old_dir, struct dentry *old_dentry,
307 old_dir, &old_dentry->d_name, 307 old_dir, &old_dentry->d_name,
308 new_dir, &new_dentry->d_name); 308 new_dir, &new_dentry->d_name);
309 if (!res) 309 if (!res)
310 hfs_cat_build_key((btree_key *)&HFS_I(old_dentry->d_inode)->cat_key, 310 hfs_cat_build_key(old_dir->i_sb,
311 (btree_key *)&HFS_I(old_dentry->d_inode)->cat_key,
311 new_dir->i_ino, &new_dentry->d_name); 312 new_dir->i_ino, &new_dentry->d_name);
312 return res; 313 return res;
313} 314}
diff --git a/fs/hfs/hfs.h b/fs/hfs/hfs.h
index df6b33adee3b..88099ab1a180 100644
--- a/fs/hfs/hfs.h
+++ b/fs/hfs/hfs.h
@@ -25,6 +25,7 @@
25#define HFS_SECTOR_SIZE 512 /* size of an HFS sector */ 25#define HFS_SECTOR_SIZE 512 /* size of an HFS sector */
26#define HFS_SECTOR_SIZE_BITS 9 /* log_2(HFS_SECTOR_SIZE) */ 26#define HFS_SECTOR_SIZE_BITS 9 /* log_2(HFS_SECTOR_SIZE) */
27#define HFS_NAMELEN 31 /* maximum length of an HFS filename */ 27#define HFS_NAMELEN 31 /* maximum length of an HFS filename */
28#define HFS_MAX_NAMELEN 128
28#define HFS_MAX_VALENCE 32767U 29#define HFS_MAX_VALENCE 32767U
29 30
30/* Meanings of the drAtrb field of the MDB, 31/* Meanings of the drAtrb field of the MDB,
diff --git a/fs/hfs/hfs_fs.h b/fs/hfs/hfs_fs.h
index 0dc8ef8e14de..aae019aadf88 100644
--- a/fs/hfs/hfs_fs.h
+++ b/fs/hfs/hfs_fs.h
@@ -141,6 +141,8 @@ struct hfs_sb_info {
141 141
142 int session, part; 142 int session, part;
143 143
144 struct nls_table *nls_io, *nls_disk;
145
144 struct semaphore bitmap_lock; 146 struct semaphore bitmap_lock;
145 147
146 unsigned long flags; 148 unsigned long flags;
@@ -168,7 +170,7 @@ extern int hfs_cat_create(u32, struct inode *, struct qstr *, struct inode *);
168extern int hfs_cat_delete(u32, struct inode *, struct qstr *); 170extern int hfs_cat_delete(u32, struct inode *, struct qstr *);
169extern int hfs_cat_move(u32, struct inode *, struct qstr *, 171extern int hfs_cat_move(u32, struct inode *, struct qstr *,
170 struct inode *, struct qstr *); 172 struct inode *, struct qstr *);
171extern void hfs_cat_build_key(btree_key *, u32, struct qstr *); 173extern void hfs_cat_build_key(struct super_block *, btree_key *, u32, struct qstr *);
172 174
173/* dir.c */ 175/* dir.c */
174extern struct file_operations hfs_dir_operations; 176extern struct file_operations hfs_dir_operations;
@@ -222,8 +224,8 @@ extern int hfs_strcmp(const unsigned char *, unsigned int,
222extern int hfs_compare_dentry(struct dentry *, struct qstr *, struct qstr *); 224extern int hfs_compare_dentry(struct dentry *, struct qstr *, struct qstr *);
223 225
224/* trans.c */ 226/* trans.c */
225extern void hfs_triv2mac(struct hfs_name *, struct qstr *); 227extern void hfs_asc2mac(struct super_block *, struct hfs_name *, struct qstr *);
226extern int hfs_mac2triv(char *, const struct hfs_name *); 228extern int hfs_mac2asc(struct super_block *, char *, const struct hfs_name *);
227 229
228extern struct timezone sys_tz; 230extern struct timezone sys_tz;
229 231
diff --git a/fs/hfs/inode.c b/fs/hfs/inode.c
index 751912326094..f1570b9f9de3 100644
--- a/fs/hfs/inode.c
+++ b/fs/hfs/inode.c
@@ -160,7 +160,7 @@ struct inode *hfs_new_inode(struct inode *dir, struct qstr *name, int mode)
160 160
161 init_MUTEX(&HFS_I(inode)->extents_lock); 161 init_MUTEX(&HFS_I(inode)->extents_lock);
162 INIT_LIST_HEAD(&HFS_I(inode)->open_dir_list); 162 INIT_LIST_HEAD(&HFS_I(inode)->open_dir_list);
163 hfs_cat_build_key((btree_key *)&HFS_I(inode)->cat_key, dir->i_ino, name); 163 hfs_cat_build_key(sb, (btree_key *)&HFS_I(inode)->cat_key, dir->i_ino, name);
164 inode->i_ino = HFS_SB(sb)->next_id++; 164 inode->i_ino = HFS_SB(sb)->next_id++;
165 inode->i_mode = mode; 165 inode->i_mode = mode;
166 inode->i_uid = current->fsuid; 166 inode->i_uid = current->fsuid;
diff --git a/fs/hfs/mdb.c b/fs/hfs/mdb.c
index 217e32f37e0b..0a473f79c89f 100644
--- a/fs/hfs/mdb.c
+++ b/fs/hfs/mdb.c
@@ -10,6 +10,7 @@
10 10
11#include <linux/cdrom.h> 11#include <linux/cdrom.h>
12#include <linux/genhd.h> 12#include <linux/genhd.h>
13#include <linux/nls.h>
13 14
14#include "hfs_fs.h" 15#include "hfs_fs.h"
15#include "btree.h" 16#include "btree.h"
@@ -343,6 +344,11 @@ void hfs_mdb_put(struct super_block *sb)
343 brelse(HFS_SB(sb)->mdb_bh); 344 brelse(HFS_SB(sb)->mdb_bh);
344 brelse(HFS_SB(sb)->alt_mdb_bh); 345 brelse(HFS_SB(sb)->alt_mdb_bh);
345 346
347 if (HFS_SB(sb)->nls_io)
348 unload_nls(HFS_SB(sb)->nls_io);
349 if (HFS_SB(sb)->nls_disk)
350 unload_nls(HFS_SB(sb)->nls_disk);
351
346 kfree(HFS_SB(sb)); 352 kfree(HFS_SB(sb));
347 sb->s_fs_info = NULL; 353 sb->s_fs_info = NULL;
348} 354}
diff --git a/fs/hfs/super.c b/fs/hfs/super.c
index ab783f6afa3b..c5074aeafcae 100644
--- a/fs/hfs/super.c
+++ b/fs/hfs/super.c
@@ -15,8 +15,11 @@
15#include <linux/config.h> 15#include <linux/config.h>
16#include <linux/module.h> 16#include <linux/module.h>
17#include <linux/blkdev.h> 17#include <linux/blkdev.h>
18#include <linux/mount.h>
18#include <linux/init.h> 19#include <linux/init.h>
20#include <linux/nls.h>
19#include <linux/parser.h> 21#include <linux/parser.h>
22#include <linux/seq_file.h>
20#include <linux/vfs.h> 23#include <linux/vfs.h>
21 24
22#include "hfs_fs.h" 25#include "hfs_fs.h"
@@ -111,6 +114,32 @@ static int hfs_remount(struct super_block *sb, int *flags, char *data)
111 return 0; 114 return 0;
112} 115}
113 116
117static int hfs_show_options(struct seq_file *seq, struct vfsmount *mnt)
118{
119 struct hfs_sb_info *sbi = HFS_SB(mnt->mnt_sb);
120
121 if (sbi->s_creator != cpu_to_be32(0x3f3f3f3f))
122 seq_printf(seq, ",creator=%.4s", (char *)&sbi->s_creator);
123 if (sbi->s_type != cpu_to_be32(0x3f3f3f3f))
124 seq_printf(seq, ",type=%.4s", (char *)&sbi->s_type);
125 seq_printf(seq, ",uid=%u,gid=%u", sbi->s_uid, sbi->s_gid);
126 if (sbi->s_file_umask != 0133)
127 seq_printf(seq, ",file_umask=%o", sbi->s_file_umask);
128 if (sbi->s_dir_umask != 0022)
129 seq_printf(seq, ",dir_umask=%o", sbi->s_dir_umask);
130 if (sbi->part >= 0)
131 seq_printf(seq, ",part=%u", sbi->part);
132 if (sbi->session >= 0)
133 seq_printf(seq, ",session=%u", sbi->session);
134 if (sbi->nls_disk)
135 seq_printf(seq, ",codepage=%s", sbi->nls_disk->charset);
136 if (sbi->nls_io)
137 seq_printf(seq, ",iocharset=%s", sbi->nls_io->charset);
138 if (sbi->s_quiet)
139 seq_printf(seq, ",quiet");
140 return 0;
141}
142
114static struct inode *hfs_alloc_inode(struct super_block *sb) 143static struct inode *hfs_alloc_inode(struct super_block *sb)
115{ 144{
116 struct hfs_inode_info *i; 145 struct hfs_inode_info *i;
@@ -133,11 +162,13 @@ static struct super_operations hfs_super_operations = {
133 .write_super = hfs_write_super, 162 .write_super = hfs_write_super,
134 .statfs = hfs_statfs, 163 .statfs = hfs_statfs,
135 .remount_fs = hfs_remount, 164 .remount_fs = hfs_remount,
165 .show_options = hfs_show_options,
136}; 166};
137 167
138enum { 168enum {
139 opt_uid, opt_gid, opt_umask, opt_file_umask, opt_dir_umask, 169 opt_uid, opt_gid, opt_umask, opt_file_umask, opt_dir_umask,
140 opt_part, opt_session, opt_type, opt_creator, opt_quiet, 170 opt_part, opt_session, opt_type, opt_creator, opt_quiet,
171 opt_codepage, opt_iocharset,
141 opt_err 172 opt_err
142}; 173};
143 174
@@ -152,6 +183,8 @@ static match_table_t tokens = {
152 { opt_type, "type=%s" }, 183 { opt_type, "type=%s" },
153 { opt_creator, "creator=%s" }, 184 { opt_creator, "creator=%s" },
154 { opt_quiet, "quiet" }, 185 { opt_quiet, "quiet" },
186 { opt_codepage, "codepage=%s" },
187 { opt_iocharset, "iocharset=%s" },
155 { opt_err, NULL } 188 { opt_err, NULL }
156}; 189};
157 190
@@ -257,11 +290,46 @@ static int parse_options(char *options, struct hfs_sb_info *hsb)
257 case opt_quiet: 290 case opt_quiet:
258 hsb->s_quiet = 1; 291 hsb->s_quiet = 1;
259 break; 292 break;
293 case opt_codepage:
294 if (hsb->nls_disk) {
295 printk("HFS+-fs: unable to change codepage\n");
296 return 0;
297 }
298 p = match_strdup(&args[0]);
299 hsb->nls_disk = load_nls(p);
300 if (!hsb->nls_disk) {
301 printk("HFS+-fs: unable to load codepage \"%s\"\n", p);
302 kfree(p);
303 return 0;
304 }
305 kfree(p);
306 break;
307 case opt_iocharset:
308 if (hsb->nls_io) {
309 printk("HFS: unable to change iocharset\n");
310 return 0;
311 }
312 p = match_strdup(&args[0]);
313 hsb->nls_io = load_nls(p);
314 if (!hsb->nls_io) {
315 printk("HFS: unable to load iocharset \"%s\"\n", p);
316 kfree(p);
317 return 0;
318 }
319 kfree(p);
320 break;
260 default: 321 default:
261 return 0; 322 return 0;
262 } 323 }
263 } 324 }
264 325
326 if (hsb->nls_disk && !hsb->nls_io) {
327 hsb->nls_io = load_nls_default();
328 if (!hsb->nls_io) {
329 printk("HFS: unable to load default iocharset\n");
330 return 0;
331 }
332 }
265 hsb->s_dir_umask &= 0777; 333 hsb->s_dir_umask &= 0777;
266 hsb->s_file_umask &= 0577; 334 hsb->s_file_umask &= 0577;
267 335
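
Note: the hfs mount code above grows codepage= and iocharset= options: each loads an NLS table with load_nls(), a second attempt to change an already loaded table is rejected, and when only a codepage is given the I/O charset falls back to load_nls_default(); hfs_mdb_put() unloads both tables at unmount. A sketch of that load/fallback/unload lifetime with illustrative names:

#include <linux/errno.h>
#include <linux/nls.h>

struct example_charsets {
	struct nls_table *nls_disk;	/* on-disk codepage */
	struct nls_table *nls_io;	/* charset presented to userland */
};

static int example_load_charsets(struct example_charsets *cs,
				 char *codepage, char *iocharset)
{
	if (codepage) {
		cs->nls_disk = load_nls(codepage);
		if (!cs->nls_disk)
			return -EINVAL;
	}
	if (iocharset)
		cs->nls_io = load_nls(iocharset);
	else if (cs->nls_disk)
		cs->nls_io = load_nls_default();	/* fallback */
	else
		return 0;			/* neither option given */

	if (!cs->nls_io) {
		if (cs->nls_disk)
			unload_nls(cs->nls_disk);
		cs->nls_disk = NULL;
		return -EINVAL;
	}
	return 0;
}

static void example_unload_charsets(struct example_charsets *cs)
{
	if (cs->nls_io)
		unload_nls(cs->nls_io);
	if (cs->nls_disk)
		unload_nls(cs->nls_disk);
}
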
diff --git a/fs/hfs/trans.c b/fs/hfs/trans.c
index fb9720abbadd..e673a88b8ae7 100644
--- a/fs/hfs/trans.c
+++ b/fs/hfs/trans.c
@@ -9,12 +9,15 @@
9 * with ':' vs. '/' as the path-element separator. 9 * with ':' vs. '/' as the path-element separator.
10 */ 10 */
11 11
12#include <linux/types.h>
13#include <linux/nls.h>
14
12#include "hfs_fs.h" 15#include "hfs_fs.h"
13 16
14/*================ Global functions ================*/ 17/*================ Global functions ================*/
15 18
16/* 19/*
17 * hfs_mac2triv() 20 * hfs_mac2asc()
18 * 21 *
19 * Given a 'Pascal String' (a string preceded by a length byte) in 22 * Given a 'Pascal String' (a string preceded by a length byte) in
20 * the Macintosh character set produce the corresponding filename using 23 * the Macintosh character set produce the corresponding filename using
@@ -27,23 +30,58 @@
27 * by ':' which never appears in HFS filenames. All other characters 30 * by ':' which never appears in HFS filenames. All other characters
28 * are passed unchanged from input to output. 31 * are passed unchanged from input to output.
29 */ 32 */
30int hfs_mac2triv(char *out, const struct hfs_name *in) 33int hfs_mac2asc(struct super_block *sb, char *out, const struct hfs_name *in)
31{ 34{
32 const char *p; 35 struct nls_table *nls_disk = HFS_SB(sb)->nls_disk;
33 char c; 36 struct nls_table *nls_io = HFS_SB(sb)->nls_io;
34 int i, len; 37 const char *src;
38 char *dst;
39 int srclen, dstlen, size;
40
41 src = in->name;
42 srclen = in->len;
43 dst = out;
44 dstlen = HFS_MAX_NAMELEN;
45 if (nls_io) {
46 wchar_t ch;
35 47
36 len = in->len; 48 while (srclen > 0) {
37 p = in->name; 49 if (nls_disk) {
38 for (i = 0; i < len; i++) { 50 size = nls_disk->char2uni(src, srclen, &ch);
39 c = *p++; 51 if (size <= 0) {
40 *out++ = c == '/' ? ':' : c; 52 ch = '?';
53 size = 1;
54 }
55 src += size;
56 srclen -= size;
57 } else {
58 ch = *src++;
59 srclen--;
60 }
61 if (ch == '/')
62 ch = ':';
63 size = nls_io->uni2char(ch, dst, dstlen);
64 if (size < 0) {
65 if (size == -ENAMETOOLONG)
66 goto out;
67 *dst = '?';
68 size = 1;
69 }
70 dst += size;
71 dstlen -= size;
72 }
73 } else {
74 char ch;
75
76 while (--srclen >= 0)
77 *dst++ = (ch = *src++) == '/' ? ':' : ch;
41 } 78 }
42 return i; 79out:
80 return dst - out;
43} 81}
44 82
45/* 83/*
46 * hfs_triv2mac() 84 * hfs_asc2mac()
47 * 85 *
48 * Given an ASCII string (not null-terminated) and its length, 86 * Given an ASCII string (not null-terminated) and its length,
49 * generate the corresponding filename in the Macintosh character set 87 * generate the corresponding filename in the Macintosh character set
@@ -54,19 +92,57 @@ int hfs_mac2triv(char *out, const struct hfs_name *in)
54 * This routine is a inverse to hfs_mac2triv(). 92 * This routine is a inverse to hfs_mac2triv().
55 * A ':' is replaced by a '/'. 93 * A ':' is replaced by a '/'.
56 */ 94 */
57void hfs_triv2mac(struct hfs_name *out, struct qstr *in) 95void hfs_asc2mac(struct super_block *sb, struct hfs_name *out, struct qstr *in)
58{ 96{
97 struct nls_table *nls_disk = HFS_SB(sb)->nls_disk;
98 struct nls_table *nls_io = HFS_SB(sb)->nls_io;
59 const char *src; 99 const char *src;
60 char *dst, c; 100 char *dst;
61 int i, len; 101 int srclen, dstlen, size;
62 102
63 out->len = len = min((unsigned int)HFS_NAMELEN, in->len);
64 src = in->name; 103 src = in->name;
104 srclen = in->len;
65 dst = out->name; 105 dst = out->name;
66 for (i = 0; i < len; i++) { 106 dstlen = HFS_NAMELEN;
67 c = *src++; 107 if (nls_io) {
68 *dst++ = c == ':' ? '/' : c; 108 wchar_t ch;
109
110 while (srclen > 0) {
111 size = nls_io->char2uni(src, srclen, &ch);
112 if (size < 0) {
113 ch = '?';
114 size = 1;
115 }
116 src += size;
117 srclen -= size;
118 if (ch == ':')
119 ch = '/';
120 if (nls_disk) {
121 size = nls_disk->uni2char(ch, dst, dstlen);
122 if (size < 0) {
123 if (size == -ENAMETOOLONG)
124 goto out;
125 *dst = '?';
126 size = 1;
127 }
128 dst += size;
129 dstlen -= size;
130 } else {
131 *dst++ = ch > 0xff ? '?' : ch;
132 dstlen--;
133 }
134 }
135 } else {
136 char ch;
137
138 if (dstlen > srclen)
139 dstlen = srclen;
140 while (--dstlen >= 0)
141 *dst++ = (ch = *src++) == ':' ? '/' : ch;
69 } 142 }
70 for (; i < HFS_NAMELEN; i++) 143out:
144 out->len = dst - (char *)out->name;
145 dstlen = HFS_NAMELEN - out->len;
146 while (--dstlen >= 0)
71 *dst++ = 0; 147 *dst++ = 0;
72} 148}
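
Note: hfs_mac2triv()/hfs_triv2mac() become hfs_mac2asc()/hfs_asc2mac(): instead of copying bytes and only swapping ':' and '/', each character is now decoded through the disk codepage's char2uni() and re-encoded through the I/O charset's uni2char(), with '?' substituted for unmappable characters and -ENAMETOOLONG terminating the output. One direction of that loop as a standalone sketch (placeholder function, same NLS calls):

#include <linux/nls.h>
#include <linux/errno.h>

/* Recode srclen bytes from the disk codepage into the I/O charset,
 * writing at most dstlen bytes; returns the number of bytes written. */
static int example_recode(struct nls_table *disk, struct nls_table *io,
			  const unsigned char *src, int srclen,
			  unsigned char *dst, int dstlen)
{
	unsigned char *out = dst;
	wchar_t ch;
	int size;

	while (srclen > 0) {
		size = disk->char2uni(src, srclen, &ch);
		if (size <= 0) {		/* unmappable input byte */
			ch = '?';
			size = 1;
		}
		src += size;
		srclen -= size;

		size = io->uni2char(ch, out, dstlen);
		if (size < 0) {
			if (size == -ENAMETOOLONG)
				break;		/* output buffer exhausted */
			*out = '?';		/* unmappable character */
			size = 1;
		}
		out += size;
		dstlen -= size;
	}
	return out - dst;
}
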
diff --git a/fs/hfsplus/bnode.c b/fs/hfsplus/bnode.c
index 8868d3b766fd..b85abc6e6f83 100644
--- a/fs/hfsplus/bnode.c
+++ b/fs/hfsplus/bnode.c
@@ -18,8 +18,6 @@
18#include "hfsplus_fs.h" 18#include "hfsplus_fs.h"
19#include "hfsplus_raw.h" 19#include "hfsplus_raw.h"
20 20
21#define REF_PAGES 0
22
23/* Copy a specified range of bytes from the raw data of a node */ 21/* Copy a specified range of bytes from the raw data of a node */
24void hfs_bnode_read(struct hfs_bnode *node, void *buf, int off, int len) 22void hfs_bnode_read(struct hfs_bnode *node, void *buf, int off, int len)
25{ 23{
@@ -450,9 +448,7 @@ static struct hfs_bnode *__hfs_bnode_create(struct hfs_btree *tree, u32 cnid)
450 page_cache_release(page); 448 page_cache_release(page);
451 goto fail; 449 goto fail;
452 } 450 }
453#if !REF_PAGES
454 page_cache_release(page); 451 page_cache_release(page);
455#endif
456 node->page[i] = page; 452 node->page[i] = page;
457 } 453 }
458 454
@@ -612,13 +608,6 @@ void hfs_bnode_get(struct hfs_bnode *node)
612{ 608{
613 if (node) { 609 if (node) {
614 atomic_inc(&node->refcnt); 610 atomic_inc(&node->refcnt);
615#if REF_PAGES
616 {
617 int i;
618 for (i = 0; i < node->tree->pages_per_bnode; i++)
619 get_page(node->page[i]);
620 }
621#endif
622 dprint(DBG_BNODE_REFS, "get_node(%d:%d): %d\n", 611 dprint(DBG_BNODE_REFS, "get_node(%d:%d): %d\n",
623 node->tree->cnid, node->this, atomic_read(&node->refcnt)); 612 node->tree->cnid, node->this, atomic_read(&node->refcnt));
624 } 613 }
@@ -635,20 +624,12 @@ void hfs_bnode_put(struct hfs_bnode *node)
635 node->tree->cnid, node->this, atomic_read(&node->refcnt)); 624 node->tree->cnid, node->this, atomic_read(&node->refcnt));
636 if (!atomic_read(&node->refcnt)) 625 if (!atomic_read(&node->refcnt))
637 BUG(); 626 BUG();
638 if (!atomic_dec_and_lock(&node->refcnt, &tree->hash_lock)) { 627 if (!atomic_dec_and_lock(&node->refcnt, &tree->hash_lock))
639#if REF_PAGES
640 for (i = 0; i < tree->pages_per_bnode; i++)
641 put_page(node->page[i]);
642#endif
643 return; 628 return;
644 }
645 for (i = 0; i < tree->pages_per_bnode; i++) { 629 for (i = 0; i < tree->pages_per_bnode; i++) {
646 if (!node->page[i]) 630 if (!node->page[i])
647 continue; 631 continue;
648 mark_page_accessed(node->page[i]); 632 mark_page_accessed(node->page[i]);
649#if REF_PAGES
650 put_page(node->page[i]);
651#endif
652 } 633 }
653 634
654 if (test_bit(HFS_BNODE_DELETED, &node->flags)) { 635 if (test_bit(HFS_BNODE_DELETED, &node->flags)) {
diff --git a/fs/hfsplus/hfsplus_fs.h b/fs/hfsplus/hfsplus_fs.h
index 533094a570df..2bc0cdd30e56 100644
--- a/fs/hfsplus/hfsplus_fs.h
+++ b/fs/hfsplus/hfsplus_fs.h
@@ -343,8 +343,9 @@ ssize_t hfsplus_getxattr(struct dentry *dentry, const char *name,
343ssize_t hfsplus_listxattr(struct dentry *dentry, char *buffer, size_t size); 343ssize_t hfsplus_listxattr(struct dentry *dentry, char *buffer, size_t size);
344 344
345/* options.c */ 345/* options.c */
346int parse_options(char *, struct hfsplus_sb_info *); 346int hfsplus_parse_options(char *, struct hfsplus_sb_info *);
347void fill_defaults(struct hfsplus_sb_info *); 347void hfsplus_fill_defaults(struct hfsplus_sb_info *);
348int hfsplus_show_options(struct seq_file *, struct vfsmount *);
348 349
349/* tables.c */ 350/* tables.c */
350extern u16 hfsplus_case_fold_table[]; 351extern u16 hfsplus_case_fold_table[];
diff --git a/fs/hfsplus/options.c b/fs/hfsplus/options.c
index 1cca0102c98d..cca0818aa4ca 100644
--- a/fs/hfsplus/options.c
+++ b/fs/hfsplus/options.c
@@ -13,6 +13,8 @@
13#include <linux/sched.h> 13#include <linux/sched.h>
14#include <linux/parser.h> 14#include <linux/parser.h>
15#include <linux/nls.h> 15#include <linux/nls.h>
16#include <linux/mount.h>
17#include <linux/seq_file.h>
16#include "hfsplus_fs.h" 18#include "hfsplus_fs.h"
17 19
18enum { 20enum {
@@ -38,7 +40,7 @@ static match_table_t tokens = {
38}; 40};
39 41
40/* Initialize an options object to reasonable defaults */ 42/* Initialize an options object to reasonable defaults */
41void fill_defaults(struct hfsplus_sb_info *opts) 43void hfsplus_fill_defaults(struct hfsplus_sb_info *opts)
42{ 44{
43 if (!opts) 45 if (!opts)
44 return; 46 return;
@@ -63,7 +65,7 @@ static inline int match_fourchar(substring_t *arg, u32 *result)
63 65
64/* Parse options from mount. Returns 0 on failure */ 66/* Parse options from mount. Returns 0 on failure */
65/* input is the options passed to mount() as a string */ 67/* input is the options passed to mount() as a string */
66int parse_options(char *input, struct hfsplus_sb_info *sbi) 68int hfsplus_parse_options(char *input, struct hfsplus_sb_info *sbi)
67{ 69{
68 char *p; 70 char *p;
69 substring_t args[MAX_OPT_ARGS]; 71 substring_t args[MAX_OPT_ARGS];
@@ -160,3 +162,23 @@ done:
160 162
161 return 1; 163 return 1;
162} 164}
165
166int hfsplus_show_options(struct seq_file *seq, struct vfsmount *mnt)
167{
168 struct hfsplus_sb_info *sbi = &HFSPLUS_SB(mnt->mnt_sb);
169
170 if (sbi->creator != HFSPLUS_DEF_CR_TYPE)
171 seq_printf(seq, ",creator=%.4s", (char *)&sbi->creator);
172 if (sbi->type != HFSPLUS_DEF_CR_TYPE)
173 seq_printf(seq, ",type=%.4s", (char *)&sbi->type);
174 seq_printf(seq, ",umask=%o,uid=%u,gid=%u", sbi->umask, sbi->uid, sbi->gid);
175 if (sbi->part >= 0)
176 seq_printf(seq, ",part=%u", sbi->part);
177 if (sbi->session >= 0)
178 seq_printf(seq, ",session=%u", sbi->session);
179 if (sbi->nls)
180 seq_printf(seq, ",nls=%s", sbi->nls->charset);
181 if (sbi->flags & HFSPLUS_SB_NODECOMPOSE)
182 seq_printf(seq, ",nodecompose");
183 return 0;
184}
diff --git a/fs/hfsplus/super.c b/fs/hfsplus/super.c
index d55ad67b8e42..fd0f0f050e1d 100644
--- a/fs/hfsplus/super.c
+++ b/fs/hfsplus/super.c
@@ -217,8 +217,7 @@ static void hfsplus_put_super(struct super_block *sb)
217 vhdr->attributes |= cpu_to_be32(HFSPLUS_VOL_UNMNT); 217 vhdr->attributes |= cpu_to_be32(HFSPLUS_VOL_UNMNT);
218 vhdr->attributes &= cpu_to_be32(~HFSPLUS_VOL_INCNSTNT); 218 vhdr->attributes &= cpu_to_be32(~HFSPLUS_VOL_INCNSTNT);
219 mark_buffer_dirty(HFSPLUS_SB(sb).s_vhbh); 219 mark_buffer_dirty(HFSPLUS_SB(sb).s_vhbh);
220 ll_rw_block(WRITE, 1, &HFSPLUS_SB(sb).s_vhbh); 220 sync_dirty_buffer(HFSPLUS_SB(sb).s_vhbh);
221 wait_on_buffer(HFSPLUS_SB(sb).s_vhbh);
222 } 221 }
223 222
224 hfs_btree_close(HFSPLUS_SB(sb).cat_tree); 223 hfs_btree_close(HFSPLUS_SB(sb).cat_tree);
@@ -277,6 +276,7 @@ static struct super_operations hfsplus_sops = {
277 .write_super = hfsplus_write_super, 276 .write_super = hfsplus_write_super,
278 .statfs = hfsplus_statfs, 277 .statfs = hfsplus_statfs,
279 .remount_fs = hfsplus_remount, 278 .remount_fs = hfsplus_remount,
279 .show_options = hfsplus_show_options,
280}; 280};
281 281
282static int hfsplus_fill_super(struct super_block *sb, void *data, int silent) 282static int hfsplus_fill_super(struct super_block *sb, void *data, int silent)
@@ -297,8 +297,8 @@ static int hfsplus_fill_super(struct super_block *sb, void *data, int silent)
297 memset(sbi, 0, sizeof(HFSPLUS_SB(sb))); 297 memset(sbi, 0, sizeof(HFSPLUS_SB(sb)));
298 sb->s_fs_info = sbi; 298 sb->s_fs_info = sbi;
299 INIT_HLIST_HEAD(&sbi->rsrc_inodes); 299 INIT_HLIST_HEAD(&sbi->rsrc_inodes);
300 fill_defaults(sbi); 300 hfsplus_fill_defaults(sbi);
301 if (!parse_options(data, sbi)) { 301 if (!hfsplus_parse_options(data, sbi)) {
302 if (!silent) 302 if (!silent)
303 printk("HFS+-fs: unable to parse mount options\n"); 303 printk("HFS+-fs: unable to parse mount options\n");
304 err = -EINVAL; 304 err = -EINVAL;
@@ -415,8 +415,7 @@ static int hfsplus_fill_super(struct super_block *sb, void *data, int silent)
415 vhdr->attributes &= cpu_to_be32(~HFSPLUS_VOL_UNMNT); 415 vhdr->attributes &= cpu_to_be32(~HFSPLUS_VOL_UNMNT);
416 vhdr->attributes |= cpu_to_be32(HFSPLUS_VOL_INCNSTNT); 416 vhdr->attributes |= cpu_to_be32(HFSPLUS_VOL_INCNSTNT);
417 mark_buffer_dirty(HFSPLUS_SB(sb).s_vhbh); 417 mark_buffer_dirty(HFSPLUS_SB(sb).s_vhbh);
418 ll_rw_block(WRITE, 1, &HFSPLUS_SB(sb).s_vhbh); 418 sync_dirty_buffer(HFSPLUS_SB(sb).s_vhbh);
419 wait_on_buffer(HFSPLUS_SB(sb).s_vhbh);
420 419
421 if (!HFSPLUS_SB(sb).hidden_dir) { 420 if (!HFSPLUS_SB(sb).hidden_dir) {
422 printk("HFS+: create hidden dir...\n"); 421 printk("HFS+: create hidden dir...\n");
diff --git a/fs/hostfs/hostfs.h b/fs/hostfs/hostfs.h
index 67bca0d4a33b..cca3fb693f99 100644
--- a/fs/hostfs/hostfs.h
+++ b/fs/hostfs/hostfs.h
@@ -49,7 +49,6 @@ struct hostfs_iattr {
49 struct timespec ia_atime; 49 struct timespec ia_atime;
50 struct timespec ia_mtime; 50 struct timespec ia_mtime;
51 struct timespec ia_ctime; 51 struct timespec ia_ctime;
52 unsigned int ia_attr_flags;
53}; 52};
54 53
55extern int stat_file(const char *path, unsigned long long *inode_out, 54extern int stat_file(const char *path, unsigned long long *inode_out,
diff --git a/fs/inode.c b/fs/inode.c
index e57f1724db3e..71df1b1e8f75 100644
--- a/fs/inode.c
+++ b/fs/inode.c
@@ -1195,9 +1195,6 @@ void update_atime(struct inode *inode)
1195 if (!timespec_equal(&inode->i_atime, &now)) { 1195 if (!timespec_equal(&inode->i_atime, &now)) {
1196 inode->i_atime = now; 1196 inode->i_atime = now;
1197 mark_inode_dirty_sync(inode); 1197 mark_inode_dirty_sync(inode);
1198 } else {
1199 if (!timespec_equal(&inode->i_atime, &now))
1200 inode->i_atime = now;
1201 } 1198 }
1202} 1199}
1203 1200
diff --git a/fs/inotify.c b/fs/inotify.c
index 2e4e2a57708c..a37e9fb1da58 100644
--- a/fs/inotify.c
+++ b/fs/inotify.c
@@ -37,6 +37,7 @@
37#include <asm/ioctls.h> 37#include <asm/ioctls.h>
38 38
39static atomic_t inotify_cookie; 39static atomic_t inotify_cookie;
40static atomic_t inotify_watches;
40 41
41static kmem_cache_t *watch_cachep; 42static kmem_cache_t *watch_cachep;
42static kmem_cache_t *event_cachep; 43static kmem_cache_t *event_cachep;
@@ -422,6 +423,7 @@ static struct inotify_watch *create_watch(struct inotify_device *dev,
422 get_inotify_watch(watch); 423 get_inotify_watch(watch);
423 424
424 atomic_inc(&dev->user->inotify_watches); 425 atomic_inc(&dev->user->inotify_watches);
426 atomic_inc(&inotify_watches);
425 427
426 return watch; 428 return watch;
427} 429}
@@ -454,6 +456,7 @@ static void remove_watch_no_event(struct inotify_watch *watch,
454 list_del(&watch->d_list); 456 list_del(&watch->d_list);
455 457
456 atomic_dec(&dev->user->inotify_watches); 458 atomic_dec(&dev->user->inotify_watches);
459 atomic_dec(&inotify_watches);
457 idr_remove(&dev->idr, watch->wd); 460 idr_remove(&dev->idr, watch->wd);
458 put_inotify_watch(watch); 461 put_inotify_watch(watch);
459} 462}
@@ -532,6 +535,9 @@ void inotify_dentry_parent_queue_event(struct dentry *dentry, u32 mask,
532 struct dentry *parent; 535 struct dentry *parent;
533 struct inode *inode; 536 struct inode *inode;
534 537
538 if (!atomic_read (&inotify_watches))
539 return;
540
535 spin_lock(&dentry->d_lock); 541 spin_lock(&dentry->d_lock);
536 parent = dentry->d_parent; 542 parent = dentry->d_parent;
537 inode = parent->d_inode; 543 inode = parent->d_inode;
@@ -925,6 +931,7 @@ asmlinkage long sys_inotify_add_watch(int fd, const char __user *path, u32 mask)
925 struct nameidata nd; 931 struct nameidata nd;
926 struct file *filp; 932 struct file *filp;
927 int ret, fput_needed; 933 int ret, fput_needed;
934 int mask_add = 0;
928 935
929 filp = fget_light(fd, &fput_needed); 936 filp = fget_light(fd, &fput_needed);
930 if (unlikely(!filp)) 937 if (unlikely(!filp))
@@ -947,6 +954,9 @@ asmlinkage long sys_inotify_add_watch(int fd, const char __user *path, u32 mask)
947 down(&inode->inotify_sem); 954 down(&inode->inotify_sem);
948 down(&dev->sem); 955 down(&dev->sem);
949 956
957 if (mask & IN_MASK_ADD)
958 mask_add = 1;
959
950 /* don't let user-space set invalid bits: we don't want flags set */ 960 /* don't let user-space set invalid bits: we don't want flags set */
951 mask &= IN_ALL_EVENTS; 961 mask &= IN_ALL_EVENTS;
952 if (unlikely(!mask)) { 962 if (unlikely(!mask)) {
@@ -960,7 +970,10 @@ asmlinkage long sys_inotify_add_watch(int fd, const char __user *path, u32 mask)
960 */ 970 */
961 old = inode_find_dev(inode, dev); 971 old = inode_find_dev(inode, dev);
962 if (unlikely(old)) { 972 if (unlikely(old)) {
963 old->mask = mask; 973 if (mask_add)
974 old->mask |= mask;
975 else
976 old->mask = mask;
964 ret = old->wd; 977 ret = old->wd;
965 goto out; 978 goto out;
966 } 979 }
@@ -1043,6 +1056,7 @@ static int __init inotify_setup(void)
1043 inotify_max_user_watches = 8192; 1056 inotify_max_user_watches = 8192;
1044 1057
1045 atomic_set(&inotify_cookie, 0); 1058 atomic_set(&inotify_cookie, 0);
1059 atomic_set(&inotify_watches, 0);
1046 1060
1047 watch_cachep = kmem_cache_create("inotify_watch_cache", 1061 watch_cachep = kmem_cache_create("inotify_watch_cache",
1048 sizeof(struct inotify_watch), 1062 sizeof(struct inotify_watch),
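The IN_MASK_ADD handling above lets a repeated watch request extend the existing event mask instead of replacing it. A minimal userspace sketch, assuming the usual wrapper functions for the inotify syscalls and a purely illustrative path:

	int fd = inotify_init();
	/* First request: watch for file creation only. */
	int wd = inotify_add_watch(fd, "/tmp/watched", IN_CREATE);
	/* Without IN_MASK_ADD this second call would overwrite the mask with
	 * IN_DELETE; with it, the watch now reports both event types. */
	inotify_add_watch(fd, "/tmp/watched", IN_DELETE | IN_MASK_ADD);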
diff --git a/fs/jbd/checkpoint.c b/fs/jbd/checkpoint.c
index 5a97e346bd95..014a51fd00d7 100644
--- a/fs/jbd/checkpoint.c
+++ b/fs/jbd/checkpoint.c
@@ -204,7 +204,7 @@ __flush_batch(journal_t *journal, struct buffer_head **bhs, int *batch_count)
204 int i; 204 int i;
205 205
206 spin_unlock(&journal->j_list_lock); 206 spin_unlock(&journal->j_list_lock);
207 ll_rw_block(WRITE, *batch_count, bhs); 207 ll_rw_block(SWRITE, *batch_count, bhs);
208 spin_lock(&journal->j_list_lock); 208 spin_lock(&journal->j_list_lock);
209 for (i = 0; i < *batch_count; i++) { 209 for (i = 0; i < *batch_count; i++) {
210 struct buffer_head *bh = bhs[i]; 210 struct buffer_head *bh = bhs[i];
diff --git a/fs/jbd/commit.c b/fs/jbd/commit.c
index dac720c837ab..2a3e310f79ef 100644
--- a/fs/jbd/commit.c
+++ b/fs/jbd/commit.c
@@ -358,7 +358,7 @@ write_out_data:
358 jbd_debug(2, "submit %d writes\n", 358 jbd_debug(2, "submit %d writes\n",
359 bufs); 359 bufs);
360 spin_unlock(&journal->j_list_lock); 360 spin_unlock(&journal->j_list_lock);
361 ll_rw_block(WRITE, bufs, wbuf); 361 ll_rw_block(SWRITE, bufs, wbuf);
362 journal_brelse_array(wbuf, bufs); 362 journal_brelse_array(wbuf, bufs);
363 bufs = 0; 363 bufs = 0;
364 goto write_out_data; 364 goto write_out_data;
@@ -381,7 +381,7 @@ write_out_data:
381 381
382 if (bufs) { 382 if (bufs) {
383 spin_unlock(&journal->j_list_lock); 383 spin_unlock(&journal->j_list_lock);
384 ll_rw_block(WRITE, bufs, wbuf); 384 ll_rw_block(SWRITE, bufs, wbuf);
385 journal_brelse_array(wbuf, bufs); 385 journal_brelse_array(wbuf, bufs);
386 spin_lock(&journal->j_list_lock); 386 spin_lock(&journal->j_list_lock);
387 } 387 }
@@ -720,11 +720,17 @@ wait_for_iobuf:
720 J_ASSERT(commit_transaction->t_log_list == NULL); 720 J_ASSERT(commit_transaction->t_log_list == NULL);
721 721
722restart_loop: 722restart_loop:
723 /*
724 * As there are other places (journal_unmap_buffer()) adding buffers
725 * to this list we have to be careful and hold the j_list_lock.
726 */
727 spin_lock(&journal->j_list_lock);
723 while (commit_transaction->t_forget) { 728 while (commit_transaction->t_forget) {
724 transaction_t *cp_transaction; 729 transaction_t *cp_transaction;
725 struct buffer_head *bh; 730 struct buffer_head *bh;
726 731
727 jh = commit_transaction->t_forget; 732 jh = commit_transaction->t_forget;
733 spin_unlock(&journal->j_list_lock);
728 bh = jh2bh(jh); 734 bh = jh2bh(jh);
729 jbd_lock_bh_state(bh); 735 jbd_lock_bh_state(bh);
730 J_ASSERT_JH(jh, jh->b_transaction == commit_transaction || 736 J_ASSERT_JH(jh, jh->b_transaction == commit_transaction ||
@@ -792,9 +798,25 @@ restart_loop:
792 journal_remove_journal_head(bh); /* needs a brelse */ 798 journal_remove_journal_head(bh); /* needs a brelse */
793 release_buffer_page(bh); 799 release_buffer_page(bh);
794 } 800 }
801 cond_resched_lock(&journal->j_list_lock);
802 }
803 spin_unlock(&journal->j_list_lock);
804 /*
805 * This is a bit sleazy. We borrow j_list_lock to protect
806 * journal->j_committing_transaction in __journal_remove_checkpoint.
807 * Really, __journal_remove_checkpoint should be using j_state_lock but
808 * it's a bit hassle to hold that across __journal_remove_checkpoint
809 */
810 spin_lock(&journal->j_state_lock);
811 spin_lock(&journal->j_list_lock);
812 /*
813 * Now recheck if some buffers did not get attached to the transaction
814 * while the lock was dropped...
815 */
816 if (commit_transaction->t_forget) {
795 spin_unlock(&journal->j_list_lock); 817 spin_unlock(&journal->j_list_lock);
796 if (cond_resched()) 818 spin_unlock(&journal->j_state_lock);
797 goto restart_loop; 819 goto restart_loop;
798 } 820 }
799 821
800 /* Done with this transaction! */ 822 /* Done with this transaction! */
@@ -803,14 +825,6 @@ restart_loop:
803 825
804 J_ASSERT(commit_transaction->t_state == T_COMMIT); 826 J_ASSERT(commit_transaction->t_state == T_COMMIT);
805 827
806 /*
807 * This is a bit sleazy. We borrow j_list_lock to protect
808 * journal->j_committing_transaction in __journal_remove_checkpoint.
809 * Really, __jornal_remove_checkpoint should be using j_state_lock but
810 * it's a bit hassle to hold that across __journal_remove_checkpoint
811 */
812 spin_lock(&journal->j_state_lock);
813 spin_lock(&journal->j_list_lock);
814 commit_transaction->t_state = T_FINISHED; 828 commit_transaction->t_state = T_FINISHED;
815 J_ASSERT(commit_transaction == journal->j_committing_transaction); 829 J_ASSERT(commit_transaction == journal->j_committing_transaction);
816 journal->j_commit_sequence = commit_transaction->t_tid; 830 journal->j_commit_sequence = commit_transaction->t_tid;
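The reworked t_forget loop above is the classic drop-lock-and-recheck shape: the list is walked under j_list_lock, the lock is dropped while each buffer is processed, and once the heavier j_state_lock/j_list_lock pair is finally taken the list is rechecked, because journal_unmap_buffer() may have added buffers in the meantime. A condensed sketch of that shape, with placeholder names rather than actual jbd code:

	restart:
		spin_lock(&list_lock);
		while (!list_empty(&pending)) {
			item = first_pending_item();
			spin_unlock(&list_lock);	/* others may append here */
			process(item);
			spin_lock(&list_lock);
		}
		spin_unlock(&list_lock);

		take_state_and_list_locks();
		if (!list_empty(&pending)) {		/* something slipped in */
			drop_state_and_list_locks();
			goto restart;
		}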
diff --git a/fs/jbd/journal.c b/fs/jbd/journal.c
index 5e7b43949517..7ae2c4fe506b 100644
--- a/fs/jbd/journal.c
+++ b/fs/jbd/journal.c
@@ -65,7 +65,6 @@ EXPORT_SYMBOL(journal_set_features);
65EXPORT_SYMBOL(journal_create); 65EXPORT_SYMBOL(journal_create);
66EXPORT_SYMBOL(journal_load); 66EXPORT_SYMBOL(journal_load);
67EXPORT_SYMBOL(journal_destroy); 67EXPORT_SYMBOL(journal_destroy);
68EXPORT_SYMBOL(journal_recover);
69EXPORT_SYMBOL(journal_update_superblock); 68EXPORT_SYMBOL(journal_update_superblock);
70EXPORT_SYMBOL(journal_abort); 69EXPORT_SYMBOL(journal_abort);
71EXPORT_SYMBOL(journal_errno); 70EXPORT_SYMBOL(journal_errno);
@@ -81,6 +80,7 @@ EXPORT_SYMBOL(journal_try_to_free_buffers);
81EXPORT_SYMBOL(journal_force_commit); 80EXPORT_SYMBOL(journal_force_commit);
82 81
83static int journal_convert_superblock_v1(journal_t *, journal_superblock_t *); 82static int journal_convert_superblock_v1(journal_t *, journal_superblock_t *);
83static void __journal_abort_soft (journal_t *journal, int errno);
84 84
85/* 85/*
86 * Helper function used to manage commit timeouts 86 * Helper function used to manage commit timeouts
@@ -93,16 +93,6 @@ static void commit_timeout(unsigned long __data)
93 wake_up_process(p); 93 wake_up_process(p);
94} 94}
95 95
96/* Static check for data structure consistency. There's no code
97 * invoked --- we'll just get a linker failure if things aren't right.
98 */
99void __journal_internal_check(void)
100{
101 extern void journal_bad_superblock_size(void);
102 if (sizeof(struct journal_superblock_s) != 1024)
103 journal_bad_superblock_size();
104}
105
106/* 96/*
107 * kjournald: The main thread function used to manage a logging device 97 * kjournald: The main thread function used to manage a logging device
108 * journal. 98 * journal.
@@ -119,16 +109,12 @@ void __journal_internal_check(void)
119 * known as checkpointing, and this thread is responsible for that job. 109 * known as checkpointing, and this thread is responsible for that job.
120 */ 110 */
121 111
122journal_t *current_journal; // AKPM: debug 112static int kjournald(void *arg)
123
124int kjournald(void *arg)
125{ 113{
126 journal_t *journal = (journal_t *) arg; 114 journal_t *journal = (journal_t *) arg;
127 transaction_t *transaction; 115 transaction_t *transaction;
128 struct timer_list timer; 116 struct timer_list timer;
129 117
130 current_journal = journal;
131
132 daemonize("kjournald"); 118 daemonize("kjournald");
133 119
134 /* Set up an interval timer which can be used to trigger a 120 /* Set up an interval timer which can be used to trigger a
@@ -193,6 +179,8 @@ loop:
193 if (transaction && time_after_eq(jiffies, 179 if (transaction && time_after_eq(jiffies,
194 transaction->t_expires)) 180 transaction->t_expires))
195 should_sleep = 0; 181 should_sleep = 0;
182 if (journal->j_flags & JFS_UNMOUNT)
183 should_sleep = 0;
196 if (should_sleep) { 184 if (should_sleep) {
197 spin_unlock(&journal->j_state_lock); 185 spin_unlock(&journal->j_state_lock);
198 schedule(); 186 schedule();
@@ -969,7 +957,7 @@ void journal_update_superblock(journal_t *journal, int wait)
969 if (wait) 957 if (wait)
970 sync_dirty_buffer(bh); 958 sync_dirty_buffer(bh);
971 else 959 else
972 ll_rw_block(WRITE, 1, &bh); 960 ll_rw_block(SWRITE, 1, &bh);
973 961
974out: 962out:
975 /* If we have just flushed the log (by marking s_start==0), then 963 /* If we have just flushed the log (by marking s_start==0), then
@@ -1439,7 +1427,7 @@ int journal_wipe(journal_t *journal, int write)
1439 * device this journal is present. 1427 * device this journal is present.
1440 */ 1428 */
1441 1429
1442const char *journal_dev_name(journal_t *journal, char *buffer) 1430static const char *journal_dev_name(journal_t *journal, char *buffer)
1443{ 1431{
1444 struct block_device *bdev; 1432 struct block_device *bdev;
1445 1433
@@ -1485,7 +1473,7 @@ void __journal_abort_hard(journal_t *journal)
1485 1473
1486/* Soft abort: record the abort error status in the journal superblock, 1474/* Soft abort: record the abort error status in the journal superblock,
1487 * but don't do any other IO. */ 1475 * but don't do any other IO. */
1488void __journal_abort_soft (journal_t *journal, int errno) 1476static void __journal_abort_soft (journal_t *journal, int errno)
1489{ 1477{
1490 if (journal->j_flags & JFS_ABORT) 1478 if (journal->j_flags & JFS_ABORT)
1491 return; 1479 return;
@@ -1880,7 +1868,7 @@ EXPORT_SYMBOL(journal_enable_debug);
1880 1868
1881static struct proc_dir_entry *proc_jbd_debug; 1869static struct proc_dir_entry *proc_jbd_debug;
1882 1870
1883int read_jbd_debug(char *page, char **start, off_t off, 1871static int read_jbd_debug(char *page, char **start, off_t off,
1884 int count, int *eof, void *data) 1872 int count, int *eof, void *data)
1885{ 1873{
1886 int ret; 1874 int ret;
@@ -1890,7 +1878,7 @@ int read_jbd_debug(char *page, char **start, off_t off,
1890 return ret; 1878 return ret;
1891} 1879}
1892 1880
1893int write_jbd_debug(struct file *file, const char __user *buffer, 1881static int write_jbd_debug(struct file *file, const char __user *buffer,
1894 unsigned long count, void *data) 1882 unsigned long count, void *data)
1895{ 1883{
1896 char buf[32]; 1884 char buf[32];
@@ -1979,6 +1967,14 @@ static int __init journal_init(void)
1979{ 1967{
1980 int ret; 1968 int ret;
1981 1969
1970/* Static check for data structure consistency. There's no code
1971 * invoked --- we'll just get a linker failure if things aren't right.
1972 */
1973 extern void journal_bad_superblock_size(void);
1974 if (sizeof(struct journal_superblock_s) != 1024)
1975 journal_bad_superblock_size();
1976
1977
1982 ret = journal_init_caches(); 1978 ret = journal_init_caches();
1983 if (ret != 0) 1979 if (ret != 0)
1984 journal_destroy_caches(); 1980 journal_destroy_caches();
diff --git a/fs/jbd/revoke.c b/fs/jbd/revoke.c
index d327a598f861..a56144183462 100644
--- a/fs/jbd/revoke.c
+++ b/fs/jbd/revoke.c
@@ -116,7 +116,8 @@ static inline int hash(journal_t *journal, unsigned long block)
116 (block << (hash_shift - 12))) & (table->hash_size - 1); 116 (block << (hash_shift - 12))) & (table->hash_size - 1);
117} 117}
118 118
119int insert_revoke_hash(journal_t *journal, unsigned long blocknr, tid_t seq) 119static int insert_revoke_hash(journal_t *journal, unsigned long blocknr,
120 tid_t seq)
120{ 121{
121 struct list_head *hash_list; 122 struct list_head *hash_list;
122 struct jbd_revoke_record_s *record; 123 struct jbd_revoke_record_s *record;
@@ -613,7 +614,7 @@ static void flush_descriptor(journal_t *journal,
613 set_buffer_jwrite(bh); 614 set_buffer_jwrite(bh);
614 BUFFER_TRACE(bh, "write"); 615 BUFFER_TRACE(bh, "write");
615 set_buffer_dirty(bh); 616 set_buffer_dirty(bh);
616 ll_rw_block(WRITE, 1, &bh); 617 ll_rw_block(SWRITE, 1, &bh);
617} 618}
618#endif 619#endif
619 620
diff --git a/fs/jbd/transaction.c b/fs/jbd/transaction.c
index 77b7662b840b..c6ec66fd8766 100644
--- a/fs/jbd/transaction.c
+++ b/fs/jbd/transaction.c
@@ -490,23 +490,21 @@ void journal_unlock_updates (journal_t *journal)
490 */ 490 */
491static void jbd_unexpected_dirty_buffer(struct journal_head *jh) 491static void jbd_unexpected_dirty_buffer(struct journal_head *jh)
492{ 492{
493 struct buffer_head *bh = jh2bh(jh);
494 int jlist; 493 int jlist;
495 494
496 if (buffer_dirty(bh)) { 495 /* If this buffer is one which might reasonably be dirty
497 /* If this buffer is one which might reasonably be dirty 496 * --- ie. data, or not part of this journal --- then
498 * --- ie. data, or not part of this journal --- then 497 * we're OK to leave it alone, but otherwise we need to
499 * we're OK to leave it alone, but otherwise we need to 498 * move the dirty bit to the journal's own internal
500 * move the dirty bit to the journal's own internal 499 * JBDDirty bit. */
501 * JBDDirty bit. */ 500 jlist = jh->b_jlist;
502 jlist = jh->b_jlist; 501
503 502 if (jlist == BJ_Metadata || jlist == BJ_Reserved ||
504 if (jlist == BJ_Metadata || jlist == BJ_Reserved || 503 jlist == BJ_Shadow || jlist == BJ_Forget) {
505 jlist == BJ_Shadow || jlist == BJ_Forget) { 504 struct buffer_head *bh = jh2bh(jh);
506 if (test_clear_buffer_dirty(jh2bh(jh))) { 505
507 set_bit(BH_JBDDirty, &jh2bh(jh)->b_state); 506 if (test_clear_buffer_dirty(bh))
508 } 507 set_buffer_jbddirty(bh);
509 }
510 } 508 }
511} 509}
512 510
@@ -574,9 +572,14 @@ repeat:
574 if (jh->b_next_transaction) 572 if (jh->b_next_transaction)
575 J_ASSERT_JH(jh, jh->b_next_transaction == 573 J_ASSERT_JH(jh, jh->b_next_transaction ==
576 transaction); 574 transaction);
577 JBUFFER_TRACE(jh, "Unexpected dirty buffer"); 575 }
578 jbd_unexpected_dirty_buffer(jh); 576 /*
579 } 577 * In any case we need to clean the dirty flag and we must
578 * do it under the buffer lock to be sure we don't race
579 * with running write-out.
580 */
581 JBUFFER_TRACE(jh, "Unexpected dirty buffer");
582 jbd_unexpected_dirty_buffer(jh);
580 } 583 }
581 584
582 unlock_buffer(bh); 585 unlock_buffer(bh);
diff --git a/fs/jffs/inode-v23.c b/fs/jffs/inode-v23.c
index bfbeb4c86e03..777b90057b89 100644
--- a/fs/jffs/inode-v23.c
+++ b/fs/jffs/inode-v23.c
@@ -1629,9 +1629,6 @@ static int jffs_fsync(struct file *f, struct dentry *d, int datasync)
1629} 1629}
1630 1630
1631 1631
1632extern int generic_file_open(struct inode *, struct file *) __attribute__((weak));
1633extern loff_t generic_file_llseek(struct file *, loff_t, int) __attribute__((weak));
1634
1635static struct file_operations jffs_file_operations = 1632static struct file_operations jffs_file_operations =
1636{ 1633{
1637 .open = generic_file_open, 1634 .open = generic_file_open,
diff --git a/fs/jffs2/file.c b/fs/jffs2/file.c
index bd9ed9b0247b..8279bf0133ff 100644
--- a/fs/jffs2/file.c
+++ b/fs/jffs2/file.c
@@ -21,9 +21,6 @@
21#include <linux/jffs2.h> 21#include <linux/jffs2.h>
22#include "nodelist.h" 22#include "nodelist.h"
23 23
24extern int generic_file_open(struct inode *, struct file *) __attribute__((weak));
25extern loff_t generic_file_llseek(struct file *file, loff_t offset, int origin) __attribute__((weak));
26
27static int jffs2_commit_write (struct file *filp, struct page *pg, 24static int jffs2_commit_write (struct file *filp, struct page *pg,
28 unsigned start, unsigned end); 25 unsigned start, unsigned end);
29static int jffs2_prepare_write (struct file *filp, struct page *pg, 26static int jffs2_prepare_write (struct file *filp, struct page *pg,
diff --git a/fs/jfs/jfs_filsys.h b/fs/jfs/jfs_filsys.h
index 86ccac80f0ab..72a5588faeca 100644
--- a/fs/jfs/jfs_filsys.h
+++ b/fs/jfs/jfs_filsys.h
@@ -37,6 +37,9 @@
37#define JFS_ERR_CONTINUE 0x00000004 /* continue */ 37#define JFS_ERR_CONTINUE 0x00000004 /* continue */
38#define JFS_ERR_PANIC 0x00000008 /* panic */ 38#define JFS_ERR_PANIC 0x00000008 /* panic */
39 39
40#define JFS_USRQUOTA 0x00000010
41#define JFS_GRPQUOTA 0x00000020
42
40/* platform option (conditional compilation) */ 43/* platform option (conditional compilation) */
41#define JFS_AIX 0x80000000 /* AIX support */ 44#define JFS_AIX 0x80000000 /* AIX support */
42/* POSIX name/directory support */ 45/* POSIX name/directory support */
diff --git a/fs/jfs/super.c b/fs/jfs/super.c
index 9ff89720f93b..71bc34b96b2b 100644
--- a/fs/jfs/super.c
+++ b/fs/jfs/super.c
@@ -23,9 +23,11 @@
23#include <linux/parser.h> 23#include <linux/parser.h>
24#include <linux/completion.h> 24#include <linux/completion.h>
25#include <linux/vfs.h> 25#include <linux/vfs.h>
26#include <linux/mount.h>
26#include <linux/moduleparam.h> 27#include <linux/moduleparam.h>
27#include <linux/posix_acl.h> 28#include <linux/posix_acl.h>
28#include <asm/uaccess.h> 29#include <asm/uaccess.h>
30#include <linux/seq_file.h>
29 31
30#include "jfs_incore.h" 32#include "jfs_incore.h"
31#include "jfs_filsys.h" 33#include "jfs_filsys.h"
@@ -192,7 +194,8 @@ static void jfs_put_super(struct super_block *sb)
192 194
193enum { 195enum {
194 Opt_integrity, Opt_nointegrity, Opt_iocharset, Opt_resize, 196 Opt_integrity, Opt_nointegrity, Opt_iocharset, Opt_resize,
195 Opt_resize_nosize, Opt_errors, Opt_ignore, Opt_err, 197 Opt_resize_nosize, Opt_errors, Opt_ignore, Opt_err, Opt_quota,
198 Opt_usrquota, Opt_grpquota
196}; 199};
197 200
198static match_table_t tokens = { 201static match_table_t tokens = {
@@ -204,8 +207,8 @@ static match_table_t tokens = {
204 {Opt_errors, "errors=%s"}, 207 {Opt_errors, "errors=%s"},
205 {Opt_ignore, "noquota"}, 208 {Opt_ignore, "noquota"},
206 {Opt_ignore, "quota"}, 209 {Opt_ignore, "quota"},
207 {Opt_ignore, "usrquota"}, 210 {Opt_usrquota, "usrquota"},
208 {Opt_ignore, "grpquota"}, 211 {Opt_grpquota, "grpquota"},
209 {Opt_err, NULL} 212 {Opt_err, NULL}
210}; 213};
211 214
@@ -293,6 +296,24 @@ static int parse_options(char *options, struct super_block *sb, s64 *newLVSize,
293 } 296 }
294 break; 297 break;
295 } 298 }
299
300#if defined(CONFIG_QUOTA)
301 case Opt_quota:
302 case Opt_usrquota:
303 *flag |= JFS_USRQUOTA;
304 break;
305 case Opt_grpquota:
306 *flag |= JFS_GRPQUOTA;
307 break;
308#else
309 case Opt_usrquota:
310 case Opt_grpquota:
311 case Opt_quota:
312 printk(KERN_ERR
313 "JFS: quota operations not supported\n");
314 break;
315#endif
316
296 default: 317 default:
297 printk("jfs: Unrecognized mount option \"%s\" " 318 printk("jfs: Unrecognized mount option \"%s\" "
298 " or missing value\n", p); 319 " or missing value\n", p);
@@ -539,6 +560,26 @@ static int jfs_sync_fs(struct super_block *sb, int wait)
539 return 0; 560 return 0;
540} 561}
541 562
563static int jfs_show_options(struct seq_file *seq, struct vfsmount *vfs)
564{
565 struct jfs_sb_info *sbi = JFS_SBI(vfs->mnt_sb);
566
567 if (sbi->flag & JFS_NOINTEGRITY)
568 seq_puts(seq, ",nointegrity");
569 else
570 seq_puts(seq, ",integrity");
571
572#if defined(CONFIG_QUOTA)
573 if (sbi->flag & JFS_USRQUOTA)
574 seq_puts(seq, ",usrquota");
575
576 if (sbi->flag & JFS_GRPQUOTA)
577 seq_puts(seq, ",grpquota");
578#endif
579
580 return 0;
581}
582
542static struct super_operations jfs_super_operations = { 583static struct super_operations jfs_super_operations = {
543 .alloc_inode = jfs_alloc_inode, 584 .alloc_inode = jfs_alloc_inode,
544 .destroy_inode = jfs_destroy_inode, 585 .destroy_inode = jfs_destroy_inode,
@@ -552,6 +593,7 @@ static struct super_operations jfs_super_operations = {
552 .unlockfs = jfs_unlockfs, 593 .unlockfs = jfs_unlockfs,
553 .statfs = jfs_statfs, 594 .statfs = jfs_statfs,
554 .remount_fs = jfs_remount, 595 .remount_fs = jfs_remount,
596 .show_options = jfs_show_options
555}; 597};
556 598
557static struct export_operations jfs_export_operations = { 599static struct export_operations jfs_export_operations = {
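Since usrquota and grpquota are now parsed into real flags rather than being ignored, and jfs_show_options() reports them, a quota-enabled JFS mount would appear in /proc/mounts along these lines (device and mount point are made up):

	/dev/hdb1 /data jfs rw,integrity,usrquota,grpquota 0 0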
diff --git a/fs/namei.c b/fs/namei.c
index 6ec1f0fefc5b..145e852c4bd0 100644
--- a/fs/namei.c
+++ b/fs/namei.c
@@ -525,6 +525,22 @@ static inline int __do_follow_link(struct path *path, struct nameidata *nd)
525 return error; 525 return error;
526} 526}
527 527
528static inline void dput_path(struct path *path, struct nameidata *nd)
529{
530 dput(path->dentry);
531 if (path->mnt != nd->mnt)
532 mntput(path->mnt);
533}
534
535static inline void path_to_nameidata(struct path *path, struct nameidata *nd)
536{
537 dput(nd->dentry);
538 if (nd->mnt != path->mnt)
539 mntput(nd->mnt);
540 nd->mnt = path->mnt;
541 nd->dentry = path->dentry;
542}
543
528/* 544/*
529 * This limits recursive symlink follows to 8, while 545 * This limits recursive symlink follows to 8, while
530 * limiting consecutive symlinks to 40. 546 * limiting consecutive symlinks to 40.
@@ -552,9 +568,7 @@ static inline int do_follow_link(struct path *path, struct nameidata *nd)
552 nd->depth--; 568 nd->depth--;
553 return err; 569 return err;
554loop: 570loop:
555 dput(path->dentry); 571 dput_path(path, nd);
556 if (path->mnt != nd->mnt)
557 mntput(path->mnt);
558 path_release(nd); 572 path_release(nd);
559 return err; 573 return err;
560} 574}
@@ -813,13 +827,8 @@ static fastcall int __link_path_walk(const char * name, struct nameidata *nd)
813 err = -ENOTDIR; 827 err = -ENOTDIR;
814 if (!inode->i_op) 828 if (!inode->i_op)
815 break; 829 break;
816 } else { 830 } else
817 dput(nd->dentry); 831 path_to_nameidata(&next, nd);
818 if (nd->mnt != next.mnt)
819 mntput(nd->mnt);
820 nd->mnt = next.mnt;
821 nd->dentry = next.dentry;
822 }
823 err = -ENOTDIR; 832 err = -ENOTDIR;
824 if (!inode->i_op->lookup) 833 if (!inode->i_op->lookup)
825 break; 834 break;
@@ -859,13 +868,8 @@ last_component:
859 if (err) 868 if (err)
860 goto return_err; 869 goto return_err;
861 inode = nd->dentry->d_inode; 870 inode = nd->dentry->d_inode;
862 } else { 871 } else
863 dput(nd->dentry); 872 path_to_nameidata(&next, nd);
864 if (nd->mnt != next.mnt)
865 mntput(nd->mnt);
866 nd->mnt = next.mnt;
867 nd->dentry = next.dentry;
868 }
869 err = -ENOENT; 873 err = -ENOENT;
870 if (!inode) 874 if (!inode)
871 break; 875 break;
@@ -901,9 +905,7 @@ return_reval:
901return_base: 905return_base:
902 return 0; 906 return 0;
903out_dput: 907out_dput:
904 dput(next.dentry); 908 dput_path(&next, nd);
905 if (nd->mnt != next.mnt)
906 mntput(next.mnt);
907 break; 909 break;
908 } 910 }
909 path_release(nd); 911 path_release(nd);
@@ -1507,11 +1509,7 @@ do_last:
1507 if (path.dentry->d_inode->i_op && path.dentry->d_inode->i_op->follow_link) 1509 if (path.dentry->d_inode->i_op && path.dentry->d_inode->i_op->follow_link)
1508 goto do_link; 1510 goto do_link;
1509 1511
1510 dput(nd->dentry); 1512 path_to_nameidata(&path, nd);
1511 nd->dentry = path.dentry;
1512 if (nd->mnt != path.mnt)
1513 mntput(nd->mnt);
1514 nd->mnt = path.mnt;
1515 error = -EISDIR; 1513 error = -EISDIR;
1516 if (path.dentry->d_inode && S_ISDIR(path.dentry->d_inode->i_mode)) 1514 if (path.dentry->d_inode && S_ISDIR(path.dentry->d_inode->i_mode))
1517 goto exit; 1515 goto exit;
@@ -1522,9 +1520,7 @@ ok:
1522 return 0; 1520 return 0;
1523 1521
1524exit_dput: 1522exit_dput:
1525 dput(path.dentry); 1523 dput_path(&path, nd);
1526 if (nd->mnt != path.mnt)
1527 mntput(path.mnt);
1528exit: 1524exit:
1529 path_release(nd); 1525 path_release(nd);
1530 return error; 1526 return error;
diff --git a/fs/namespace.c b/fs/namespace.c
index 79bd8a46e1e7..34156260c9b6 100644
--- a/fs/namespace.c
+++ b/fs/namespace.c
@@ -40,7 +40,7 @@ static inline int sysfs_init(void)
40 __cacheline_aligned_in_smp DEFINE_SPINLOCK(vfsmount_lock); 40 __cacheline_aligned_in_smp DEFINE_SPINLOCK(vfsmount_lock);
41 41
42static struct list_head *mount_hashtable; 42static struct list_head *mount_hashtable;
43static int hash_mask, hash_bits; 43static int hash_mask __read_mostly, hash_bits __read_mostly;
44static kmem_cache_t *mnt_cache; 44static kmem_cache_t *mnt_cache;
45 45
46static inline unsigned long hash(struct vfsmount *mnt, struct dentry *dentry) 46static inline unsigned long hash(struct vfsmount *mnt, struct dentry *dentry)
@@ -1334,8 +1334,12 @@ asmlinkage long sys_pivot_root(const char __user *new_root, const char __user *p
1334 error = -EINVAL; 1334 error = -EINVAL;
1335 if (user_nd.mnt->mnt_root != user_nd.dentry) 1335 if (user_nd.mnt->mnt_root != user_nd.dentry)
1336 goto out2; /* not a mountpoint */ 1336 goto out2; /* not a mountpoint */
1337 if (user_nd.mnt->mnt_parent == user_nd.mnt)
1338 goto out2; /* not attached */
1337 if (new_nd.mnt->mnt_root != new_nd.dentry) 1339 if (new_nd.mnt->mnt_root != new_nd.dentry)
1338 goto out2; /* not a mountpoint */ 1340 goto out2; /* not a mountpoint */
1341 if (new_nd.mnt->mnt_parent == new_nd.mnt)
1342 goto out2; /* not attached */
1339 tmp = old_nd.mnt; /* make sure we can reach put_old from new_root */ 1343 tmp = old_nd.mnt; /* make sure we can reach put_old from new_root */
1340 spin_lock(&vfsmount_lock); 1344 spin_lock(&vfsmount_lock);
1341 if (tmp != new_nd.mnt) { 1345 if (tmp != new_nd.mnt) {
diff --git a/fs/nfsd/export.c b/fs/nfsd/export.c
index 9a11aa39e2e4..057aff745506 100644
--- a/fs/nfsd/export.c
+++ b/fs/nfsd/export.c
@@ -26,6 +26,7 @@
26#include <linux/namei.h> 26#include <linux/namei.h>
27#include <linux/mount.h> 27#include <linux/mount.h>
28#include <linux/hash.h> 28#include <linux/hash.h>
29#include <linux/module.h>
29 30
30#include <linux/sunrpc/svc.h> 31#include <linux/sunrpc/svc.h>
31#include <linux/nfsd/nfsd.h> 32#include <linux/nfsd/nfsd.h>
@@ -221,6 +222,7 @@ static int expkey_show(struct seq_file *m,
221} 222}
222 223
223struct cache_detail svc_expkey_cache = { 224struct cache_detail svc_expkey_cache = {
225 .owner = THIS_MODULE,
224 .hash_size = EXPKEY_HASHMAX, 226 .hash_size = EXPKEY_HASHMAX,
225 .hash_table = expkey_table, 227 .hash_table = expkey_table,
226 .name = "nfsd.fh", 228 .name = "nfsd.fh",
@@ -456,6 +458,7 @@ static int svc_export_show(struct seq_file *m,
456 return 0; 458 return 0;
457} 459}
458struct cache_detail svc_export_cache = { 460struct cache_detail svc_export_cache = {
461 .owner = THIS_MODULE,
459 .hash_size = EXPORT_HASHMAX, 462 .hash_size = EXPORT_HASHMAX,
460 .hash_table = export_table, 463 .hash_table = export_table,
461 .name = "nfsd.export", 464 .name = "nfsd.export",
diff --git a/fs/nfsd/nfs4idmap.c b/fs/nfsd/nfs4idmap.c
index 5605a26efc57..13369650cdf9 100644
--- a/fs/nfsd/nfs4idmap.c
+++ b/fs/nfsd/nfs4idmap.c
@@ -187,6 +187,7 @@ static int idtoname_parse(struct cache_detail *, char *, int);
187static struct ent *idtoname_lookup(struct ent *, int); 187static struct ent *idtoname_lookup(struct ent *, int);
188 188
189static struct cache_detail idtoname_cache = { 189static struct cache_detail idtoname_cache = {
190 .owner = THIS_MODULE,
190 .hash_size = ENT_HASHMAX, 191 .hash_size = ENT_HASHMAX,
191 .hash_table = idtoname_table, 192 .hash_table = idtoname_table,
192 .name = "nfs4.idtoname", 193 .name = "nfs4.idtoname",
@@ -320,6 +321,7 @@ static struct ent *nametoid_lookup(struct ent *, int);
320static int nametoid_parse(struct cache_detail *, char *, int); 321static int nametoid_parse(struct cache_detail *, char *, int);
321 322
322static struct cache_detail nametoid_cache = { 323static struct cache_detail nametoid_cache = {
324 .owner = THIS_MODULE,
323 .hash_size = ENT_HASHMAX, 325 .hash_size = ENT_HASHMAX,
324 .hash_table = nametoid_table, 326 .hash_table = nametoid_table,
325 .name = "nfs4.nametoid", 327 .name = "nfs4.nametoid",
@@ -404,8 +406,10 @@ nfsd_idmap_init(void)
404void 406void
405nfsd_idmap_shutdown(void) 407nfsd_idmap_shutdown(void)
406{ 408{
407 cache_unregister(&idtoname_cache); 409 if (cache_unregister(&idtoname_cache))
408 cache_unregister(&nametoid_cache); 410 printk(KERN_ERR "nfsd: failed to unregister idtoname cache\n");
411 if (cache_unregister(&nametoid_cache))
412 printk(KERN_ERR "nfsd: failed to unregister nametoid cache\n");
409} 413}
410 414
411/* 415/*
diff --git a/fs/open.c b/fs/open.c
index 32bf05e2996d..4ee2dcc31c28 100644
--- a/fs/open.c
+++ b/fs/open.c
@@ -933,16 +933,11 @@ void fastcall fd_install(unsigned int fd, struct file * file)
933 933
934EXPORT_SYMBOL(fd_install); 934EXPORT_SYMBOL(fd_install);
935 935
936asmlinkage long sys_open(const char __user * filename, int flags, int mode) 936long do_sys_open(const char __user *filename, int flags, int mode)
937{ 937{
938 char * tmp; 938 char *tmp = getname(filename);
939 int fd; 939 int fd = PTR_ERR(tmp);
940 940
941 if (force_o_largefile())
942 flags |= O_LARGEFILE;
943
944 tmp = getname(filename);
945 fd = PTR_ERR(tmp);
946 if (!IS_ERR(tmp)) { 941 if (!IS_ERR(tmp)) {
947 fd = get_unused_fd(); 942 fd = get_unused_fd();
948 if (fd >= 0) { 943 if (fd >= 0) {
@@ -959,6 +954,14 @@ asmlinkage long sys_open(const char __user * filename, int flags, int mode)
959 } 954 }
960 return fd; 955 return fd;
961} 956}
957
958asmlinkage long sys_open(const char __user *filename, int flags, int mode)
959{
960 if (force_o_largefile())
961 flags |= O_LARGEFILE;
962
963 return do_sys_open(filename, flags, mode);
964}
962EXPORT_SYMBOL_GPL(sys_open); 965EXPORT_SYMBOL_GPL(sys_open);
963 966
964#ifndef __alpha__ 967#ifndef __alpha__
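Splitting do_sys_open() out of sys_open() keeps the force_o_largefile() policy in the syscall wrapper and leaves the filename/fd plumbing reusable from other entry points. A hypothetical in-kernel caller (the wrapper name is invented for illustration):

	asmlinkage long sys_open_example(const char __user *filename, int mode)
	{
		/* Same plumbing as sys_open(), but with flags fixed by the caller. */
		return do_sys_open(filename, O_RDWR | O_LARGEFILE, mode);
	}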
diff --git a/fs/pipe.c b/fs/pipe.c
index 25aa09f9d09d..2c7a23dde2d8 100644
--- a/fs/pipe.c
+++ b/fs/pipe.c
@@ -415,6 +415,10 @@ pipe_poll(struct file *filp, poll_table *wait)
415 415
416 if (filp->f_mode & FMODE_WRITE) { 416 if (filp->f_mode & FMODE_WRITE) {
417 mask |= (nrbufs < PIPE_BUFFERS) ? POLLOUT | POLLWRNORM : 0; 417 mask |= (nrbufs < PIPE_BUFFERS) ? POLLOUT | POLLWRNORM : 0;
418 /*
419 * Most Unices do not set POLLERR for FIFOs but on Linux they
420 * behave exactly like pipes for poll().
421 */
418 if (!PIPE_READERS(*inode)) 422 if (!PIPE_READERS(*inode))
419 mask |= POLLERR; 423 mask |= POLLERR;
420 } 424 }
@@ -422,9 +426,6 @@ pipe_poll(struct file *filp, poll_table *wait)
422 return mask; 426 return mask;
423} 427}
424 428
425/* FIXME: most Unices do not set POLLERR for fifos */
426#define fifo_poll pipe_poll
427
428static int 429static int
429pipe_release(struct inode *inode, int decr, int decw) 430pipe_release(struct inode *inode, int decr, int decw)
430{ 431{
@@ -568,7 +569,7 @@ struct file_operations read_fifo_fops = {
568 .read = pipe_read, 569 .read = pipe_read,
569 .readv = pipe_readv, 570 .readv = pipe_readv,
570 .write = bad_pipe_w, 571 .write = bad_pipe_w,
571 .poll = fifo_poll, 572 .poll = pipe_poll,
572 .ioctl = pipe_ioctl, 573 .ioctl = pipe_ioctl,
573 .open = pipe_read_open, 574 .open = pipe_read_open,
574 .release = pipe_read_release, 575 .release = pipe_read_release,
@@ -580,7 +581,7 @@ struct file_operations write_fifo_fops = {
580 .read = bad_pipe_r, 581 .read = bad_pipe_r,
581 .write = pipe_write, 582 .write = pipe_write,
582 .writev = pipe_writev, 583 .writev = pipe_writev,
583 .poll = fifo_poll, 584 .poll = pipe_poll,
584 .ioctl = pipe_ioctl, 585 .ioctl = pipe_ioctl,
585 .open = pipe_write_open, 586 .open = pipe_write_open,
586 .release = pipe_write_release, 587 .release = pipe_write_release,
@@ -593,7 +594,7 @@ struct file_operations rdwr_fifo_fops = {
593 .readv = pipe_readv, 594 .readv = pipe_readv,
594 .write = pipe_write, 595 .write = pipe_write,
595 .writev = pipe_writev, 596 .writev = pipe_writev,
596 .poll = fifo_poll, 597 .poll = pipe_poll,
597 .ioctl = pipe_ioctl, 598 .ioctl = pipe_ioctl,
598 .open = pipe_rdwr_open, 599 .open = pipe_rdwr_open,
599 .release = pipe_rdwr_release, 600 .release = pipe_rdwr_release,
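The comment added above documents that Linux deliberately reports POLLERR on a FIFO whose counterpart has gone away, just as it does for pipes. A hypothetical userspace sketch (descriptor and handler names are placeholders):

	/* Poll the writing end of a FIFO; if all readers have closed,
	 * poll() returns with POLLERR set in revents. */
	struct pollfd pfd = { .fd = fifo_write_fd, .events = POLLOUT };
	if (poll(&pfd, 1, -1) > 0 && (pfd.revents & POLLERR))
		handle_no_readers();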
diff --git a/fs/proc/base.c b/fs/proc/base.c
index 520978e49e92..84751f3f52d5 100644
--- a/fs/proc/base.c
+++ b/fs/proc/base.c
@@ -119,7 +119,6 @@ enum pid_directory_inos {
119#ifdef CONFIG_AUDITSYSCALL 119#ifdef CONFIG_AUDITSYSCALL
120 PROC_TGID_LOGINUID, 120 PROC_TGID_LOGINUID,
121#endif 121#endif
122 PROC_TGID_FD_DIR,
123 PROC_TGID_OOM_SCORE, 122 PROC_TGID_OOM_SCORE,
124 PROC_TGID_OOM_ADJUST, 123 PROC_TGID_OOM_ADJUST,
125 PROC_TID_INO, 124 PROC_TID_INO,
@@ -158,9 +157,11 @@ enum pid_directory_inos {
158#ifdef CONFIG_AUDITSYSCALL 157#ifdef CONFIG_AUDITSYSCALL
159 PROC_TID_LOGINUID, 158 PROC_TID_LOGINUID,
160#endif 159#endif
161 PROC_TID_FD_DIR = 0x8000, /* 0x8000-0xffff */
162 PROC_TID_OOM_SCORE, 160 PROC_TID_OOM_SCORE,
163 PROC_TID_OOM_ADJUST, 161 PROC_TID_OOM_ADJUST,
162
163 /* Add new entries before this */
164 PROC_TID_FD_DIR = 0x8000, /* 0x8000-0xffff */
164}; 165};
165 166
166struct pid_entry { 167struct pid_entry {
@@ -297,15 +298,21 @@ static int proc_fd_link(struct inode *inode, struct dentry **dentry, struct vfsm
297 return -ENOENT; 298 return -ENOENT;
298} 299}
299 300
300static int proc_cwd_link(struct inode *inode, struct dentry **dentry, struct vfsmount **mnt) 301static struct fs_struct *get_fs_struct(struct task_struct *task)
301{ 302{
302 struct fs_struct *fs; 303 struct fs_struct *fs;
303 int result = -ENOENT; 304 task_lock(task);
304 task_lock(proc_task(inode)); 305 fs = task->fs;
305 fs = proc_task(inode)->fs;
306 if(fs) 306 if(fs)
307 atomic_inc(&fs->count); 307 atomic_inc(&fs->count);
308 task_unlock(proc_task(inode)); 308 task_unlock(task);
309 return fs;
310}
311
312static int proc_cwd_link(struct inode *inode, struct dentry **dentry, struct vfsmount **mnt)
313{
314 struct fs_struct *fs = get_fs_struct(proc_task(inode));
315 int result = -ENOENT;
309 if (fs) { 316 if (fs) {
310 read_lock(&fs->lock); 317 read_lock(&fs->lock);
311 *mnt = mntget(fs->pwdmnt); 318 *mnt = mntget(fs->pwdmnt);
@@ -319,13 +326,8 @@ static int proc_cwd_link(struct inode *inode, struct dentry **dentry, struct vfs
319 326
320static int proc_root_link(struct inode *inode, struct dentry **dentry, struct vfsmount **mnt) 327static int proc_root_link(struct inode *inode, struct dentry **dentry, struct vfsmount **mnt)
321{ 328{
322 struct fs_struct *fs; 329 struct fs_struct *fs = get_fs_struct(proc_task(inode));
323 int result = -ENOENT; 330 int result = -ENOENT;
324 task_lock(proc_task(inode));
325 fs = proc_task(inode)->fs;
326 if(fs)
327 atomic_inc(&fs->count);
328 task_unlock(proc_task(inode));
329 if (fs) { 331 if (fs) {
330 read_lock(&fs->lock); 332 read_lock(&fs->lock);
331 *mnt = mntget(fs->rootmnt); 333 *mnt = mntget(fs->rootmnt);
@@ -344,33 +346,6 @@ static int proc_root_link(struct inode *inode, struct dentry **dentry, struct vf
344 (task->state == TASK_STOPPED || task->state == TASK_TRACED) && \ 346 (task->state == TASK_STOPPED || task->state == TASK_TRACED) && \
345 security_ptrace(current,task) == 0)) 347 security_ptrace(current,task) == 0))
346 348
347static int may_ptrace_attach(struct task_struct *task)
348{
349 int retval = 0;
350
351 task_lock(task);
352
353 if (!task->mm)
354 goto out;
355 if (((current->uid != task->euid) ||
356 (current->uid != task->suid) ||
357 (current->uid != task->uid) ||
358 (current->gid != task->egid) ||
359 (current->gid != task->sgid) ||
360 (current->gid != task->gid)) && !capable(CAP_SYS_PTRACE))
361 goto out;
362 rmb();
363 if (task->mm->dumpable != 1 && !capable(CAP_SYS_PTRACE))
364 goto out;
365 if (security_ptrace(current, task))
366 goto out;
367
368 retval = 1;
369out:
370 task_unlock(task);
371 return retval;
372}
373
374static int proc_pid_environ(struct task_struct *task, char * buffer) 349static int proc_pid_environ(struct task_struct *task, char * buffer)
375{ 350{
376 int res = 0; 351 int res = 0;
@@ -380,7 +355,7 @@ static int proc_pid_environ(struct task_struct *task, char * buffer)
380 if (len > PAGE_SIZE) 355 if (len > PAGE_SIZE)
381 len = PAGE_SIZE; 356 len = PAGE_SIZE;
382 res = access_process_vm(task, mm->env_start, buffer, len, 0); 357 res = access_process_vm(task, mm->env_start, buffer, len, 0);
383 if (!may_ptrace_attach(task)) 358 if (!ptrace_may_attach(task))
384 res = -ESRCH; 359 res = -ESRCH;
385 mmput(mm); 360 mmput(mm);
386 } 361 }
@@ -683,7 +658,7 @@ static ssize_t mem_read(struct file * file, char __user * buf,
683 int ret = -ESRCH; 658 int ret = -ESRCH;
684 struct mm_struct *mm; 659 struct mm_struct *mm;
685 660
686 if (!MAY_PTRACE(task) || !may_ptrace_attach(task)) 661 if (!MAY_PTRACE(task) || !ptrace_may_attach(task))
687 goto out; 662 goto out;
688 663
689 ret = -ENOMEM; 664 ret = -ENOMEM;
@@ -709,7 +684,7 @@ static ssize_t mem_read(struct file * file, char __user * buf,
709 684
710 this_len = (count > PAGE_SIZE) ? PAGE_SIZE : count; 685 this_len = (count > PAGE_SIZE) ? PAGE_SIZE : count;
711 retval = access_process_vm(task, src, page, this_len, 0); 686 retval = access_process_vm(task, src, page, this_len, 0);
712 if (!retval || !MAY_PTRACE(task) || !may_ptrace_attach(task)) { 687 if (!retval || !MAY_PTRACE(task) || !ptrace_may_attach(task)) {
713 if (!ret) 688 if (!ret)
714 ret = -EIO; 689 ret = -EIO;
715 break; 690 break;
@@ -747,7 +722,7 @@ static ssize_t mem_write(struct file * file, const char * buf,
747 struct task_struct *task = proc_task(file->f_dentry->d_inode); 722 struct task_struct *task = proc_task(file->f_dentry->d_inode);
748 unsigned long dst = *ppos; 723 unsigned long dst = *ppos;
749 724
750 if (!MAY_PTRACE(task) || !may_ptrace_attach(task)) 725 if (!MAY_PTRACE(task) || !ptrace_may_attach(task))
751 return -ESRCH; 726 return -ESRCH;
752 727
753 page = (char *)__get_free_page(GFP_USER); 728 page = (char *)__get_free_page(GFP_USER);
diff --git a/fs/proc/generic.c b/fs/proc/generic.c
index abe8920313fb..8a8c34461d48 100644
--- a/fs/proc/generic.c
+++ b/fs/proc/generic.c
@@ -249,6 +249,18 @@ out:
249 return error; 249 return error;
250} 250}
251 251
252static int proc_getattr(struct vfsmount *mnt, struct dentry *dentry,
253 struct kstat *stat)
254{
255 struct inode *inode = dentry->d_inode;
256 struct proc_dir_entry *de = PROC_I(inode)->pde;
257 if (de && de->nlink)
258 inode->i_nlink = de->nlink;
259
260 generic_fillattr(inode, stat);
261 return 0;
262}
263
252static struct inode_operations proc_file_inode_operations = { 264static struct inode_operations proc_file_inode_operations = {
253 .setattr = proc_notify_change, 265 .setattr = proc_notify_change,
254}; 266};
@@ -475,6 +487,7 @@ static struct file_operations proc_dir_operations = {
475 */ 487 */
476static struct inode_operations proc_dir_inode_operations = { 488static struct inode_operations proc_dir_inode_operations = {
477 .lookup = proc_lookup, 489 .lookup = proc_lookup,
490 .getattr = proc_getattr,
478 .setattr = proc_notify_change, 491 .setattr = proc_notify_change,
479}; 492};
480 493
diff --git a/fs/read_write.c b/fs/read_write.c
index 563abd09b5c8..b60324aaa2b6 100644
--- a/fs/read_write.c
+++ b/fs/read_write.c
@@ -188,7 +188,7 @@ int rw_verify_area(int read_write, struct file *file, loff_t *ppos, size_t count
188 struct inode *inode; 188 struct inode *inode;
189 loff_t pos; 189 loff_t pos;
190 190
191 if (unlikely(count > file->f_maxcount)) 191 if (unlikely(count > INT_MAX))
192 goto Einval; 192 goto Einval;
193 pos = *ppos; 193 pos = *ppos;
194 if (unlikely((pos < 0) || (loff_t) (pos + count) < 0)) 194 if (unlikely((pos < 0) || (loff_t) (pos + count) < 0))
diff --git a/fs/reiserfs/journal.c b/fs/reiserfs/journal.c
index ca7989b04be3..a8e29e9bbbd0 100644
--- a/fs/reiserfs/journal.c
+++ b/fs/reiserfs/journal.c
@@ -1034,7 +1034,7 @@ static int flush_commit_list(struct super_block *s,
1034 SB_ONDISK_JOURNAL_SIZE(s); 1034 SB_ONDISK_JOURNAL_SIZE(s);
1035 tbh = journal_find_get_block(s, bn); 1035 tbh = journal_find_get_block(s, bn);
1036 if (buffer_dirty(tbh)) /* redundant, ll_rw_block() checks */ 1036 if (buffer_dirty(tbh)) /* redundant, ll_rw_block() checks */
1037 ll_rw_block(WRITE, 1, &tbh); 1037 ll_rw_block(SWRITE, 1, &tbh);
1038 put_bh(tbh); 1038 put_bh(tbh);
1039 } 1039 }
1040 atomic_dec(&journal->j_async_throttle); 1040 atomic_dec(&journal->j_async_throttle);
@@ -2172,7 +2172,7 @@ static int journal_read_transaction(struct super_block *p_s_sb,
2172 /* flush out the real blocks */ 2172 /* flush out the real blocks */
2173 for (i = 0; i < get_desc_trans_len(desc); i++) { 2173 for (i = 0; i < get_desc_trans_len(desc); i++) {
2174 set_buffer_dirty(real_blocks[i]); 2174 set_buffer_dirty(real_blocks[i]);
2175 ll_rw_block(WRITE, 1, real_blocks + i); 2175 ll_rw_block(SWRITE, 1, real_blocks + i);
2176 } 2176 }
2177 for (i = 0; i < get_desc_trans_len(desc); i++) { 2177 for (i = 0; i < get_desc_trans_len(desc); i++) {
2178 wait_on_buffer(real_blocks[i]); 2178 wait_on_buffer(real_blocks[i]);
diff --git a/fs/relayfs/Makefile b/fs/relayfs/Makefile
new file mode 100644
index 000000000000..e76e182cdb38
--- /dev/null
+++ b/fs/relayfs/Makefile
@@ -0,0 +1,4 @@
1obj-$(CONFIG_RELAYFS_FS) += relayfs.o
2
3relayfs-y := relay.o inode.o buffers.o
4
diff --git a/fs/relayfs/buffers.c b/fs/relayfs/buffers.c
new file mode 100644
index 000000000000..2aa8e2719999
--- /dev/null
+++ b/fs/relayfs/buffers.c
@@ -0,0 +1,189 @@
1/*
2 * RelayFS buffer management code.
3 *
4 * Copyright (C) 2002-2005 - Tom Zanussi (zanussi@us.ibm.com), IBM Corp
5 * Copyright (C) 1999-2005 - Karim Yaghmour (karim@opersys.com)
6 *
7 * This file is released under the GPL.
8 */
9
10#include <linux/module.h>
11#include <linux/vmalloc.h>
12#include <linux/mm.h>
13#include <linux/relayfs_fs.h>
14#include "relay.h"
15#include "buffers.h"
16
17/*
18 * close() vm_op implementation for relayfs file mapping.
19 */
20static void relay_file_mmap_close(struct vm_area_struct *vma)
21{
22 struct rchan_buf *buf = vma->vm_private_data;
23 buf->chan->cb->buf_unmapped(buf, vma->vm_file);
24}
25
26/*
27 * nopage() vm_op implementation for relayfs file mapping.
28 */
29static struct page *relay_buf_nopage(struct vm_area_struct *vma,
30 unsigned long address,
31 int *type)
32{
33 struct page *page;
34 struct rchan_buf *buf = vma->vm_private_data;
35 unsigned long offset = address - vma->vm_start;
36
37 if (address > vma->vm_end)
38 return NOPAGE_SIGBUS; /* Disallow mremap */
39 if (!buf)
40 return NOPAGE_OOM;
41
42 page = vmalloc_to_page(buf->start + offset);
43 if (!page)
44 return NOPAGE_OOM;
45 get_page(page);
46
47 if (type)
48 *type = VM_FAULT_MINOR;
49
50 return page;
51}
52
53/*
54 * vm_ops for relay file mappings.
55 */
56static struct vm_operations_struct relay_file_mmap_ops = {
57 .nopage = relay_buf_nopage,
58 .close = relay_file_mmap_close,
59};
60
61/**
62 * relay_mmap_buf: - mmap channel buffer to process address space
63 * @buf: relay channel buffer
64 * @vma: vm_area_struct describing memory to be mapped
65 *
66 * Returns 0 if ok, negative on error
67 *
68 * Caller should already have grabbed mmap_sem.
69 */
70int relay_mmap_buf(struct rchan_buf *buf, struct vm_area_struct *vma)
71{
72 unsigned long length = vma->vm_end - vma->vm_start;
73 struct file *filp = vma->vm_file;
74
75 if (!buf)
76 return -EBADF;
77
78 if (length != (unsigned long)buf->chan->alloc_size)
79 return -EINVAL;
80
81 vma->vm_ops = &relay_file_mmap_ops;
82 vma->vm_private_data = buf;
83 buf->chan->cb->buf_mapped(buf, filp);
84
85 return 0;
86}
87
88/**
89 * relay_alloc_buf - allocate a channel buffer
90 * @buf: the buffer struct
91 * @size: total size of the buffer
92 *
93 * Returns a pointer to the resulting buffer, NULL if unsuccessful
94 */
95static void *relay_alloc_buf(struct rchan_buf *buf, unsigned long size)
96{
97 void *mem;
98 unsigned int i, j, n_pages;
99
100 size = PAGE_ALIGN(size);
101 n_pages = size >> PAGE_SHIFT;
102
103 buf->page_array = kcalloc(n_pages, sizeof(struct page *), GFP_KERNEL);
104 if (!buf->page_array)
105 return NULL;
106
107 for (i = 0; i < n_pages; i++) {
108 buf->page_array[i] = alloc_page(GFP_KERNEL);
109 if (unlikely(!buf->page_array[i]))
110 goto depopulate;
111 }
112 mem = vmap(buf->page_array, n_pages, GFP_KERNEL, PAGE_KERNEL);
113 if (!mem)
114 goto depopulate;
115
116 memset(mem, 0, size);
117 buf->page_count = n_pages;
118 return mem;
119
120depopulate:
121 for (j = 0; j < i; j++)
122 __free_page(buf->page_array[j]);
123 kfree(buf->page_array);
124 return NULL;
125}
126
127/**
128 * relay_create_buf - allocate and initialize a channel buffer
129 * @chan: the relay channel the new buffer will be attached to
130 * (supplies the allocation size and number of sub-buffers)
131 *
132 * Returns channel buffer if successful, NULL otherwise
133 */
134struct rchan_buf *relay_create_buf(struct rchan *chan)
135{
136 struct rchan_buf *buf = kcalloc(1, sizeof(struct rchan_buf), GFP_KERNEL);
137 if (!buf)
138 return NULL;
139
140 buf->padding = kmalloc(chan->n_subbufs * sizeof(size_t *), GFP_KERNEL);
141 if (!buf->padding)
142 goto free_buf;
143
144 buf->start = relay_alloc_buf(buf, chan->alloc_size);
145 if (!buf->start)
146 goto free_buf;
147
148 buf->chan = chan;
149 kref_get(&buf->chan->kref);
150 return buf;
151
152free_buf:
153 kfree(buf->padding);
154 kfree(buf);
155 return NULL;
156}
157
158/**
159 * relay_destroy_buf - destroy an rchan_buf struct and associated buffer
160 * @buf: the buffer struct
161 */
162void relay_destroy_buf(struct rchan_buf *buf)
163{
164 struct rchan *chan = buf->chan;
165 unsigned int i;
166
167 if (likely(buf->start)) {
168 vunmap(buf->start);
169 for (i = 0; i < buf->page_count; i++)
170 __free_page(buf->page_array[i]);
171 kfree(buf->page_array);
172 }
173 kfree(buf->padding);
174 kfree(buf);
175 kref_put(&chan->kref, relay_destroy_channel);
176}
177
178/**
179 * relay_remove_buf - remove a channel buffer
180 *
181 * Removes the file from the relayfs filesystem, which also frees the
182 * rchan_buf struct and the channel buffer. Should only be called from
183 * kref_put().
184 */
185void relay_remove_buf(struct kref *kref)
186{
187 struct rchan_buf *buf = container_of(kref, struct rchan_buf, kref);
188 relayfs_remove(buf->dentry);
189}
diff --git a/fs/relayfs/buffers.h b/fs/relayfs/buffers.h
new file mode 100644
index 000000000000..37a12493f641
--- /dev/null
+++ b/fs/relayfs/buffers.h
@@ -0,0 +1,12 @@
1#ifndef _BUFFERS_H
2#define _BUFFERS_H
3
4/* This is inspired by rtai/shmem */
5#define FIX_SIZE(x) (((x) - 1) & PAGE_MASK) + PAGE_SIZE
6
7extern int relay_mmap_buf(struct rchan_buf *buf, struct vm_area_struct *vma);
8extern struct rchan_buf *relay_create_buf(struct rchan *chan);
9extern void relay_destroy_buf(struct rchan_buf *buf);
10extern void relay_remove_buf(struct kref *kref);
11
12#endif /* _BUFFERS_H */
diff --git a/fs/relayfs/inode.c b/fs/relayfs/inode.c
new file mode 100644
index 000000000000..0f7f88d067ad
--- /dev/null
+++ b/fs/relayfs/inode.c
@@ -0,0 +1,609 @@
1/*
2 * VFS-related code for RelayFS, a high-speed data relay filesystem.
3 *
4 * Copyright (C) 2003-2005 - Tom Zanussi <zanussi@us.ibm.com>, IBM Corp
5 * Copyright (C) 2003-2005 - Karim Yaghmour <karim@opersys.com>
6 *
7 * Based on ramfs, Copyright (C) 2002 - Linus Torvalds
8 *
9 * This file is released under the GPL.
10 */
11
12#include <linux/module.h>
13#include <linux/fs.h>
14#include <linux/mount.h>
15#include <linux/pagemap.h>
16#include <linux/init.h>
17#include <linux/string.h>
18#include <linux/backing-dev.h>
19#include <linux/namei.h>
20#include <linux/poll.h>
21#include <linux/relayfs_fs.h>
22#include "relay.h"
23#include "buffers.h"
24
25#define RELAYFS_MAGIC 0xF0B4A981
26
27static struct vfsmount * relayfs_mount;
28static int relayfs_mount_count;
29static kmem_cache_t * relayfs_inode_cachep;
30
31static struct backing_dev_info relayfs_backing_dev_info = {
32 .ra_pages = 0, /* No readahead */
33 .capabilities = BDI_CAP_NO_ACCT_DIRTY | BDI_CAP_NO_WRITEBACK,
34};
35
36static struct inode *relayfs_get_inode(struct super_block *sb, int mode,
37 struct rchan *chan)
38{
39 struct rchan_buf *buf = NULL;
40 struct inode *inode;
41
42 if (S_ISREG(mode)) {
43 BUG_ON(!chan);
44 buf = relay_create_buf(chan);
45 if (!buf)
46 return NULL;
47 }
48
49 inode = new_inode(sb);
50 if (!inode) {
51 relay_destroy_buf(buf);
52 return NULL;
53 }
54
55 inode->i_mode = mode;
56 inode->i_uid = 0;
57 inode->i_gid = 0;
58 inode->i_blksize = PAGE_CACHE_SIZE;
59 inode->i_blocks = 0;
60 inode->i_mapping->backing_dev_info = &relayfs_backing_dev_info;
61 inode->i_atime = inode->i_mtime = inode->i_ctime = CURRENT_TIME;
62 switch (mode & S_IFMT) {
63 case S_IFREG:
64 inode->i_fop = &relayfs_file_operations;
65 RELAYFS_I(inode)->buf = buf;
66 break;
67 case S_IFDIR:
68 inode->i_op = &simple_dir_inode_operations;
69 inode->i_fop = &simple_dir_operations;
70
71 /* directory inodes start off with i_nlink == 2 (for "." entry) */
72 inode->i_nlink++;
73 break;
74 default:
75 break;
76 }
77
78 return inode;
79}
80
81/**
82 * relayfs_create_entry - create a relayfs directory or file
83 * @name: the name of the file to create
84 * @parent: parent directory
85 * @mode: mode
86 * @chan: relay channel associated with the file
87 *
88 * Returns the new dentry, NULL on failure
89 *
90 * Creates a file or directory with the specified permissions.
91 */
92static struct dentry *relayfs_create_entry(const char *name,
93 struct dentry *parent,
94 int mode,
95 struct rchan *chan)
96{
97 struct dentry *d;
98 struct inode *inode;
99 int error = 0;
100
101 BUG_ON(!name || !(S_ISREG(mode) || S_ISDIR(mode)));
102
103 error = simple_pin_fs("relayfs", &relayfs_mount, &relayfs_mount_count);
104 if (error) {
105 printk(KERN_ERR "Couldn't mount relayfs: errcode %d\n", error);
106 return NULL;
107 }
108
109 if (!parent && relayfs_mount && relayfs_mount->mnt_sb)
110 parent = relayfs_mount->mnt_sb->s_root;
111
112 if (!parent) {
113 simple_release_fs(&relayfs_mount, &relayfs_mount_count);
114 return NULL;
115 }
116
117 parent = dget(parent);
118 down(&parent->d_inode->i_sem);
119 d = lookup_one_len(name, parent, strlen(name));
120 if (IS_ERR(d)) {
121 d = NULL;
122 goto release_mount;
123 }
124
125 if (d->d_inode) {
126 d = NULL;
127 goto release_mount;
128 }
129
130 inode = relayfs_get_inode(parent->d_inode->i_sb, mode, chan);
131 if (!inode) {
132 d = NULL;
133 goto release_mount;
134 }
135
136 d_instantiate(d, inode);
137 dget(d); /* Extra count - pin the dentry in core */
138
139 if (S_ISDIR(mode))
140 parent->d_inode->i_nlink++;
141
142 goto exit;
143
144release_mount:
145 simple_release_fs(&relayfs_mount, &relayfs_mount_count);
146
147exit:
148 up(&parent->d_inode->i_sem);
149 dput(parent);
150 return d;
151}
152
153/**
154 * relayfs_create_file - create a file in the relay filesystem
155 * @name: the name of the file to create
156 * @parent: parent directory
157 * @mode: mode, if not specified the default perms are used
158 * @chan: channel associated with the file
159 *
160 * Returns file dentry if successful, NULL otherwise.
161 *
162 * The file will be created user-readable (S_IRUSR) on behalf of the current user.
163 */
164struct dentry *relayfs_create_file(const char *name, struct dentry *parent,
165 int mode, struct rchan *chan)
166{
167 if (!mode)
168 mode = S_IRUSR;
169 mode = (mode & S_IALLUGO) | S_IFREG;
170
171 return relayfs_create_entry(name, parent, mode, chan);
172}
173
174/**
175 * relayfs_create_dir - create a directory in the relay filesystem
176 * @name: the name of the directory to create
177 * @parent: parent directory, NULL if parent should be fs root
178 *
179 * Returns directory dentry if successful, NULL otherwise.
180 *
181 * The directory will be created owner rwx, group/other rx, on behalf of the current user.
182 */
183struct dentry *relayfs_create_dir(const char *name, struct dentry *parent)
184{
185 int mode = S_IFDIR | S_IRWXU | S_IRUGO | S_IXUGO;
186 return relayfs_create_entry(name, parent, mode, NULL);
187}
188
189/**
190 * relayfs_remove - remove a file or directory in the relay filesystem
191 * @dentry: file or directory dentry
192 *
193 * Returns 0 if successful, negative otherwise.
194 */
195int relayfs_remove(struct dentry *dentry)
196{
197 struct dentry *parent;
198 int error = 0;
199
200 if (!dentry)
201 return -EINVAL;
202 parent = dentry->d_parent;
203 if (!parent)
204 return -EINVAL;
205
206 parent = dget(parent);
207 down(&parent->d_inode->i_sem);
208 if (dentry->d_inode) {
209 if (S_ISDIR(dentry->d_inode->i_mode))
210 error = simple_rmdir(parent->d_inode, dentry);
211 else
212 error = simple_unlink(parent->d_inode, dentry);
213 if (!error)
214 d_delete(dentry);
215 }
216 if (!error)
217 dput(dentry);
218 up(&parent->d_inode->i_sem);
219 dput(parent);
220
221 if (!error)
222 simple_release_fs(&relayfs_mount, &relayfs_mount_count);
223
224 return error;
225}
226
227/**
228 * relayfs_remove_dir - remove a directory in the relay filesystem
229 * @dentry: directory dentry
230 *
231 * Returns 0 if successful, negative otherwise.
232 */
233int relayfs_remove_dir(struct dentry *dentry)
234{
235 return relayfs_remove(dentry);
236}
237
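/*
 * A usage sketch, not part of this patch: a client of the API above might
 * create a per-channel directory and a buffer file, and later tear them
 * down again. "chan" is assumed to be an already-initialized struct rchan *.
 *
 *	struct dentry *dir, *file;
 *
 *	dir  = relayfs_create_dir("my_tracer", NULL);
 *	file = relayfs_create_file("cpu0", dir, S_IRUSR, chan);
 *	...
 *	relayfs_remove(file);
 *	relayfs_remove_dir(dir);
 */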
238/**
239 * relayfs_open - open file op for relayfs files
240 * @inode: the inode
241 * @filp: the file
242 *
243 * Increments the channel buffer refcount.
244 */
245static int relayfs_open(struct inode *inode, struct file *filp)
246{
247 struct rchan_buf *buf = RELAYFS_I(inode)->buf;
248 kref_get(&buf->kref);
249
250 return 0;
251}
252
253/**
254 * relayfs_mmap - mmap file op for relayfs files
255 * @filp: the file
256 * @vma: the vma describing what to map
257 *
258 * Calls upon relay_mmap_buf to map the file into user space.
259 */
260static int relayfs_mmap(struct file *filp, struct vm_area_struct *vma)
261{
262 struct inode *inode = filp->f_dentry->d_inode;
263 return relay_mmap_buf(RELAYFS_I(inode)->buf, vma);
264}
265
266/**
267 * relayfs_poll - poll file op for relayfs files
268 * @filp: the file
269 * @wait: poll table
270 *
271 * poll() implementation.
272 */
273static unsigned int relayfs_poll(struct file *filp, poll_table *wait)
274{
275 unsigned int mask = 0;
276 struct inode *inode = filp->f_dentry->d_inode;
277 struct rchan_buf *buf = RELAYFS_I(inode)->buf;
278
279 if (buf->finalized)
280 return POLLERR;
281
282 if (filp->f_mode & FMODE_READ) {
283 poll_wait(filp, &buf->read_wait, wait);
284 if (!relay_buf_empty(buf))
285 mask |= POLLIN | POLLRDNORM;
286 }
287
288 return mask;
289}
290
291/**
292 * relayfs_release - release file op for relayfs files
293 * @inode: the inode
294 * @filp: the file
295 *
296 * Decrements the channel buffer refcount, as this file is
297 * no longer using it.
298 */
299static int relayfs_release(struct inode *inode, struct file *filp)
300{
301 struct rchan_buf *buf = RELAYFS_I(inode)->buf;
302 kref_put(&buf->kref, relay_remove_buf);
303
304 return 0;
305}
306
307/**
308 * relayfs_read_consume - update the consumed count for the buffer
309 */
310static void relayfs_read_consume(struct rchan_buf *buf,
311 size_t read_pos,
312 size_t bytes_consumed)
313{
314 size_t subbuf_size = buf->chan->subbuf_size;
315 size_t n_subbufs = buf->chan->n_subbufs;
316 size_t read_subbuf;
317
318 if (buf->bytes_consumed + bytes_consumed > subbuf_size) {
319 relay_subbufs_consumed(buf->chan, buf->cpu, 1);
320 buf->bytes_consumed = 0;
321 }
322
323 buf->bytes_consumed += bytes_consumed;
324 read_subbuf = read_pos / buf->chan->subbuf_size;
325 if (buf->bytes_consumed + buf->padding[read_subbuf] == subbuf_size) {
326 if ((read_subbuf == buf->subbufs_produced % n_subbufs) &&
327 (buf->offset == subbuf_size))
328 return;
329 relay_subbufs_consumed(buf->chan, buf->cpu, 1);
330 buf->bytes_consumed = 0;
331 }
332}
333
334/**
335 * relayfs_read_avail - boolean, are there unconsumed bytes available?
336 */
337static int relayfs_read_avail(struct rchan_buf *buf, size_t read_pos)
338{
339 size_t bytes_produced, bytes_consumed, write_offset;
340 size_t subbuf_size = buf->chan->subbuf_size;
341 size_t n_subbufs = buf->chan->n_subbufs;
342 size_t produced = buf->subbufs_produced % n_subbufs;
343 size_t consumed = buf->subbufs_consumed % n_subbufs;
344
345 write_offset = buf->offset > subbuf_size ? subbuf_size : buf->offset;
346
347 if (consumed > produced) {
348 if ((produced > n_subbufs) &&
349 (produced + n_subbufs - consumed <= n_subbufs))
350 produced += n_subbufs;
351 } else if (consumed == produced) {
352 if (buf->offset > subbuf_size) {
353 produced += n_subbufs;
354 if (buf->subbufs_produced == buf->subbufs_consumed)
355 consumed += n_subbufs;
356 }
357 }
358
359 if (buf->offset > subbuf_size)
360 bytes_produced = (produced - 1) * subbuf_size + write_offset;
361 else
362 bytes_produced = produced * subbuf_size + write_offset;
363 bytes_consumed = consumed * subbuf_size + buf->bytes_consumed;
364
365 if (bytes_produced == bytes_consumed)
366 return 0;
367
368 relayfs_read_consume(buf, read_pos, 0);
369
370 return 1;
371}
372
373/**
374 * relayfs_read_subbuf_avail - return bytes available in sub-buffer
375 */
376static size_t relayfs_read_subbuf_avail(size_t read_pos,
377 struct rchan_buf *buf)
378{
379 size_t padding, avail = 0;
380 size_t read_subbuf, read_offset, write_subbuf, write_offset;
381 size_t subbuf_size = buf->chan->subbuf_size;
382
383 write_subbuf = (buf->data - buf->start) / subbuf_size;
384 write_offset = buf->offset > subbuf_size ? subbuf_size : buf->offset;
385 read_subbuf = read_pos / subbuf_size;
386 read_offset = read_pos % subbuf_size;
387 padding = buf->padding[read_subbuf];
388
389 if (read_subbuf == write_subbuf) {
390 if (read_offset + padding < write_offset)
391 avail = write_offset - (read_offset + padding);
392 } else
393 avail = (subbuf_size - padding) - read_offset;
394
395 return avail;
396}
397
398/**
399 * relayfs_read_start_pos - find the first available byte to read
400 *
401 * If the read_pos is in the middle of padding, return the
402 * position of the first actually available byte, otherwise
403 * return the original value.
404 */
405static size_t relayfs_read_start_pos(size_t read_pos,
406 struct rchan_buf *buf)
407{
408 size_t read_subbuf, padding, padding_start, padding_end;
409 size_t subbuf_size = buf->chan->subbuf_size;
410 size_t n_subbufs = buf->chan->n_subbufs;
411
412 read_subbuf = read_pos / subbuf_size;
413 padding = buf->padding[read_subbuf];
414 padding_start = (read_subbuf + 1) * subbuf_size - padding;
415 padding_end = (read_subbuf + 1) * subbuf_size;
416 if (read_pos >= padding_start && read_pos < padding_end) {
417 read_subbuf = (read_subbuf + 1) % n_subbufs;
418 read_pos = read_subbuf * subbuf_size;
419 }
420
421 return read_pos;
422}
423
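A worked example of the padding skip above may help; the numbers are assumed, not taken from the patch.

/*
 * Example (assumed values): subbuf_size == 4096, padding[0] == 100.
 * Then padding_start = 1 * 4096 - 100 = 3996 and padding_end = 4096.
 * A read_pos of 4050 falls in [3996, 4096), i.e. inside the padding,
 * so the read position is advanced to the start of sub-buffer 1:
 * read_subbuf = 1, read_pos = 1 * 4096 = 4096.
 */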
424/**
425 * relayfs_read_end_pos - return the new read position
426 */
427static size_t relayfs_read_end_pos(struct rchan_buf *buf,
428 size_t read_pos,
429 size_t count)
430{
431 size_t read_subbuf, padding, end_pos;
432 size_t subbuf_size = buf->chan->subbuf_size;
433 size_t n_subbufs = buf->chan->n_subbufs;
434
435 read_subbuf = read_pos / subbuf_size;
436 padding = buf->padding[read_subbuf];
437 if (read_pos % subbuf_size + count + padding == subbuf_size)
438 end_pos = (read_subbuf + 1) * subbuf_size;
439 else
440 end_pos = read_pos + count;
441 if (end_pos >= subbuf_size * n_subbufs)
442 end_pos = 0;
443
444 return end_pos;
445}
446
447/**
448 * relayfs_read - read file op for relayfs files
449 * @filp: the file
450 * @buffer: the userspace buffer
451 * @count: number of bytes to read
452 * @ppos: position to read from
453 *
454 * Reads count bytes or the number of bytes available in the
455 * current sub-buffer being read, whichever is smaller.
456 */
457static ssize_t relayfs_read(struct file *filp,
458 char __user *buffer,
459 size_t count,
460 loff_t *ppos)
461{
462 struct inode *inode = filp->f_dentry->d_inode;
463 struct rchan_buf *buf = RELAYFS_I(inode)->buf;
464 size_t read_start, avail;
465 ssize_t ret = 0;
466 void *from;
467
468 down(&inode->i_sem);
469 if(!relayfs_read_avail(buf, *ppos))
470 goto out;
471
472 read_start = relayfs_read_start_pos(*ppos, buf);
473 avail = relayfs_read_subbuf_avail(read_start, buf);
474 if (!avail)
475 goto out;
476
477 from = buf->start + read_start;
478 ret = count = min(count, avail);
479 if (copy_to_user(buffer, from, count)) {
480 ret = -EFAULT;
481 goto out;
482 }
483 relayfs_read_consume(buf, read_start, count);
484 *ppos = relayfs_read_end_pos(buf, read_start, count);
485out:
486 up(&inode->i_sem);
487 return ret;
488}
489
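Because relayfs_read() consumes data as it returns it, a user-space consumer only needs an ordinary read loop. A minimal sketch, assuming relayfs is mounted at /mnt/relay and the per-cpu channel file is named "cpu0" (both names are assumptions):

/* user-space consumer sketch -- illustrative only */
#include <fcntl.h>
#include <unistd.h>

static int consume_cpu0(void)
{
	char data[4096];
	ssize_t n;
	int fd = open("/mnt/relay/cpu0", O_RDONLY);

	if (fd < 0)
		return -1;
	while ((n = read(fd, data, sizeof(data))) > 0)
		;	/* hand the n bytes just read to the analysis tool */
	close(fd);
	return n < 0 ? -1 : 0;
}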
490/**
491 * relayfs alloc_inode() implementation
492 */
493static struct inode *relayfs_alloc_inode(struct super_block *sb)
494{
495 struct relayfs_inode_info *p = kmem_cache_alloc(relayfs_inode_cachep, SLAB_KERNEL);
496 if (!p)
497 return NULL;
498 p->buf = NULL;
499
500 return &p->vfs_inode;
501}
502
503/**
504 * relayfs destroy_inode() implementation
505 */
506static void relayfs_destroy_inode(struct inode *inode)
507{
508 if (RELAYFS_I(inode)->buf)
509 relay_destroy_buf(RELAYFS_I(inode)->buf);
510
511 kmem_cache_free(relayfs_inode_cachep, RELAYFS_I(inode));
512}
513
514static void init_once(void *p, kmem_cache_t *cachep, unsigned long flags)
515{
516 struct relayfs_inode_info *i = p;
517 if ((flags & (SLAB_CTOR_VERIFY | SLAB_CTOR_CONSTRUCTOR)) == SLAB_CTOR_CONSTRUCTOR)
518 inode_init_once(&i->vfs_inode);
519}
520
521struct file_operations relayfs_file_operations = {
522 .open = relayfs_open,
523 .poll = relayfs_poll,
524 .mmap = relayfs_mmap,
525 .read = relayfs_read,
526 .llseek = no_llseek,
527 .release = relayfs_release,
528};
529
530static struct super_operations relayfs_ops = {
531 .statfs = simple_statfs,
532 .drop_inode = generic_delete_inode,
533 .alloc_inode = relayfs_alloc_inode,
534 .destroy_inode = relayfs_destroy_inode,
535};
536
537static int relayfs_fill_super(struct super_block * sb, void * data, int silent)
538{
539 struct inode *inode;
540 struct dentry *root;
541 int mode = S_IFDIR | S_IRWXU | S_IRUGO | S_IXUGO;
542
543 sb->s_blocksize = PAGE_CACHE_SIZE;
544 sb->s_blocksize_bits = PAGE_CACHE_SHIFT;
545 sb->s_magic = RELAYFS_MAGIC;
546 sb->s_op = &relayfs_ops;
547 inode = relayfs_get_inode(sb, mode, NULL);
548
549 if (!inode)
550 return -ENOMEM;
551
552 root = d_alloc_root(inode);
553 if (!root) {
554 iput(inode);
555 return -ENOMEM;
556 }
557 sb->s_root = root;
558
559 return 0;
560}
561
562static struct super_block * relayfs_get_sb(struct file_system_type *fs_type,
563 int flags, const char *dev_name,
564 void *data)
565{
566 return get_sb_single(fs_type, flags, data, relayfs_fill_super);
567}
568
569static struct file_system_type relayfs_fs_type = {
570 .owner = THIS_MODULE,
571 .name = "relayfs",
572 .get_sb = relayfs_get_sb,
573 .kill_sb = kill_litter_super,
574};
575
576static int __init init_relayfs_fs(void)
577{
578 int err;
579
580 relayfs_inode_cachep = kmem_cache_create("relayfs_inode_cache",
581 sizeof(struct relayfs_inode_info), 0,
582 0, init_once, NULL);
583 if (!relayfs_inode_cachep)
584 return -ENOMEM;
585
586 err = register_filesystem(&relayfs_fs_type);
587 if (err)
588 kmem_cache_destroy(relayfs_inode_cachep);
589
590 return err;
591}
592
593static void __exit exit_relayfs_fs(void)
594{
595 unregister_filesystem(&relayfs_fs_type);
596 kmem_cache_destroy(relayfs_inode_cachep);
597}
598
599module_init(init_relayfs_fs)
600module_exit(exit_relayfs_fs)
601
602EXPORT_SYMBOL_GPL(relayfs_file_operations);
603EXPORT_SYMBOL_GPL(relayfs_create_dir);
604EXPORT_SYMBOL_GPL(relayfs_remove_dir);
605
606MODULE_AUTHOR("Tom Zanussi <zanussi@us.ibm.com> and Karim Yaghmour <karim@opersys.com>");
607MODULE_DESCRIPTION("Relay Filesystem");
608MODULE_LICENSE("GPL");
609
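A minimal kernel client sketch of the directory API exported above (relayfs_create_dir()/relayfs_remove_dir()); the "example" names are assumptions, and the declarations are assumed to come from <linux/relayfs_fs.h>:

#include <linux/module.h>
#include <linux/errno.h>
#include <linux/relayfs_fs.h>

static struct dentry *example_dir;

static int __init example_init(void)
{
	example_dir = relayfs_create_dir("example", NULL);	/* in fs root */
	return example_dir ? 0 : -ENOMEM;
}

static void __exit example_exit(void)
{
	relayfs_remove_dir(example_dir);
}

module_init(example_init);
module_exit(example_exit);
MODULE_LICENSE("GPL");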
diff --git a/fs/relayfs/relay.c b/fs/relayfs/relay.c
new file mode 100644
index 000000000000..16446a15c96d
--- /dev/null
+++ b/fs/relayfs/relay.c
@@ -0,0 +1,431 @@
1/*
2 * Public API and common code for RelayFS.
3 *
4 * See Documentation/filesystems/relayfs.txt for an overview of relayfs.
5 *
6 * Copyright (C) 2002-2005 - Tom Zanussi (zanussi@us.ibm.com), IBM Corp
7 * Copyright (C) 1999-2005 - Karim Yaghmour (karim@opersys.com)
8 *
9 * This file is released under the GPL.
10 */
11
12#include <linux/errno.h>
13#include <linux/stddef.h>
14#include <linux/slab.h>
15#include <linux/module.h>
16#include <linux/string.h>
17#include <linux/relayfs_fs.h>
18#include "relay.h"
19#include "buffers.h"
20
21/**
22 * relay_buf_empty - boolean, is the channel buffer empty?
23 * @buf: channel buffer
24 *
25 * Returns 1 if the buffer is empty, 0 otherwise.
26 */
27int relay_buf_empty(struct rchan_buf *buf)
28{
29 return (buf->subbufs_produced - buf->subbufs_consumed) ? 0 : 1;
30}
31
32/**
33 * relay_buf_full - boolean, is the channel buffer full?
34 * @buf: channel buffer
35 *
36 * Returns 1 if the buffer is full, 0 otherwise.
37 */
38int relay_buf_full(struct rchan_buf *buf)
39{
40 size_t ready = buf->subbufs_produced - buf->subbufs_consumed;
41 return (ready >= buf->chan->n_subbufs) ? 1 : 0;
42}
43
44/*
45 * High-level relayfs kernel API and associated functions.
46 */
47
48/*
49 * rchan_callback implementations defining default channel behavior. Used
50 * in place of corresponding NULL values in client callback struct.
51 */
52
53/*
54 * subbuf_start() default callback. Refuses the sub-buffer switch (returns 0) if the buffer is full.
55 */
56static int subbuf_start_default_callback (struct rchan_buf *buf,
57 void *subbuf,
58 void *prev_subbuf,
59 size_t prev_padding)
60{
61 if (relay_buf_full(buf))
62 return 0;
63
64 return 1;
65}
66
67/*
68 * buf_mapped() default callback. Does nothing.
69 */
70static void buf_mapped_default_callback(struct rchan_buf *buf,
71 struct file *filp)
72{
73}
74
75/*
76 * buf_unmapped() default callback. Does nothing.
77 */
78static void buf_unmapped_default_callback(struct rchan_buf *buf,
79 struct file *filp)
80{
81}
82
83/* relay channel default callbacks */
84static struct rchan_callbacks default_channel_callbacks = {
85 .subbuf_start = subbuf_start_default_callback,
86 .buf_mapped = buf_mapped_default_callback,
87 .buf_unmapped = buf_unmapped_default_callback,
88};
89
90/**
91 * wakeup_readers - wake up readers waiting on a channel
92 * @private: the channel buffer
93 *
94 * This is the work function used to defer reader waking. The
95 * reason waking is deferred is that waking readers directly from the
96 * write path causes problems if the write happens from, say, the scheduler.
97 */
98static void wakeup_readers(void *private)
99{
100 struct rchan_buf *buf = private;
101 wake_up_interruptible(&buf->read_wait);
102}
103
104/**
105 * __relay_reset - reset a channel buffer
106 * @buf: the channel buffer
107 * @init: 1 if this is a first-time initialization
108 *
109 * See relay_reset for description of effect.
110 */
111static inline void __relay_reset(struct rchan_buf *buf, unsigned int init)
112{
113 size_t i;
114
115 if (init) {
116 init_waitqueue_head(&buf->read_wait);
117 kref_init(&buf->kref);
118 INIT_WORK(&buf->wake_readers, NULL, NULL);
119 } else {
120 cancel_delayed_work(&buf->wake_readers);
121 flush_scheduled_work();
122 }
123
124 buf->subbufs_produced = 0;
125 buf->subbufs_consumed = 0;
126 buf->bytes_consumed = 0;
127 buf->finalized = 0;
128 buf->data = buf->start;
129 buf->offset = 0;
130
131 for (i = 0; i < buf->chan->n_subbufs; i++)
132 buf->padding[i] = 0;
133
134 buf->chan->cb->subbuf_start(buf, buf->data, NULL, 0);
135}
136
137/**
138 * relay_reset - reset the channel
139 * @chan: the channel
140 *
141 * This has the effect of erasing all data from all channel buffers
142 * and restarting the channel in its initial state. The buffers
143 * are not freed, so any mappings are still in effect.
144 *
145 * NOTE: Care should be taken that the channel isn't actually
146 * being used by anything when this call is made.
147 */
148void relay_reset(struct rchan *chan)
149{
150 unsigned int i;
151
152 if (!chan)
153 return;
154
155 for (i = 0; i < NR_CPUS; i++) {
156 if (!chan->buf[i])
157 continue;
158 __relay_reset(chan->buf[i], 0);
159 }
160}
161
162/**
163 * relay_open_buf - create a new channel buffer in relayfs
164 *
165 * Internal - used by relay_open().
166 */
167static struct rchan_buf *relay_open_buf(struct rchan *chan,
168 const char *filename,
169 struct dentry *parent)
170{
171 struct rchan_buf *buf;
172 struct dentry *dentry;
173
174 /* Create file in fs */
175 dentry = relayfs_create_file(filename, parent, S_IRUSR, chan);
176 if (!dentry)
177 return NULL;
178
179 buf = RELAYFS_I(dentry->d_inode)->buf;
180 buf->dentry = dentry;
181 __relay_reset(buf, 1);
182
183 return buf;
184}
185
186/**
187 * relay_close_buf - close a channel buffer
188 * @buf: channel buffer
189 *
190 * Marks the buffer finalized and restores the default callbacks.
191 * The channel buffer and channel buffer data structure are then freed
192 * automatically when the last reference is given up.
193 */
194static inline void relay_close_buf(struct rchan_buf *buf)
195{
196 buf->finalized = 1;
197 buf->chan->cb = &default_channel_callbacks;
198 cancel_delayed_work(&buf->wake_readers);
199 flush_scheduled_work();
200 kref_put(&buf->kref, relay_remove_buf);
201}
202
203static inline void setup_callbacks(struct rchan *chan,
204 struct rchan_callbacks *cb)
205{
206 if (!cb) {
207 chan->cb = &default_channel_callbacks;
208 return;
209 }
210
211 if (!cb->subbuf_start)
212 cb->subbuf_start = subbuf_start_default_callback;
213 if (!cb->buf_mapped)
214 cb->buf_mapped = buf_mapped_default_callback;
215 if (!cb->buf_unmapped)
216 cb->buf_unmapped = buf_unmapped_default_callback;
217 chan->cb = cb;
218}
219
220/**
221 * relay_open - create a new relayfs channel
222 * @base_filename: base name of files to create
223 * @parent: dentry of parent directory, NULL for root directory
224 * @subbuf_size: size of sub-buffers
225 * @n_subbufs: number of sub-buffers
226 * @cb: client callback functions
227 *
228 * Returns channel pointer if successful, NULL otherwise.
229 *
230 * Creates a channel buffer for each cpu using the sizes and
231 * attributes specified. The created channel buffer files
232 * will be named base_filename0...base_filenameN-1. File
233 * permissions will be S_IRUSR.
234 */
235struct rchan *relay_open(const char *base_filename,
236 struct dentry *parent,
237 size_t subbuf_size,
238 size_t n_subbufs,
239 struct rchan_callbacks *cb)
240{
241 unsigned int i;
242 struct rchan *chan;
243 char *tmpname;
244
245 if (!base_filename)
246 return NULL;
247
248 if (!(subbuf_size && n_subbufs))
249 return NULL;
250
251 chan = kcalloc(1, sizeof(struct rchan), GFP_KERNEL);
252 if (!chan)
253 return NULL;
254
255 chan->version = RELAYFS_CHANNEL_VERSION;
256 chan->n_subbufs = n_subbufs;
257 chan->subbuf_size = subbuf_size;
258 chan->alloc_size = FIX_SIZE(subbuf_size * n_subbufs);
259 setup_callbacks(chan, cb);
260 kref_init(&chan->kref);
261
262 tmpname = kmalloc(NAME_MAX + 1, GFP_KERNEL);
263 if (!tmpname)
264 goto free_chan;
265
266 for_each_online_cpu(i) {
267 sprintf(tmpname, "%s%d", base_filename, i);
268 chan->buf[i] = relay_open_buf(chan, tmpname, parent);
269 if (!chan->buf[i])
270 goto free_bufs;
271 chan->buf[i]->cpu = i;
272 }
273
274 kfree(tmpname);
275 return chan;
276
277free_bufs:
278 for (i = 0; i < NR_CPUS; i++) {
279 if (!chan->buf[i])
280 break;
281 relay_close_buf(chan->buf[i]);
282 }
283 kfree(tmpname);
284
285free_chan:
286 kref_put(&chan->kref, relay_destroy_channel);
287 return NULL;
288}
289
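A sketch of how a client might drive relay_open(), with a subbuf_start callback that drops events once the buffer is full (no-overwrite behavior). All example_* names and the sizes are assumptions; example_dir would come from relayfs_create_dir() as in the earlier sketch.

static struct dentry *example_dir;	/* from relayfs_create_dir() */
static struct rchan *example_chan;

static int example_subbuf_start(struct rchan_buf *buf, void *subbuf,
				void *prev_subbuf, size_t prev_padding)
{
	return !relay_buf_full(buf);	/* 0 == don't switch, i.e. drop data */
}

static struct rchan_callbacks example_callbacks = {
	.subbuf_start	= example_subbuf_start,
};

static int example_create_channel(void)
{
	/* 8 sub-buffers of 64KB per cpu; files named example0..exampleN-1 */
	example_chan = relay_open("example", example_dir, 65536, 8,
				  &example_callbacks);
	return example_chan ? 0 : -ENOMEM;
}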
290/**
291 * relay_switch_subbuf - switch to a new sub-buffer
292 * @buf: channel buffer
293 * @length: size of current event
294 *
295 * Returns either the length passed in or 0 if full.
296 *
297 * Performs sub-buffer-switch tasks such as invoking callbacks,
298 * updating padding counts, waking up readers, etc.
299 */
300size_t relay_switch_subbuf(struct rchan_buf *buf, size_t length)
301{
302 void *old, *new;
303 size_t old_subbuf, new_subbuf;
304
305 if (unlikely(length > buf->chan->subbuf_size))
306 goto toobig;
307
308 if (buf->offset != buf->chan->subbuf_size + 1) {
309 buf->prev_padding = buf->chan->subbuf_size - buf->offset;
310 old_subbuf = buf->subbufs_produced % buf->chan->n_subbufs;
311 buf->padding[old_subbuf] = buf->prev_padding;
312 buf->subbufs_produced++;
313 if (waitqueue_active(&buf->read_wait)) {
314 PREPARE_WORK(&buf->wake_readers, wakeup_readers, buf);
315 schedule_delayed_work(&buf->wake_readers, 1);
316 }
317 }
318
319 old = buf->data;
320 new_subbuf = buf->subbufs_produced % buf->chan->n_subbufs;
321 new = buf->start + new_subbuf * buf->chan->subbuf_size;
322 buf->offset = 0;
323 if (!buf->chan->cb->subbuf_start(buf, new, old, buf->prev_padding)) {
324 buf->offset = buf->chan->subbuf_size + 1;
325 return 0;
326 }
327 buf->data = new;
328 buf->padding[new_subbuf] = 0;
329
330 if (unlikely(length + buf->offset > buf->chan->subbuf_size))
331 goto toobig;
332
333 return length;
334
335toobig:
336 printk(KERN_WARNING "relayfs: event too large (%Zd)\n", length);
337 WARN_ON(1);
338 return 0;
339}
340
341/**
342 * relay_subbufs_consumed - update the buffer's sub-buffers-consumed count
343 * @chan: the channel
344 * @cpu: the cpu associated with the channel buffer to update
345 * @subbufs_consumed: number of sub-buffers to add to current buf's count
346 *
347 * Adds to the channel buffer's consumed sub-buffer count.
348 * subbufs_consumed should be the number of sub-buffers newly consumed,
349 * not the total consumed.
350 *
351 * NOTE: kernel clients don't need to call this function if the channel
352 * mode is 'overwrite'.
353 */
354void relay_subbufs_consumed(struct rchan *chan,
355 unsigned int cpu,
356 size_t subbufs_consumed)
357{
358 struct rchan_buf *buf;
359
360 if (!chan)
361 return;
362
363 if (cpu >= NR_CPUS || !chan->buf[cpu])
364 return;
365
366 buf = chan->buf[cpu];
367 buf->subbufs_consumed += subbufs_consumed;
368 if (buf->subbufs_consumed > buf->subbufs_produced)
369 buf->subbufs_consumed = buf->subbufs_produced;
370}
371
372/**
373 * relay_destroy_channel - free the channel struct
374 *
375 * Should only be called from kref_put().
376 */
377void relay_destroy_channel(struct kref *kref)
378{
379 struct rchan *chan = container_of(kref, struct rchan, kref);
380 kfree(chan);
381}
382
383/**
384 * relay_close - close the channel
385 * @chan: the channel
386 *
387 * Closes all channel buffers and frees the channel.
388 */
389void relay_close(struct rchan *chan)
390{
391 unsigned int i;
392
393 if (!chan)
394 return;
395
396 for (i = 0; i < NR_CPUS; i++) {
397 if (!chan->buf[i])
398 continue;
399 relay_close_buf(chan->buf[i]);
400 }
401
402 kref_put(&chan->kref, relay_destroy_channel);
403}
404
405/**
406 * relay_flush - flush the channel
407 * @chan: the channel
408 *
409 * Flushes all channel buffers, i.e. forces a sub-buffer switch on each.
410 */
411void relay_flush(struct rchan *chan)
412{
413 unsigned int i;
414
415 if (!chan)
416 return;
417
418 for (i = 0; i < NR_CPUS; i++) {
419 if (!chan->buf[i])
420 continue;
421 relay_switch_subbuf(chan->buf[i], 0);
422 }
423}
424
425EXPORT_SYMBOL_GPL(relay_open);
426EXPORT_SYMBOL_GPL(relay_close);
427EXPORT_SYMBOL_GPL(relay_flush);
428EXPORT_SYMBOL_GPL(relay_reset);
429EXPORT_SYMBOL_GPL(relay_subbufs_consumed);
430EXPORT_SYMBOL_GPL(relay_switch_subbuf);
431EXPORT_SYMBOL_GPL(relay_buf_full);
diff --git a/fs/relayfs/relay.h b/fs/relayfs/relay.h
new file mode 100644
index 000000000000..703503fa22b6
--- /dev/null
+++ b/fs/relayfs/relay.h
@@ -0,0 +1,12 @@
1#ifndef _RELAY_H
2#define _RELAY_H
3
4struct dentry *relayfs_create_file(const char *name,
5 struct dentry *parent,
6 int mode,
7 struct rchan *chan);
8extern int relayfs_remove(struct dentry *dentry);
9extern int relay_buf_empty(struct rchan_buf *buf);
10extern void relay_destroy_channel(struct kref *kref);
11
12#endif /* _RELAY_H */
diff --git a/fs/ufs/balloc.c b/fs/ufs/balloc.c
index 997640c99c7d..faf1512173eb 100644
--- a/fs/ufs/balloc.c
+++ b/fs/ufs/balloc.c
@@ -114,8 +114,7 @@ void ufs_free_fragments (struct inode * inode, unsigned fragment, unsigned count
114 ubh_mark_buffer_dirty (USPI_UBH); 114 ubh_mark_buffer_dirty (USPI_UBH);
115 ubh_mark_buffer_dirty (UCPI_UBH); 115 ubh_mark_buffer_dirty (UCPI_UBH);
116 if (sb->s_flags & MS_SYNCHRONOUS) { 116 if (sb->s_flags & MS_SYNCHRONOUS) {
117 ubh_wait_on_buffer (UCPI_UBH); 117 ubh_ll_rw_block (SWRITE, 1, (struct ufs_buffer_head **)&ucpi);
118 ubh_ll_rw_block (WRITE, 1, (struct ufs_buffer_head **)&ucpi);
119 ubh_wait_on_buffer (UCPI_UBH); 118 ubh_wait_on_buffer (UCPI_UBH);
120 } 119 }
121 sb->s_dirt = 1; 120 sb->s_dirt = 1;
@@ -200,8 +199,7 @@ do_more:
200 ubh_mark_buffer_dirty (USPI_UBH); 199 ubh_mark_buffer_dirty (USPI_UBH);
201 ubh_mark_buffer_dirty (UCPI_UBH); 200 ubh_mark_buffer_dirty (UCPI_UBH);
202 if (sb->s_flags & MS_SYNCHRONOUS) { 201 if (sb->s_flags & MS_SYNCHRONOUS) {
203 ubh_wait_on_buffer (UCPI_UBH); 202 ubh_ll_rw_block (SWRITE, 1, (struct ufs_buffer_head **)&ucpi);
204 ubh_ll_rw_block (WRITE, 1, (struct ufs_buffer_head **)&ucpi);
205 ubh_wait_on_buffer (UCPI_UBH); 203 ubh_wait_on_buffer (UCPI_UBH);
206 } 204 }
207 205
@@ -459,8 +457,7 @@ ufs_add_fragments (struct inode * inode, unsigned fragment,
459 ubh_mark_buffer_dirty (USPI_UBH); 457 ubh_mark_buffer_dirty (USPI_UBH);
460 ubh_mark_buffer_dirty (UCPI_UBH); 458 ubh_mark_buffer_dirty (UCPI_UBH);
461 if (sb->s_flags & MS_SYNCHRONOUS) { 459 if (sb->s_flags & MS_SYNCHRONOUS) {
462 ubh_wait_on_buffer (UCPI_UBH); 460 ubh_ll_rw_block (SWRITE, 1, (struct ufs_buffer_head **)&ucpi);
463 ubh_ll_rw_block (WRITE, 1, (struct ufs_buffer_head **)&ucpi);
464 ubh_wait_on_buffer (UCPI_UBH); 461 ubh_wait_on_buffer (UCPI_UBH);
465 } 462 }
466 sb->s_dirt = 1; 463 sb->s_dirt = 1;
@@ -585,8 +582,7 @@ succed:
585 ubh_mark_buffer_dirty (USPI_UBH); 582 ubh_mark_buffer_dirty (USPI_UBH);
586 ubh_mark_buffer_dirty (UCPI_UBH); 583 ubh_mark_buffer_dirty (UCPI_UBH);
587 if (sb->s_flags & MS_SYNCHRONOUS) { 584 if (sb->s_flags & MS_SYNCHRONOUS) {
588 ubh_wait_on_buffer (UCPI_UBH); 585 ubh_ll_rw_block (SWRITE, 1, (struct ufs_buffer_head **)&ucpi);
589 ubh_ll_rw_block (WRITE, 1, (struct ufs_buffer_head **)&ucpi);
590 ubh_wait_on_buffer (UCPI_UBH); 586 ubh_wait_on_buffer (UCPI_UBH);
591 } 587 }
592 sb->s_dirt = 1; 588 sb->s_dirt = 1;
diff --git a/fs/ufs/ialloc.c b/fs/ufs/ialloc.c
index 61a6b1542fc5..0938945b9cbc 100644
--- a/fs/ufs/ialloc.c
+++ b/fs/ufs/ialloc.c
@@ -124,8 +124,7 @@ void ufs_free_inode (struct inode * inode)
124 ubh_mark_buffer_dirty (USPI_UBH); 124 ubh_mark_buffer_dirty (USPI_UBH);
125 ubh_mark_buffer_dirty (UCPI_UBH); 125 ubh_mark_buffer_dirty (UCPI_UBH);
126 if (sb->s_flags & MS_SYNCHRONOUS) { 126 if (sb->s_flags & MS_SYNCHRONOUS) {
127 ubh_wait_on_buffer (UCPI_UBH); 127 ubh_ll_rw_block (SWRITE, 1, (struct ufs_buffer_head **) &ucpi);
128 ubh_ll_rw_block (WRITE, 1, (struct ufs_buffer_head **) &ucpi);
129 ubh_wait_on_buffer (UCPI_UBH); 128 ubh_wait_on_buffer (UCPI_UBH);
130 } 129 }
131 130
@@ -249,8 +248,7 @@ cg_found:
249 ubh_mark_buffer_dirty (USPI_UBH); 248 ubh_mark_buffer_dirty (USPI_UBH);
250 ubh_mark_buffer_dirty (UCPI_UBH); 249 ubh_mark_buffer_dirty (UCPI_UBH);
251 if (sb->s_flags & MS_SYNCHRONOUS) { 250 if (sb->s_flags & MS_SYNCHRONOUS) {
252 ubh_wait_on_buffer (UCPI_UBH); 251 ubh_ll_rw_block (SWRITE, 1, (struct ufs_buffer_head **) &ucpi);
253 ubh_ll_rw_block (WRITE, 1, (struct ufs_buffer_head **) &ucpi);
254 ubh_wait_on_buffer (UCPI_UBH); 252 ubh_wait_on_buffer (UCPI_UBH);
255 } 253 }
256 sb->s_dirt = 1; 254 sb->s_dirt = 1;
diff --git a/fs/ufs/truncate.c b/fs/ufs/truncate.c
index e312bf8bad9f..61d2e35012a4 100644
--- a/fs/ufs/truncate.c
+++ b/fs/ufs/truncate.c
@@ -285,8 +285,7 @@ next:;
285 } 285 }
286 } 286 }
287 if (IS_SYNC(inode) && ind_ubh && ubh_buffer_dirty(ind_ubh)) { 287 if (IS_SYNC(inode) && ind_ubh && ubh_buffer_dirty(ind_ubh)) {
288 ubh_wait_on_buffer (ind_ubh); 288 ubh_ll_rw_block (SWRITE, 1, &ind_ubh);
289 ubh_ll_rw_block (WRITE, 1, &ind_ubh);
290 ubh_wait_on_buffer (ind_ubh); 289 ubh_wait_on_buffer (ind_ubh);
291 } 290 }
292 ubh_brelse (ind_ubh); 291 ubh_brelse (ind_ubh);
@@ -353,8 +352,7 @@ static int ufs_trunc_dindirect (struct inode *inode, unsigned offset, __fs32 *p)
353 } 352 }
354 } 353 }
355 if (IS_SYNC(inode) && dind_bh && ubh_buffer_dirty(dind_bh)) { 354 if (IS_SYNC(inode) && dind_bh && ubh_buffer_dirty(dind_bh)) {
356 ubh_wait_on_buffer (dind_bh); 355 ubh_ll_rw_block (SWRITE, 1, &dind_bh);
357 ubh_ll_rw_block (WRITE, 1, &dind_bh);
358 ubh_wait_on_buffer (dind_bh); 356 ubh_wait_on_buffer (dind_bh);
359 } 357 }
360 ubh_brelse (dind_bh); 358 ubh_brelse (dind_bh);
@@ -418,8 +416,7 @@ static int ufs_trunc_tindirect (struct inode * inode)
418 } 416 }
419 } 417 }
420 if (IS_SYNC(inode) && tind_bh && ubh_buffer_dirty(tind_bh)) { 418 if (IS_SYNC(inode) && tind_bh && ubh_buffer_dirty(tind_bh)) {
421 ubh_wait_on_buffer (tind_bh); 419 ubh_ll_rw_block (SWRITE, 1, &tind_bh);
422 ubh_ll_rw_block (WRITE, 1, &tind_bh);
423 ubh_wait_on_buffer (tind_bh); 420 ubh_wait_on_buffer (tind_bh);
424 } 421 }
425 ubh_brelse (tind_bh); 422 ubh_brelse (tind_bh);
diff --git a/fs/umsdos/notes b/fs/umsdos/notes
deleted file mode 100644
index 3c47d1f4fc47..000000000000
--- a/fs/umsdos/notes
+++ /dev/null
@@ -1,17 +0,0 @@
1This file contain idea and things I don't want to forget
2
3Possible bug in fs/read_write.c
4Function sys_readdir()
5
6 There is a call the verify_area that does not take in account
7 the count parameter. I guess it should read
8
9 error = verify_area(VERIFY_WRITE, dirent, count*sizeof (*dirent));
10
11 instead of
12
13 error = verify_area(VERIFY_WRITE, dirent, sizeof (*dirent));
14
15 Of course, now , count is always 1
16
17
diff --git a/fs/xattr.c b/fs/xattr.c
index dc8bc7624f26..3f9c64bea151 100644
--- a/fs/xattr.c
+++ b/fs/xattr.c
@@ -325,6 +325,8 @@ removexattr(struct dentry *d, char __user *name)
325 down(&d->d_inode->i_sem); 325 down(&d->d_inode->i_sem);
326 error = d->d_inode->i_op->removexattr(d, kname); 326 error = d->d_inode->i_op->removexattr(d, kname);
327 up(&d->d_inode->i_sem); 327 up(&d->d_inode->i_sem);
328 if (!error)
329 fsnotify_xattr(d);
328 } 330 }
329out: 331out:
330 return error; 332 return error;
diff --git a/fs/xfs/Makefile b/fs/xfs/Makefile
index d3ff78354638..49e3e7e5e3dc 100644
--- a/fs/xfs/Makefile
+++ b/fs/xfs/Makefile
@@ -1,150 +1 @@
1# include $(TOPDIR)/fs/xfs/Makefile-linux-$(VERSION).$(PATCHLEVEL)
2# Copyright (c) 2000-2004 Silicon Graphics, Inc. All Rights Reserved.
3#
4# This program is free software; you can redistribute it and/or modify it
5# under the terms of version 2 of the GNU General Public License as
6# published by the Free Software Foundation.
7#
8# This program is distributed in the hope that it would be useful, but
9# WITHOUT ANY WARRANTY; without even the implied warranty of
10# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
11#
12# Further, this software is distributed without any warranty that it is
13# free of the rightful claim of any third person regarding infringement
14# or the like. Any license provided herein, whether implied or
15# otherwise, applies only to this software file. Patent licenses, if
16# any, provided herein do not apply to combinations of this program with
17# other software, or any other product whatsoever.
18#
19# You should have received a copy of the GNU General Public License along
20# with this program; if not, write the Free Software Foundation, Inc., 59
21# Temple Place - Suite 330, Boston MA 02111-1307, USA.
22#
23# Contact information: Silicon Graphics, Inc., 1600 Amphitheatre Pkwy,
24# Mountain View, CA 94043, or:
25#
26# http://www.sgi.com
27#
28# For further information regarding this notice, see:
29#
30# http://oss.sgi.com/projects/GenInfo/SGIGPLNoticeExplan/
31#
32
33EXTRA_CFLAGS += -Ifs/xfs -Ifs/xfs/linux-2.6 -funsigned-char
34
35ifeq ($(CONFIG_XFS_DEBUG),y)
36 EXTRA_CFLAGS += -g -DSTATIC="" -DDEBUG
37 EXTRA_CFLAGS += -DPAGEBUF_LOCK_TRACKING
38endif
39ifeq ($(CONFIG_XFS_TRACE),y)
40 EXTRA_CFLAGS += -DXFS_ALLOC_TRACE
41 EXTRA_CFLAGS += -DXFS_ATTR_TRACE
42 EXTRA_CFLAGS += -DXFS_BLI_TRACE
43 EXTRA_CFLAGS += -DXFS_BMAP_TRACE
44 EXTRA_CFLAGS += -DXFS_BMBT_TRACE
45 EXTRA_CFLAGS += -DXFS_DIR_TRACE
46 EXTRA_CFLAGS += -DXFS_DIR2_TRACE
47 EXTRA_CFLAGS += -DXFS_DQUOT_TRACE
48 EXTRA_CFLAGS += -DXFS_ILOCK_TRACE
49 EXTRA_CFLAGS += -DXFS_LOG_TRACE
50 EXTRA_CFLAGS += -DXFS_RW_TRACE
51 EXTRA_CFLAGS += -DPAGEBUF_TRACE
52 EXTRA_CFLAGS += -DXFS_VNODE_TRACE
53endif
54
55obj-$(CONFIG_XFS_FS) += xfs.o
56
57xfs-$(CONFIG_XFS_QUOTA) += $(addprefix quota/, \
58 xfs_dquot.o \
59 xfs_dquot_item.o \
60 xfs_trans_dquot.o \
61 xfs_qm_syscalls.o \
62 xfs_qm_bhv.o \
63 xfs_qm.o)
64ifeq ($(CONFIG_XFS_QUOTA),y)
65xfs-$(CONFIG_PROC_FS) += quota/xfs_qm_stats.o
66endif
67
68xfs-$(CONFIG_XFS_RT) += xfs_rtalloc.o
69xfs-$(CONFIG_XFS_POSIX_ACL) += xfs_acl.o
70xfs-$(CONFIG_PROC_FS) += linux-2.6/xfs_stats.o
71xfs-$(CONFIG_SYSCTL) += linux-2.6/xfs_sysctl.o
72xfs-$(CONFIG_COMPAT) += linux-2.6/xfs_ioctl32.o
73xfs-$(CONFIG_XFS_EXPORT) += linux-2.6/xfs_export.o
74
75
76xfs-y += xfs_alloc.o \
77 xfs_alloc_btree.o \
78 xfs_attr.o \
79 xfs_attr_leaf.o \
80 xfs_behavior.o \
81 xfs_bit.o \
82 xfs_bmap.o \
83 xfs_bmap_btree.o \
84 xfs_btree.o \
85 xfs_buf_item.o \
86 xfs_da_btree.o \
87 xfs_dir.o \
88 xfs_dir2.o \
89 xfs_dir2_block.o \
90 xfs_dir2_data.o \
91 xfs_dir2_leaf.o \
92 xfs_dir2_node.o \
93 xfs_dir2_sf.o \
94 xfs_dir_leaf.o \
95 xfs_error.o \
96 xfs_extfree_item.o \
97 xfs_fsops.o \
98 xfs_ialloc.o \
99 xfs_ialloc_btree.o \
100 xfs_iget.o \
101 xfs_inode.o \
102 xfs_inode_item.o \
103 xfs_iocore.o \
104 xfs_iomap.o \
105 xfs_itable.o \
106 xfs_dfrag.o \
107 xfs_log.o \
108 xfs_log_recover.o \
109 xfs_macros.o \
110 xfs_mount.o \
111 xfs_rename.o \
112 xfs_trans.o \
113 xfs_trans_ail.o \
114 xfs_trans_buf.o \
115 xfs_trans_extfree.o \
116 xfs_trans_inode.o \
117 xfs_trans_item.o \
118 xfs_utils.o \
119 xfs_vfsops.o \
120 xfs_vnodeops.o \
121 xfs_rw.o \
122 xfs_dmops.o \
123 xfs_qmops.o
124
125xfs-$(CONFIG_XFS_TRACE) += xfs_dir2_trace.o
126
127# Objects in linux-2.6/
128xfs-y += $(addprefix linux-2.6/, \
129 kmem.o \
130 xfs_aops.o \
131 xfs_buf.o \
132 xfs_file.o \
133 xfs_fs_subr.o \
134 xfs_globals.o \
135 xfs_ioctl.o \
136 xfs_iops.o \
137 xfs_lrw.o \
138 xfs_super.o \
139 xfs_vfs.o \
140 xfs_vnode.o)
141
142# Objects in support/
143xfs-y += $(addprefix support/, \
144 debug.o \
145 move.o \
146 qsort.o \
147 uuid.o)
148
149xfs-$(CONFIG_XFS_TRACE) += support/ktrace.o
150
diff --git a/fs/xfs/Makefile-linux-2.6 b/fs/xfs/Makefile-linux-2.6
new file mode 100644
index 000000000000..fbfcbe5a7cda
--- /dev/null
+++ b/fs/xfs/Makefile-linux-2.6
@@ -0,0 +1,141 @@
1#
2# Copyright (c) 2000-2004 Silicon Graphics, Inc. All Rights Reserved.
3#
4# This program is free software; you can redistribute it and/or modify it
5# under the terms of version 2 of the GNU General Public License as
6# published by the Free Software Foundation.
7#
8# This program is distributed in the hope that it would be useful, but
9# WITHOUT ANY WARRANTY; without even the implied warranty of
10# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
11#
12# Further, this software is distributed without any warranty that it is
13# free of the rightful claim of any third person regarding infringement
14# or the like. Any license provided herein, whether implied or
15# otherwise, applies only to this software file. Patent licenses, if
16# any, provided herein do not apply to combinations of this program with
17# other software, or any other product whatsoever.
18#
19# You should have received a copy of the GNU General Public License along
20# with this program; if not, write the Free Software Foundation, Inc., 59
21# Temple Place - Suite 330, Boston MA 02111-1307, USA.
22#
23# Contact information: Silicon Graphics, Inc., 1600 Amphitheatre Pkwy,
24# Mountain View, CA 94043, or:
25#
26# http://www.sgi.com
27#
28# For further information regarding this notice, see:
29#
30# http://oss.sgi.com/projects/GenInfo/SGIGPLNoticeExplan/
31#
32
33EXTRA_CFLAGS += -Ifs/xfs -Ifs/xfs/linux-2.6 -funsigned-char
34
35XFS_LINUX := linux-2.6
36
37ifeq ($(CONFIG_XFS_DEBUG),y)
38 EXTRA_CFLAGS += -g -DSTATIC="" -DDEBUG
39 EXTRA_CFLAGS += -DPAGEBUF_LOCK_TRACKING
40endif
41ifeq ($(CONFIG_XFS_TRACE),y)
42 EXTRA_CFLAGS += -DXFS_ALLOC_TRACE
43 EXTRA_CFLAGS += -DXFS_ATTR_TRACE
44 EXTRA_CFLAGS += -DXFS_BLI_TRACE
45 EXTRA_CFLAGS += -DXFS_BMAP_TRACE
46 EXTRA_CFLAGS += -DXFS_BMBT_TRACE
47 EXTRA_CFLAGS += -DXFS_DIR_TRACE
48 EXTRA_CFLAGS += -DXFS_DIR2_TRACE
49 EXTRA_CFLAGS += -DXFS_DQUOT_TRACE
50 EXTRA_CFLAGS += -DXFS_ILOCK_TRACE
51 EXTRA_CFLAGS += -DXFS_LOG_TRACE
52 EXTRA_CFLAGS += -DXFS_RW_TRACE
53 EXTRA_CFLAGS += -DPAGEBUF_TRACE
54 EXTRA_CFLAGS += -DXFS_VNODE_TRACE
55endif
56
57obj-$(CONFIG_XFS_FS) += xfs.o
58obj-$(CONFIG_XFS_QUOTA) += quota/
59
60xfs-$(CONFIG_XFS_RT) += xfs_rtalloc.o
61xfs-$(CONFIG_XFS_POSIX_ACL) += xfs_acl.o
62xfs-$(CONFIG_PROC_FS) += $(XFS_LINUX)/xfs_stats.o
63xfs-$(CONFIG_SYSCTL) += $(XFS_LINUX)/xfs_sysctl.o
64xfs-$(CONFIG_COMPAT) += $(XFS_LINUX)/xfs_ioctl32.o
65xfs-$(CONFIG_XFS_EXPORT) += $(XFS_LINUX)/xfs_export.o
66
67
68xfs-y += xfs_alloc.o \
69 xfs_alloc_btree.o \
70 xfs_attr.o \
71 xfs_attr_leaf.o \
72 xfs_behavior.o \
73 xfs_bit.o \
74 xfs_bmap.o \
75 xfs_bmap_btree.o \
76 xfs_btree.o \
77 xfs_buf_item.o \
78 xfs_da_btree.o \
79 xfs_dir.o \
80 xfs_dir2.o \
81 xfs_dir2_block.o \
82 xfs_dir2_data.o \
83 xfs_dir2_leaf.o \
84 xfs_dir2_node.o \
85 xfs_dir2_sf.o \
86 xfs_dir_leaf.o \
87 xfs_error.o \
88 xfs_extfree_item.o \
89 xfs_fsops.o \
90 xfs_ialloc.o \
91 xfs_ialloc_btree.o \
92 xfs_iget.o \
93 xfs_inode.o \
94 xfs_inode_item.o \
95 xfs_iocore.o \
96 xfs_iomap.o \
97 xfs_itable.o \
98 xfs_dfrag.o \
99 xfs_log.o \
100 xfs_log_recover.o \
101 xfs_macros.o \
102 xfs_mount.o \
103 xfs_rename.o \
104 xfs_trans.o \
105 xfs_trans_ail.o \
106 xfs_trans_buf.o \
107 xfs_trans_extfree.o \
108 xfs_trans_inode.o \
109 xfs_trans_item.o \
110 xfs_utils.o \
111 xfs_vfsops.o \
112 xfs_vnodeops.o \
113 xfs_rw.o \
114 xfs_dmops.o \
115 xfs_qmops.o
116
117xfs-$(CONFIG_XFS_TRACE) += xfs_dir2_trace.o
118
119# Objects in linux/
120xfs-y += $(addprefix $(XFS_LINUX)/, \
121 kmem.o \
122 xfs_aops.o \
123 xfs_buf.o \
124 xfs_file.o \
125 xfs_fs_subr.o \
126 xfs_globals.o \
127 xfs_ioctl.o \
128 xfs_iops.o \
129 xfs_lrw.o \
130 xfs_super.o \
131 xfs_vfs.o \
132 xfs_vnode.o)
133
134# Objects in support/
135xfs-y += $(addprefix support/, \
136 debug.o \
137 move.o \
138 uuid.o)
139
140xfs-$(CONFIG_XFS_TRACE) += support/ktrace.o
141
diff --git a/fs/xfs/linux-2.6/kmem.c b/fs/xfs/linux-2.6/kmem.c
index 364ea8c386b1..4b184559f231 100644
--- a/fs/xfs/linux-2.6/kmem.c
+++ b/fs/xfs/linux-2.6/kmem.c
@@ -45,11 +45,11 @@
45 45
46 46
47void * 47void *
48kmem_alloc(size_t size, int flags) 48kmem_alloc(size_t size, unsigned int __nocast flags)
49{ 49{
50 int retries = 0; 50 int retries = 0;
51 int lflags = kmem_flags_convert(flags); 51 unsigned int lflags = kmem_flags_convert(flags);
52 void *ptr; 52 void *ptr;
53 53
54 do { 54 do {
55 if (size < MAX_SLAB_SIZE || retries > MAX_VMALLOCS) 55 if (size < MAX_SLAB_SIZE || retries > MAX_VMALLOCS)
@@ -67,7 +67,7 @@ kmem_alloc(size_t size, int flags)
67} 67}
68 68
69void * 69void *
70kmem_zalloc(size_t size, int flags) 70kmem_zalloc(size_t size, unsigned int __nocast flags)
71{ 71{
72 void *ptr; 72 void *ptr;
73 73
@@ -89,7 +89,8 @@ kmem_free(void *ptr, size_t size)
89} 89}
90 90
91void * 91void *
92kmem_realloc(void *ptr, size_t newsize, size_t oldsize, int flags) 92kmem_realloc(void *ptr, size_t newsize, size_t oldsize,
93 unsigned int __nocast flags)
93{ 94{
94 void *new; 95 void *new;
95 96
@@ -104,11 +105,11 @@ kmem_realloc(void *ptr, size_t newsize, size_t oldsize, int flags)
104} 105}
105 106
106void * 107void *
107kmem_zone_alloc(kmem_zone_t *zone, int flags) 108kmem_zone_alloc(kmem_zone_t *zone, unsigned int __nocast flags)
108{ 109{
109 int retries = 0; 110 int retries = 0;
110 int lflags = kmem_flags_convert(flags); 111 unsigned int lflags = kmem_flags_convert(flags);
111 void *ptr; 112 void *ptr;
112 113
113 do { 114 do {
114 ptr = kmem_cache_alloc(zone, lflags); 115 ptr = kmem_cache_alloc(zone, lflags);
@@ -123,7 +124,7 @@ kmem_zone_alloc(kmem_zone_t *zone, int flags)
123} 124}
124 125
125void * 126void *
126kmem_zone_zalloc(kmem_zone_t *zone, int flags) 127kmem_zone_zalloc(kmem_zone_t *zone, unsigned int __nocast flags)
127{ 128{
128 void *ptr; 129 void *ptr;
129 130
diff --git a/fs/xfs/linux-2.6/kmem.h b/fs/xfs/linux-2.6/kmem.h
index 1397b669b059..109fcf27e256 100644
--- a/fs/xfs/linux-2.6/kmem.h
+++ b/fs/xfs/linux-2.6/kmem.h
@@ -39,10 +39,10 @@
39/* 39/*
40 * memory management routines 40 * memory management routines
41 */ 41 */
42#define KM_SLEEP 0x0001 42#define KM_SLEEP 0x0001u
43#define KM_NOSLEEP 0x0002 43#define KM_NOSLEEP 0x0002u
44#define KM_NOFS 0x0004 44#define KM_NOFS 0x0004u
45#define KM_MAYFAIL 0x0008 45#define KM_MAYFAIL 0x0008u
46 46
47#define kmem_zone kmem_cache_s 47#define kmem_zone kmem_cache_s
48#define kmem_zone_t kmem_cache_t 48#define kmem_zone_t kmem_cache_t
@@ -81,9 +81,9 @@ typedef unsigned long xfs_pflags_t;
81 *(NSTATEP) = *(OSTATEP); \ 81 *(NSTATEP) = *(OSTATEP); \
82} while (0) 82} while (0)
83 83
84static __inline unsigned int kmem_flags_convert(int flags) 84static __inline unsigned int kmem_flags_convert(unsigned int __nocast flags)
85{ 85{
86 int lflags = __GFP_NOWARN; /* we'll report problems, if need be */ 86 unsigned int lflags = __GFP_NOWARN; /* we'll report problems, if need be */
87 87
88#ifdef DEBUG 88#ifdef DEBUG
89 if (unlikely(flags & ~(KM_SLEEP|KM_NOSLEEP|KM_NOFS|KM_MAYFAIL))) { 89 if (unlikely(flags & ~(KM_SLEEP|KM_NOSLEEP|KM_NOFS|KM_MAYFAIL))) {
@@ -125,12 +125,13 @@ kmem_zone_destroy(kmem_zone_t *zone)
125 BUG(); 125 BUG();
126} 126}
127 127
128extern void *kmem_zone_zalloc(kmem_zone_t *, int); 128extern void *kmem_zone_zalloc(kmem_zone_t *, unsigned int __nocast);
129extern void *kmem_zone_alloc(kmem_zone_t *, int); 129extern void *kmem_zone_alloc(kmem_zone_t *, unsigned int __nocast);
130 130
131extern void *kmem_alloc(size_t, int); 131extern void *kmem_alloc(size_t, unsigned int __nocast);
132extern void *kmem_realloc(void *, size_t, size_t, int); 132extern void *kmem_realloc(void *, size_t, size_t,
133extern void *kmem_zalloc(size_t, int); 133 unsigned int __nocast);
134extern void *kmem_zalloc(size_t, unsigned int __nocast);
134extern void kmem_free(void *, size_t); 135extern void kmem_free(void *, size_t);
135 136
136typedef struct shrinker *kmem_shaker_t; 137typedef struct shrinker *kmem_shaker_t;
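A caller sketch showing the intent of the __nocast change: XFS code keeps passing the KM_* constants and kmem_flags_convert() maps them to GFP_* internally. The struct xfs_example type is an assumption, not part of the patch.

struct xfs_example { int count; };

static struct xfs_example *example_alloc(void)
{
	/* KM_SLEEP: kmem_zalloc() retries until the allocation succeeds */
	return kmem_zalloc(sizeof(struct xfs_example), KM_SLEEP);
}

static void example_free(struct xfs_example *ex)
{
	kmem_free(ex, sizeof(*ex));
}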
diff --git a/fs/xfs/linux-2.6/spin.h b/fs/xfs/linux-2.6/spin.h
index bcf60a0b8df0..0039504069a5 100644
--- a/fs/xfs/linux-2.6/spin.h
+++ b/fs/xfs/linux-2.6/spin.h
@@ -45,6 +45,9 @@
45typedef spinlock_t lock_t; 45typedef spinlock_t lock_t;
46 46
47#define SPLDECL(s) unsigned long s 47#define SPLDECL(s) unsigned long s
48#ifndef DEFINE_SPINLOCK
49#define DEFINE_SPINLOCK(s) spinlock_t s = SPIN_LOCK_UNLOCKED
50#endif
48 51
49#define spinlock_init(lock, name) spin_lock_init(lock) 52#define spinlock_init(lock, name) spin_lock_init(lock)
50#define spinlock_destroy(lock) 53#define spinlock_destroy(lock)
diff --git a/fs/xfs/linux-2.6/xfs_aops.c b/fs/xfs/linux-2.6/xfs_aops.c
index a3a4b5aaf5d9..c6c077978fe3 100644
--- a/fs/xfs/linux-2.6/xfs_aops.c
+++ b/fs/xfs/linux-2.6/xfs_aops.c
@@ -104,66 +104,114 @@ xfs_page_trace(
104#define xfs_page_trace(tag, inode, page, mask) 104#define xfs_page_trace(tag, inode, page, mask)
105#endif 105#endif
106 106
107void 107/*
108linvfs_unwritten_done( 108 * Schedule IO completion handling on a xfsdatad if this was
109 struct buffer_head *bh, 109 * the final hold on this ioend.
110 int uptodate) 110 */
111STATIC void
112xfs_finish_ioend(
113 xfs_ioend_t *ioend)
111{ 114{
112 xfs_buf_t *pb = (xfs_buf_t *)bh->b_private; 115 if (atomic_dec_and_test(&ioend->io_remaining))
116 queue_work(xfsdatad_workqueue, &ioend->io_work);
117}
113 118
114 ASSERT(buffer_unwritten(bh)); 119STATIC void
115 bh->b_end_io = NULL; 120xfs_destroy_ioend(
116 clear_buffer_unwritten(bh); 121 xfs_ioend_t *ioend)
117 if (!uptodate) 122{
118 pagebuf_ioerror(pb, EIO); 123 vn_iowake(ioend->io_vnode);
119 if (atomic_dec_and_test(&pb->pb_io_remaining) == 1) { 124 mempool_free(ioend, xfs_ioend_pool);
120 pagebuf_iodone(pb, 1, 1);
121 }
122 end_buffer_async_write(bh, uptodate);
123} 125}
124 126
125/* 127/*
126 * Issue transactions to convert a buffer range from unwritten 128 * Issue transactions to convert a buffer range from unwritten
127 * to written extents (buffered IO). 129 * to written extents.
128 */ 130 */
129STATIC void 131STATIC void
130linvfs_unwritten_convert( 132xfs_end_bio_unwritten(
131 xfs_buf_t *bp) 133 void *data)
132{ 134{
133 vnode_t *vp = XFS_BUF_FSPRIVATE(bp, vnode_t *); 135 xfs_ioend_t *ioend = data;
134 int error; 136 vnode_t *vp = ioend->io_vnode;
137 xfs_off_t offset = ioend->io_offset;
138 size_t size = ioend->io_size;
139 struct buffer_head *bh, *next;
140 int error;
141
142 if (ioend->io_uptodate)
143 VOP_BMAP(vp, offset, size, BMAPI_UNWRITTEN, NULL, NULL, error);
144
145 /* ioend->io_buffer_head is only non-NULL for buffered I/O */
146 for (bh = ioend->io_buffer_head; bh; bh = next) {
147 next = bh->b_private;
148
149 bh->b_end_io = NULL;
150 clear_buffer_unwritten(bh);
151 end_buffer_async_write(bh, ioend->io_uptodate);
152 }
135 153
136 BUG_ON(atomic_read(&bp->pb_hold) < 1); 154 xfs_destroy_ioend(ioend);
137 VOP_BMAP(vp, XFS_BUF_OFFSET(bp), XFS_BUF_SIZE(bp),
138 BMAPI_UNWRITTEN, NULL, NULL, error);
139 XFS_BUF_SET_FSPRIVATE(bp, NULL);
140 XFS_BUF_CLR_IODONE_FUNC(bp);
141 XFS_BUF_UNDATAIO(bp);
142 iput(LINVFS_GET_IP(vp));
143 pagebuf_iodone(bp, 0, 0);
144} 155}
145 156
146/* 157/*
147 * Issue transactions to convert a buffer range from unwritten 158 * Allocate and initialise an IO completion structure.
148 * to written extents (direct IO). 159 * We need to track unwritten extent write completion here initially.
160 * We'll need to extend this for updating the ondisk inode size later
161 * (vs. incore size).
149 */ 162 */
150STATIC void 163STATIC xfs_ioend_t *
151linvfs_unwritten_convert_direct( 164xfs_alloc_ioend(
152 struct kiocb *iocb, 165 struct inode *inode)
153 loff_t offset,
154 ssize_t size,
155 void *private)
156{ 166{
157 struct inode *inode = iocb->ki_filp->f_dentry->d_inode; 167 xfs_ioend_t *ioend;
158 ASSERT(!private || inode == (struct inode *)private);
159 168
160 /* private indicates an unwritten extent lay beneath this IO */ 169 ioend = mempool_alloc(xfs_ioend_pool, GFP_NOFS);
161 if (private && size > 0) {
162 vnode_t *vp = LINVFS_GET_VP(inode);
163 int error;
164 170
165 VOP_BMAP(vp, offset, size, BMAPI_UNWRITTEN, NULL, NULL, error); 171 /*
166 } 172 * Set the count to 1 initially, which will prevent an I/O
173 * completion callback from calling the completion routine before
174 * we have started all of the I/O.
175 */
176 atomic_set(&ioend->io_remaining, 1);
177 ioend->io_uptodate = 1; /* cleared if any I/O fails */
178 ioend->io_vnode = LINVFS_GET_VP(inode);
179 ioend->io_buffer_head = NULL;
180 atomic_inc(&ioend->io_vnode->v_iocount);
181 ioend->io_offset = 0;
182 ioend->io_size = 0;
183
184 INIT_WORK(&ioend->io_work, xfs_end_bio_unwritten, ioend);
185
186 return ioend;
187}
188
189void
190linvfs_unwritten_done(
191 struct buffer_head *bh,
192 int uptodate)
193{
194 xfs_ioend_t *ioend = bh->b_private;
195 static spinlock_t unwritten_done_lock = SPIN_LOCK_UNLOCKED;
196 unsigned long flags;
197
198 ASSERT(buffer_unwritten(bh));
199 bh->b_end_io = NULL;
200
201 if (!uptodate)
202 ioend->io_uptodate = 0;
203
204 /*
205 * Deep magic here. We reuse b_private in the buffer_heads to build
206 * a chain for completing the I/O from user context after we've issued
207 * a transaction to convert the unwritten extent.
208 */
209 spin_lock_irqsave(&unwritten_done_lock, flags);
210 bh->b_private = ioend->io_buffer_head;
211 ioend->io_buffer_head = bh;
212 spin_unlock_irqrestore(&unwritten_done_lock, flags);
213
214 xfs_finish_ioend(ioend);
167} 215}
168 216
169STATIC int 217STATIC int
@@ -255,7 +303,7 @@ xfs_probe_unwritten_page(
255 struct address_space *mapping, 303 struct address_space *mapping,
256 pgoff_t index, 304 pgoff_t index,
257 xfs_iomap_t *iomapp, 305 xfs_iomap_t *iomapp,
258 xfs_buf_t *pb, 306 xfs_ioend_t *ioend,
259 unsigned long max_offset, 307 unsigned long max_offset,
260 unsigned long *fsbs, 308 unsigned long *fsbs,
261 unsigned int bbits) 309 unsigned int bbits)
@@ -283,7 +331,7 @@ xfs_probe_unwritten_page(
283 break; 331 break;
284 xfs_map_at_offset(page, bh, p_offset, bbits, iomapp); 332 xfs_map_at_offset(page, bh, p_offset, bbits, iomapp);
285 set_buffer_unwritten_io(bh); 333 set_buffer_unwritten_io(bh);
286 bh->b_private = pb; 334 bh->b_private = ioend;
287 p_offset += bh->b_size; 335 p_offset += bh->b_size;
288 (*fsbs)++; 336 (*fsbs)++;
289 } while ((bh = bh->b_this_page) != head); 337 } while ((bh = bh->b_this_page) != head);
@@ -434,34 +482,15 @@ xfs_map_unwritten(
434{ 482{
435 struct buffer_head *bh = curr; 483 struct buffer_head *bh = curr;
436 xfs_iomap_t *tmp; 484 xfs_iomap_t *tmp;
437 xfs_buf_t *pb; 485 xfs_ioend_t *ioend;
438 loff_t offset, size; 486 loff_t offset;
439 unsigned long nblocks = 0; 487 unsigned long nblocks = 0;
440 488
441 offset = start_page->index; 489 offset = start_page->index;
442 offset <<= PAGE_CACHE_SHIFT; 490 offset <<= PAGE_CACHE_SHIFT;
443 offset += p_offset; 491 offset += p_offset;
444 492
445 /* get an "empty" pagebuf to manage IO completion 493 ioend = xfs_alloc_ioend(inode);
446 * Proper values will be set before returning */
447 pb = pagebuf_lookup(iomapp->iomap_target, 0, 0, 0);
448 if (!pb)
449 return -EAGAIN;
450
451 /* Take a reference to the inode to prevent it from
452 * being reclaimed while we have outstanding unwritten
453 * extent IO on it.
454 */
455 if ((igrab(inode)) != inode) {
456 pagebuf_free(pb);
457 return -EAGAIN;
458 }
459
460 /* Set the count to 1 initially, this will stop an I/O
461 * completion callout which happens before we have started
462 * all the I/O from calling pagebuf_iodone too early.
463 */
464 atomic_set(&pb->pb_io_remaining, 1);
465 494
466 /* First map forwards in the page consecutive buffers 495 /* First map forwards in the page consecutive buffers
467 * covering this unwritten extent 496 * covering this unwritten extent
@@ -474,12 +503,12 @@ xfs_map_unwritten(
474 break; 503 break;
475 xfs_map_at_offset(start_page, bh, p_offset, block_bits, iomapp); 504 xfs_map_at_offset(start_page, bh, p_offset, block_bits, iomapp);
476 set_buffer_unwritten_io(bh); 505 set_buffer_unwritten_io(bh);
477 bh->b_private = pb; 506 bh->b_private = ioend;
478 p_offset += bh->b_size; 507 p_offset += bh->b_size;
479 nblocks++; 508 nblocks++;
480 } while ((bh = bh->b_this_page) != head); 509 } while ((bh = bh->b_this_page) != head);
481 510
482 atomic_add(nblocks, &pb->pb_io_remaining); 511 atomic_add(nblocks, &ioend->io_remaining);
483 512
484 /* If we reached the end of the page, map forwards in any 513 /* If we reached the end of the page, map forwards in any
485 * following pages which are also covered by this extent. 514 * following pages which are also covered by this extent.
@@ -496,13 +525,13 @@ xfs_map_unwritten(
496 tloff = min(tlast, tloff); 525 tloff = min(tlast, tloff);
497 for (tindex = start_page->index + 1; tindex < tloff; tindex++) { 526 for (tindex = start_page->index + 1; tindex < tloff; tindex++) {
498 page = xfs_probe_unwritten_page(mapping, 527 page = xfs_probe_unwritten_page(mapping,
499 tindex, iomapp, pb, 528 tindex, iomapp, ioend,
500 PAGE_CACHE_SIZE, &bs, bbits); 529 PAGE_CACHE_SIZE, &bs, bbits);
501 if (!page) 530 if (!page)
502 break; 531 break;
503 nblocks += bs; 532 nblocks += bs;
504 atomic_add(bs, &pb->pb_io_remaining); 533 atomic_add(bs, &ioend->io_remaining);
505 xfs_convert_page(inode, page, iomapp, wbc, pb, 534 xfs_convert_page(inode, page, iomapp, wbc, ioend,
506 startio, all_bh); 535 startio, all_bh);
507 /* stop if converting the next page might add 536 /* stop if converting the next page might add
508 * enough blocks that the corresponding byte 537 * enough blocks that the corresponding byte
@@ -514,12 +543,12 @@ xfs_map_unwritten(
514 if (tindex == tlast && 543 if (tindex == tlast &&
515 (pg_offset = (i_size_read(inode) & (PAGE_CACHE_SIZE - 1)))) { 544 (pg_offset = (i_size_read(inode) & (PAGE_CACHE_SIZE - 1)))) {
516 page = xfs_probe_unwritten_page(mapping, 545 page = xfs_probe_unwritten_page(mapping,
517 tindex, iomapp, pb, 546 tindex, iomapp, ioend,
518 pg_offset, &bs, bbits); 547 pg_offset, &bs, bbits);
519 if (page) { 548 if (page) {
520 nblocks += bs; 549 nblocks += bs;
521 atomic_add(bs, &pb->pb_io_remaining); 550 atomic_add(bs, &ioend->io_remaining);
522 xfs_convert_page(inode, page, iomapp, wbc, pb, 551 xfs_convert_page(inode, page, iomapp, wbc, ioend,
523 startio, all_bh); 552 startio, all_bh);
524 if (nblocks >= ((ULONG_MAX - PAGE_SIZE) >> block_bits)) 553 if (nblocks >= ((ULONG_MAX - PAGE_SIZE) >> block_bits))
525 goto enough; 554 goto enough;
@@ -528,21 +557,9 @@ xfs_map_unwritten(
528 } 557 }
529 558
530enough: 559enough:
531 size = nblocks; /* NB: using 64bit number here */ 560 ioend->io_size = (xfs_off_t)nblocks << block_bits;
532 size <<= block_bits; /* convert fsb's to byte range */ 561 ioend->io_offset = offset;
533 562 xfs_finish_ioend(ioend);
534 XFS_BUF_DATAIO(pb);
535 XFS_BUF_ASYNC(pb);
536 XFS_BUF_SET_SIZE(pb, size);
537 XFS_BUF_SET_COUNT(pb, size);
538 XFS_BUF_SET_OFFSET(pb, offset);
539 XFS_BUF_SET_FSPRIVATE(pb, LINVFS_GET_VP(inode));
540 XFS_BUF_SET_IODONE_FUNC(pb, linvfs_unwritten_convert);
541
542 if (atomic_dec_and_test(&pb->pb_io_remaining) == 1) {
543 pagebuf_iodone(pb, 1, 1);
544 }
545
546 return 0; 563 return 0;
547} 564}
548 565
@@ -787,7 +804,7 @@ xfs_page_state_convert(
787 continue; 804 continue;
788 if (!iomp) { 805 if (!iomp) {
789 err = xfs_map_blocks(inode, offset, len, &iomap, 806 err = xfs_map_blocks(inode, offset, len, &iomap,
790 BMAPI_READ|BMAPI_IGNSTATE); 807 BMAPI_WRITE|BMAPI_IGNSTATE);
791 if (err) { 808 if (err) {
792 goto error; 809 goto error;
793 } 810 }
@@ -1028,6 +1045,44 @@ linvfs_get_blocks_direct(
1028 create, 1, BMAPI_WRITE|BMAPI_DIRECT); 1045 create, 1, BMAPI_WRITE|BMAPI_DIRECT);
1029} 1046}
1030 1047
1048STATIC void
1049linvfs_end_io_direct(
1050 struct kiocb *iocb,
1051 loff_t offset,
1052 ssize_t size,
1053 void *private)
1054{
1055 xfs_ioend_t *ioend = iocb->private;
1056
1057 /*
1058 * Non-NULL private data means we need to issue a transaction to
1059 * convert a range from unwritten to written extents. This needs
1060 * to happen from process context but aio+dio I/O completion
1061 * happens from irq context so we need to defer it to a workqueue.
1062 * This is not necessary for synchronous direct I/O, but we do
1063 * it anyway to keep the code uniform and simpler.
1064 *
1065 * The core direct I/O code might be changed to always call the
1066 * completion handler in the future, in which case all this can
1067 * go away.
1068 */
1069 if (private && size > 0) {
1070 ioend->io_offset = offset;
1071 ioend->io_size = size;
1072 xfs_finish_ioend(ioend);
1073 } else {
1074 ASSERT(size >= 0);
1075 xfs_destroy_ioend(ioend);
1076 }
1077
1078 /*
1079 * blockdev_direct_IO can return an error even after the I/O
1080 * completion handler was called. Thus we need to protect
1081 * against double-freeing.
1082 */
1083 iocb->private = NULL;
1084}
1085
1031STATIC ssize_t 1086STATIC ssize_t
1032linvfs_direct_IO( 1087linvfs_direct_IO(
1033 int rw, 1088 int rw,
@@ -1042,16 +1097,23 @@ linvfs_direct_IO(
1042 xfs_iomap_t iomap; 1097 xfs_iomap_t iomap;
1043 int maps = 1; 1098 int maps = 1;
1044 int error; 1099 int error;
1100 ssize_t ret;
1045 1101
1046 VOP_BMAP(vp, offset, 0, BMAPI_DEVICE, &iomap, &maps, error); 1102 VOP_BMAP(vp, offset, 0, BMAPI_DEVICE, &iomap, &maps, error);
1047 if (error) 1103 if (error)
1048 return -error; 1104 return -error;
1049 1105
1050 return blockdev_direct_IO_own_locking(rw, iocb, inode, 1106 iocb->private = xfs_alloc_ioend(inode);
1107
1108 ret = blockdev_direct_IO_own_locking(rw, iocb, inode,
1051 iomap.iomap_target->pbr_bdev, 1109 iomap.iomap_target->pbr_bdev,
1052 iov, offset, nr_segs, 1110 iov, offset, nr_segs,
1053 linvfs_get_blocks_direct, 1111 linvfs_get_blocks_direct,
1054 linvfs_unwritten_convert_direct); 1112 linvfs_end_io_direct);
1113
1114 if (unlikely(ret <= 0 && iocb->private))
1115 xfs_destroy_ioend(iocb->private);
1116 return ret;
1055} 1117}
1056 1118
1057 1119
@@ -1202,6 +1264,16 @@ out_unlock:
1202 return error; 1264 return error;
1203} 1265}
1204 1266
1267STATIC int
1268linvfs_invalidate_page(
1269 struct page *page,
1270 unsigned long offset)
1271{
1272 xfs_page_trace(XFS_INVALIDPAGE_ENTER,
1273 page->mapping->host, page, offset);
1274 return block_invalidatepage(page, offset);
1275}
1276
1205/* 1277/*
1206 * Called to move a page into cleanable state - and from there 1278 * Called to move a page into cleanable state - and from there
1207 * to be released. Possibly the page is already clean. We always 1279 * to be released. Possibly the page is already clean. We always
@@ -1279,6 +1351,7 @@ struct address_space_operations linvfs_aops = {
1279 .writepage = linvfs_writepage, 1351 .writepage = linvfs_writepage,
1280 .sync_page = block_sync_page, 1352 .sync_page = block_sync_page,
1281 .releasepage = linvfs_release_page, 1353 .releasepage = linvfs_release_page,
1354 .invalidatepage = linvfs_invalidate_page,
1282 .prepare_write = linvfs_prepare_write, 1355 .prepare_write = linvfs_prepare_write,
1283 .commit_write = generic_commit_write, 1356 .commit_write = generic_commit_write,
1284 .bmap = linvfs_bmap, 1357 .bmap = linvfs_bmap,
diff --git a/fs/xfs/linux-2.6/xfs_aops.h b/fs/xfs/linux-2.6/xfs_aops.h
new file mode 100644
index 000000000000..2fa62974a04d
--- /dev/null
+++ b/fs/xfs/linux-2.6/xfs_aops.h
@@ -0,0 +1,50 @@
1/*
2 * Copyright (c) 2005 Silicon Graphics, Inc. All Rights Reserved.
3 *
4 * This program is free software; you can redistribute it and/or modify it
5 * under the terms of version 2 of the GNU General Public License as
6 * published by the Free Software Foundation.
7 *
8 * This program is distributed in the hope that it would be useful, but
9 * WITHOUT ANY WARRANTY; without even the implied warranty of
10 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
11 *
12 * Further, this software is distributed without any warranty that it is
13 * free of the rightful claim of any third person regarding infringement
14 * or the like. Any license provided herein, whether implied or
15 * otherwise, applies only to this software file. Patent licenses, if
16 * any, provided herein do not apply to combinations of this program with
17 * other software, or any other product whatsoever.
18 *
19 * You should have received a copy of the GNU General Public License along
20 * with this program; if not, write the Free Software Foundation, Inc., 59
21 * Temple Place - Suite 330, Boston MA 02111-1307, USA.
22 *
23 * Contact information: Silicon Graphics, Inc., 1600 Amphitheatre Pkwy,
24 * Mountain View, CA 94043, or:
25 *
26 * http://www.sgi.com
27 *
28 * For further information regarding this notice, see:
29 *
30 * http://oss.sgi.com/projects/GenInfo/SGIGPLNoticeExplan/
31 */
32#ifndef __XFS_AOPS_H__
33#define __XFS_AOPS_H__
34
35extern struct workqueue_struct *xfsdatad_workqueue;
36extern mempool_t *xfs_ioend_pool;
37
38typedef void (*xfs_ioend_func_t)(void *);
39
40typedef struct xfs_ioend {
41 unsigned int io_uptodate; /* I/O status register */
42 atomic_t io_remaining; /* hold count */
43 struct vnode *io_vnode; /* file being written to */
44 struct buffer_head *io_buffer_head;/* buffer linked list head */
45 size_t io_size; /* size of the extent */
46 xfs_off_t io_offset; /* offset in the file */
47 struct work_struct io_work; /* xfsdatad work queue */
48} xfs_ioend_t;
49
50#endif /* __XFS_AOPS_H__ */
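
The xfs_alloc_ioend()/xfs_finish_ioend()/xfs_destroy_ioend() helpers used by linvfs_end_io_direct() above are added by an earlier hunk of this patch and are not visible here. As a minimal sketch only -- assuming 2.6-era kernel APIs and the fields declared in this header, with illustrative names -- the mempool-backed ioend lifecycle looks roughly like this:

	STATIC xfs_ioend_t *
	sketch_alloc_ioend(
		struct inode		*inode)
	{
		xfs_ioend_t		*ioend;

		ioend = mempool_alloc(xfs_ioend_pool, GFP_NOFS);
		ioend->io_uptodate = 1;			/* cleared on I/O error */
		atomic_set(&ioend->io_remaining, 1);	/* caller's reference */
		ioend->io_vnode = LINVFS_GET_VP(inode);
		ioend->io_buffer_head = NULL;
		ioend->io_offset = 0;
		ioend->io_size = 0;
		/* io_work is assumed to be initialised elsewhere with the
		 * completion routine that xfsdatad runs */
		return ioend;
	}

	STATIC void
	sketch_finish_ioend(
		xfs_ioend_t		*ioend)
	{
		/* dropping the last reference hands the ioend to xfsdatad */
		if (atomic_dec_and_test(&ioend->io_remaining))
			queue_work(xfsdatad_workqueue, &ioend->io_work);
	}

	STATIC void
	sketch_destroy_ioend(
		xfs_ioend_t		*ioend)
	{
		mempool_free(ioend, xfs_ioend_pool);
	}
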
diff --git a/fs/xfs/linux-2.6/xfs_buf.c b/fs/xfs/linux-2.6/xfs_buf.c
index df0cba239dd5..655bf4a78afe 100644
--- a/fs/xfs/linux-2.6/xfs_buf.c
+++ b/fs/xfs/linux-2.6/xfs_buf.c
@@ -1,5 +1,5 @@
1/* 1/*
2 * Copyright (c) 2000-2004 Silicon Graphics, Inc. All Rights Reserved. 2 * Copyright (c) 2000-2005 Silicon Graphics, Inc. All Rights Reserved.
3 * 3 *
4 * This program is free software; you can redistribute it and/or modify it 4 * This program is free software; you can redistribute it and/or modify it
5 * under the terms of version 2 of the GNU General Public License as 5 * under the terms of version 2 of the GNU General Public License as
@@ -54,6 +54,7 @@
54#include <linux/percpu.h> 54#include <linux/percpu.h>
55#include <linux/blkdev.h> 55#include <linux/blkdev.h>
56#include <linux/hash.h> 56#include <linux/hash.h>
57#include <linux/kthread.h>
57 58
58#include "xfs_linux.h" 59#include "xfs_linux.h"
59 60
@@ -67,7 +68,7 @@ STATIC int xfsbufd_wakeup(int, unsigned int);
67STATIC void pagebuf_delwri_queue(xfs_buf_t *, int); 68STATIC void pagebuf_delwri_queue(xfs_buf_t *, int);
68 69
69STATIC struct workqueue_struct *xfslogd_workqueue; 70STATIC struct workqueue_struct *xfslogd_workqueue;
70STATIC struct workqueue_struct *xfsdatad_workqueue; 71struct workqueue_struct *xfsdatad_workqueue;
71 72
72/* 73/*
73 * Pagebuf debugging 74 * Pagebuf debugging
@@ -590,8 +591,10 @@ found:
590 PB_SET_OWNER(pb); 591 PB_SET_OWNER(pb);
591 } 592 }
592 593
593 if (pb->pb_flags & PBF_STALE) 594 if (pb->pb_flags & PBF_STALE) {
595 ASSERT((pb->pb_flags & _PBF_DELWRI_Q) == 0);
594 pb->pb_flags &= PBF_MAPPED; 596 pb->pb_flags &= PBF_MAPPED;
597 }
595 PB_TRACE(pb, "got_lock", 0); 598 PB_TRACE(pb, "got_lock", 0);
596 XFS_STATS_INC(pb_get_locked); 599 XFS_STATS_INC(pb_get_locked);
597 return (pb); 600 return (pb);
@@ -700,25 +703,6 @@ xfs_buf_read_flags(
700} 703}
701 704
702/* 705/*
703 * Create a skeletal pagebuf (no pages associated with it).
704 */
705xfs_buf_t *
706pagebuf_lookup(
707 xfs_buftarg_t *target,
708 loff_t ioff,
709 size_t isize,
710 page_buf_flags_t flags)
711{
712 xfs_buf_t *pb;
713
714 pb = pagebuf_allocate(flags);
715 if (pb) {
716 _pagebuf_initialize(pb, target, ioff, isize, flags);
717 }
718 return pb;
719}
720
721/*
722 * If we are not low on memory then do the readahead in a deadlock 706 * If we are not low on memory then do the readahead in a deadlock
723 * safe manner. 707 * safe manner.
724 */ 708 */
@@ -913,22 +897,23 @@ pagebuf_rele(
913 do_free = 0; 897 do_free = 0;
914 } 898 }
915 899
916 if (pb->pb_flags & PBF_DELWRI) { 900 if (pb->pb_flags & PBF_FS_MANAGED) {
917 pb->pb_flags |= PBF_ASYNC;
918 atomic_inc(&pb->pb_hold);
919 pagebuf_delwri_queue(pb, 0);
920 do_free = 0;
921 } else if (pb->pb_flags & PBF_FS_MANAGED) {
922 do_free = 0; 901 do_free = 0;
923 } 902 }
924 903
925 if (do_free) { 904 if (do_free) {
905 ASSERT((pb->pb_flags & (PBF_DELWRI|_PBF_DELWRI_Q)) == 0);
926 list_del_init(&pb->pb_hash_list); 906 list_del_init(&pb->pb_hash_list);
927 spin_unlock(&hash->bh_lock); 907 spin_unlock(&hash->bh_lock);
928 pagebuf_free(pb); 908 pagebuf_free(pb);
929 } else { 909 } else {
930 spin_unlock(&hash->bh_lock); 910 spin_unlock(&hash->bh_lock);
931 } 911 }
912 } else {
913 /*
914 * Catch reference count leaks
915 */
916 ASSERT(atomic_read(&pb->pb_hold) >= 0);
932 } 917 }
933} 918}
934 919
@@ -1006,13 +991,24 @@ pagebuf_lock(
1006 * pagebuf_unlock 991 * pagebuf_unlock
1007 * 992 *
1008 * pagebuf_unlock releases the lock on the buffer object created by 993 * pagebuf_unlock releases the lock on the buffer object created by
1009 * pagebuf_lock or pagebuf_cond_lock (not any 994 * pagebuf_lock or pagebuf_cond_lock (not any pinning of underlying pages
1010 * pinning of underlying pages created by pagebuf_pin). 995 * created by pagebuf_pin).
996 *
997 * If the buffer is marked delwri but is not queued, do so before we
998 * unlock the buffer as we need to set flags correctly. We also need to
999 * take a reference for the delwri queue because the unlocker is going to
                                 1000 * drop theirs and they don't know we just queued it.
1011 */ 1001 */
1012void 1002void
1013pagebuf_unlock( /* unlock buffer */ 1003pagebuf_unlock( /* unlock buffer */
1014 xfs_buf_t *pb) /* buffer to unlock */ 1004 xfs_buf_t *pb) /* buffer to unlock */
1015{ 1005{
1006 if ((pb->pb_flags & (PBF_DELWRI|_PBF_DELWRI_Q)) == PBF_DELWRI) {
1007 atomic_inc(&pb->pb_hold);
1008 pb->pb_flags |= PBF_ASYNC;
1009 pagebuf_delwri_queue(pb, 0);
1010 }
1011
1016 PB_CLEAR_OWNER(pb); 1012 PB_CLEAR_OWNER(pb);
1017 up(&pb->pb_sema); 1013 up(&pb->pb_sema);
1018 PB_TRACE(pb, "unlock", 0); 1014 PB_TRACE(pb, "unlock", 0);
@@ -1249,8 +1245,8 @@ bio_end_io_pagebuf(
1249 int error) 1245 int error)
1250{ 1246{
1251 xfs_buf_t *pb = (xfs_buf_t *)bio->bi_private; 1247 xfs_buf_t *pb = (xfs_buf_t *)bio->bi_private;
1252 unsigned int i, blocksize = pb->pb_target->pbr_bsize; 1248 unsigned int blocksize = pb->pb_target->pbr_bsize;
1253 struct bio_vec *bvec = bio->bi_io_vec; 1249 struct bio_vec *bvec = bio->bi_io_vec + bio->bi_vcnt - 1;
1254 1250
1255 if (bio->bi_size) 1251 if (bio->bi_size)
1256 return 1; 1252 return 1;
@@ -1258,10 +1254,12 @@ bio_end_io_pagebuf(
1258 if (!test_bit(BIO_UPTODATE, &bio->bi_flags)) 1254 if (!test_bit(BIO_UPTODATE, &bio->bi_flags))
1259 pb->pb_error = EIO; 1255 pb->pb_error = EIO;
1260 1256
1261 for (i = 0; i < bio->bi_vcnt; i++, bvec++) { 1257 do {
1262 struct page *page = bvec->bv_page; 1258 struct page *page = bvec->bv_page;
1263 1259
1264 if (pb->pb_error) { 1260 if (unlikely(pb->pb_error)) {
1261 if (pb->pb_flags & PBF_READ)
1262 ClearPageUptodate(page);
1265 SetPageError(page); 1263 SetPageError(page);
1266 } else if (blocksize == PAGE_CACHE_SIZE) { 1264 } else if (blocksize == PAGE_CACHE_SIZE) {
1267 SetPageUptodate(page); 1265 SetPageUptodate(page);
@@ -1270,10 +1268,13 @@ bio_end_io_pagebuf(
1270 set_page_region(page, bvec->bv_offset, bvec->bv_len); 1268 set_page_region(page, bvec->bv_offset, bvec->bv_len);
1271 } 1269 }
1272 1270
1271 if (--bvec >= bio->bi_io_vec)
1272 prefetchw(&bvec->bv_page->flags);
1273
1273 if (_pagebuf_iolocked(pb)) { 1274 if (_pagebuf_iolocked(pb)) {
1274 unlock_page(page); 1275 unlock_page(page);
1275 } 1276 }
1276 } 1277 } while (bvec >= bio->bi_io_vec);
1277 1278
1278 _pagebuf_iodone(pb, 1); 1279 _pagebuf_iodone(pb, 1);
1279 bio_put(bio); 1280 bio_put(bio);
@@ -1511,6 +1512,11 @@ again:
1511 ASSERT(btp == bp->pb_target); 1512 ASSERT(btp == bp->pb_target);
1512 if (!(bp->pb_flags & PBF_FS_MANAGED)) { 1513 if (!(bp->pb_flags & PBF_FS_MANAGED)) {
1513 spin_unlock(&hash->bh_lock); 1514 spin_unlock(&hash->bh_lock);
1515 /*
1516 * Catch superblock reference count leaks
1517 * immediately
1518 */
1519 BUG_ON(bp->pb_bn == 0);
1514 delay(100); 1520 delay(100);
1515 goto again; 1521 goto again;
1516 } 1522 }
@@ -1686,17 +1692,20 @@ pagebuf_delwri_queue(
1686 int unlock) 1692 int unlock)
1687{ 1693{
1688 PB_TRACE(pb, "delwri_q", (long)unlock); 1694 PB_TRACE(pb, "delwri_q", (long)unlock);
1689 ASSERT(pb->pb_flags & PBF_DELWRI); 1695 ASSERT((pb->pb_flags & (PBF_DELWRI|PBF_ASYNC)) ==
1696 (PBF_DELWRI|PBF_ASYNC));
1690 1697
1691 spin_lock(&pbd_delwrite_lock); 1698 spin_lock(&pbd_delwrite_lock);
1692 /* If already in the queue, dequeue and place at tail */ 1699 /* If already in the queue, dequeue and place at tail */
1693 if (!list_empty(&pb->pb_list)) { 1700 if (!list_empty(&pb->pb_list)) {
1701 ASSERT(pb->pb_flags & _PBF_DELWRI_Q);
1694 if (unlock) { 1702 if (unlock) {
1695 atomic_dec(&pb->pb_hold); 1703 atomic_dec(&pb->pb_hold);
1696 } 1704 }
1697 list_del(&pb->pb_list); 1705 list_del(&pb->pb_list);
1698 } 1706 }
1699 1707
1708 pb->pb_flags |= _PBF_DELWRI_Q;
1700 list_add_tail(&pb->pb_list, &pbd_delwrite_queue); 1709 list_add_tail(&pb->pb_list, &pbd_delwrite_queue);
1701 pb->pb_queuetime = jiffies; 1710 pb->pb_queuetime = jiffies;
1702 spin_unlock(&pbd_delwrite_lock); 1711 spin_unlock(&pbd_delwrite_lock);
@@ -1713,10 +1722,11 @@ pagebuf_delwri_dequeue(
1713 1722
1714 spin_lock(&pbd_delwrite_lock); 1723 spin_lock(&pbd_delwrite_lock);
1715 if ((pb->pb_flags & PBF_DELWRI) && !list_empty(&pb->pb_list)) { 1724 if ((pb->pb_flags & PBF_DELWRI) && !list_empty(&pb->pb_list)) {
1725 ASSERT(pb->pb_flags & _PBF_DELWRI_Q);
1716 list_del_init(&pb->pb_list); 1726 list_del_init(&pb->pb_list);
1717 dequeued = 1; 1727 dequeued = 1;
1718 } 1728 }
1719 pb->pb_flags &= ~PBF_DELWRI; 1729 pb->pb_flags &= ~(PBF_DELWRI|_PBF_DELWRI_Q);
1720 spin_unlock(&pbd_delwrite_lock); 1730 spin_unlock(&pbd_delwrite_lock);
1721 1731
1722 if (dequeued) 1732 if (dequeued)
@@ -1733,9 +1743,7 @@ pagebuf_runall_queues(
1733} 1743}
1734 1744
1735/* Defines for pagebuf daemon */ 1745/* Defines for pagebuf daemon */
1736STATIC DECLARE_COMPLETION(xfsbufd_done);
1737STATIC struct task_struct *xfsbufd_task; 1746STATIC struct task_struct *xfsbufd_task;
1738STATIC int xfsbufd_active;
1739STATIC int xfsbufd_force_flush; 1747STATIC int xfsbufd_force_flush;
1740STATIC int xfsbufd_force_sleep; 1748STATIC int xfsbufd_force_sleep;
1741 1749
@@ -1761,14 +1769,8 @@ xfsbufd(
1761 xfs_buftarg_t *target; 1769 xfs_buftarg_t *target;
1762 xfs_buf_t *pb, *n; 1770 xfs_buf_t *pb, *n;
1763 1771
1764 /* Set up the thread */
1765 daemonize("xfsbufd");
1766 current->flags |= PF_MEMALLOC; 1772 current->flags |= PF_MEMALLOC;
1767 1773
1768 xfsbufd_task = current;
1769 xfsbufd_active = 1;
1770 barrier();
1771
1772 INIT_LIST_HEAD(&tmp); 1774 INIT_LIST_HEAD(&tmp);
1773 do { 1775 do {
1774 if (unlikely(freezing(current))) { 1776 if (unlikely(freezing(current))) {
@@ -1795,7 +1797,7 @@ xfsbufd(
1795 break; 1797 break;
1796 } 1798 }
1797 1799
1798 pb->pb_flags &= ~PBF_DELWRI; 1800 pb->pb_flags &= ~(PBF_DELWRI|_PBF_DELWRI_Q);
1799 pb->pb_flags |= PBF_WRITE; 1801 pb->pb_flags |= PBF_WRITE;
1800 list_move(&pb->pb_list, &tmp); 1802 list_move(&pb->pb_list, &tmp);
1801 } 1803 }
@@ -1816,9 +1818,9 @@ xfsbufd(
1816 purge_addresses(); 1818 purge_addresses();
1817 1819
1818 xfsbufd_force_flush = 0; 1820 xfsbufd_force_flush = 0;
1819 } while (xfsbufd_active); 1821 } while (!kthread_should_stop());
1820 1822
1821 complete_and_exit(&xfsbufd_done, 0); 1823 return 0;
1822} 1824}
1823 1825
1824/* 1826/*
@@ -1845,15 +1847,13 @@ xfs_flush_buftarg(
1845 if (pb->pb_target != target) 1847 if (pb->pb_target != target)
1846 continue; 1848 continue;
1847 1849
1848 ASSERT(pb->pb_flags & PBF_DELWRI); 1850 ASSERT(pb->pb_flags & (PBF_DELWRI|_PBF_DELWRI_Q));
1849 PB_TRACE(pb, "walkq2", (long)pagebuf_ispin(pb)); 1851 PB_TRACE(pb, "walkq2", (long)pagebuf_ispin(pb));
1850 if (pagebuf_ispin(pb)) { 1852 if (pagebuf_ispin(pb)) {
1851 pincount++; 1853 pincount++;
1852 continue; 1854 continue;
1853 } 1855 }
1854 1856
1855 pb->pb_flags &= ~PBF_DELWRI;
1856 pb->pb_flags |= PBF_WRITE;
1857 list_move(&pb->pb_list, &tmp); 1857 list_move(&pb->pb_list, &tmp);
1858 } 1858 }
1859 spin_unlock(&pbd_delwrite_lock); 1859 spin_unlock(&pbd_delwrite_lock);
@@ -1862,12 +1862,14 @@ xfs_flush_buftarg(
1862 * Dropped the delayed write list lock, now walk the temporary list 1862 * Dropped the delayed write list lock, now walk the temporary list
1863 */ 1863 */
1864 list_for_each_entry_safe(pb, n, &tmp, pb_list) { 1864 list_for_each_entry_safe(pb, n, &tmp, pb_list) {
1865 pagebuf_lock(pb);
1866 pb->pb_flags &= ~(PBF_DELWRI|_PBF_DELWRI_Q);
1867 pb->pb_flags |= PBF_WRITE;
1865 if (wait) 1868 if (wait)
1866 pb->pb_flags &= ~PBF_ASYNC; 1869 pb->pb_flags &= ~PBF_ASYNC;
1867 else 1870 else
1868 list_del_init(&pb->pb_list); 1871 list_del_init(&pb->pb_list);
1869 1872
1870 pagebuf_lock(pb);
1871 pagebuf_iostrategy(pb); 1873 pagebuf_iostrategy(pb);
1872 } 1874 }
1873 1875
@@ -1901,9 +1903,11 @@ xfs_buf_daemons_start(void)
1901 if (!xfsdatad_workqueue) 1903 if (!xfsdatad_workqueue)
1902 goto out_destroy_xfslogd_workqueue; 1904 goto out_destroy_xfslogd_workqueue;
1903 1905
1904 error = kernel_thread(xfsbufd, NULL, CLONE_FS|CLONE_FILES); 1906 xfsbufd_task = kthread_run(xfsbufd, NULL, "xfsbufd");
1905 if (error < 0) 1907 if (IS_ERR(xfsbufd_task)) {
1908 error = PTR_ERR(xfsbufd_task);
1906 goto out_destroy_xfsdatad_workqueue; 1909 goto out_destroy_xfsdatad_workqueue;
1910 }
1907 return 0; 1911 return 0;
1908 1912
1909 out_destroy_xfsdatad_workqueue: 1913 out_destroy_xfsdatad_workqueue:
@@ -1920,10 +1924,7 @@ xfs_buf_daemons_start(void)
1920STATIC void 1924STATIC void
1921xfs_buf_daemons_stop(void) 1925xfs_buf_daemons_stop(void)
1922{ 1926{
1923 xfsbufd_active = 0; 1927 kthread_stop(xfsbufd_task);
1924 barrier();
1925 wait_for_completion(&xfsbufd_done);
1926
1927 destroy_workqueue(xfslogd_workqueue); 1928 destroy_workqueue(xfslogd_workqueue);
1928 destroy_workqueue(xfsdatad_workqueue); 1929 destroy_workqueue(xfsdatad_workqueue);
1929} 1930}
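
The xfsbufd changes above replace the old daemonize()/xfsbufd_active/completion handshake with the kthread API. A minimal self-contained sketch of that pattern, assuming 2.6-era <linux/kthread.h> and using illustrative names:

	#include <linux/kthread.h>
	#include <linux/sched.h>
	#include <linux/err.h>

	static struct task_struct *sketch_task;

	static int
	sketch_daemon(void *arg)
	{
		/* kthread_stop() wakes the thread and makes this test true */
		while (!kthread_should_stop()) {
			set_current_state(TASK_INTERRUPTIBLE);
			schedule_timeout(HZ);
			/* ... periodic flush work ... */
		}
		return 0;	/* return value is collected by kthread_stop() */
	}

	static int
	sketch_start(void)
	{
		sketch_task = kthread_run(sketch_daemon, NULL, "sketchd");
		if (IS_ERR(sketch_task))
			return PTR_ERR(sketch_task);
		return 0;
	}

	static void
	sketch_stop(void)
	{
		kthread_stop(sketch_task);	/* waits for sketch_daemon() to exit */
	}
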
diff --git a/fs/xfs/linux-2.6/xfs_buf.h b/fs/xfs/linux-2.6/xfs_buf.h
index 3f8f69a66aea..67c19f799232 100644
--- a/fs/xfs/linux-2.6/xfs_buf.h
+++ b/fs/xfs/linux-2.6/xfs_buf.h
@@ -89,6 +89,7 @@ typedef enum page_buf_flags_e { /* pb_flags values */
89 _PBF_PAGE_CACHE = (1 << 17),/* backed by pagecache */ 89 _PBF_PAGE_CACHE = (1 << 17),/* backed by pagecache */
90 _PBF_KMEM_ALLOC = (1 << 18),/* backed by kmem_alloc() */ 90 _PBF_KMEM_ALLOC = (1 << 18),/* backed by kmem_alloc() */
91 _PBF_RUN_QUEUES = (1 << 19),/* run block device task queue */ 91 _PBF_RUN_QUEUES = (1 << 19),/* run block device task queue */
92 _PBF_DELWRI_Q = (1 << 21), /* buffer on delwri queue */
92} page_buf_flags_t; 93} page_buf_flags_t;
93 94
94#define PBF_UPDATE (PBF_READ | PBF_WRITE) 95#define PBF_UPDATE (PBF_READ | PBF_WRITE)
@@ -206,13 +207,6 @@ extern xfs_buf_t *xfs_buf_read_flags( /* allocate and read a buffer */
206#define xfs_buf_read(target, blkno, len, flags) \ 207#define xfs_buf_read(target, blkno, len, flags) \
207 xfs_buf_read_flags((target), (blkno), (len), PBF_LOCK | PBF_MAPPED) 208 xfs_buf_read_flags((target), (blkno), (len), PBF_LOCK | PBF_MAPPED)
208 209
209extern xfs_buf_t *pagebuf_lookup(
210 xfs_buftarg_t *,
211 loff_t, /* starting offset of range */
212 size_t, /* length of range */
213 page_buf_flags_t); /* PBF_READ, PBF_WRITE, */
214 /* PBF_FORCEIO, */
215
216extern xfs_buf_t *pagebuf_get_empty( /* allocate pagebuf struct with */ 210extern xfs_buf_t *pagebuf_get_empty( /* allocate pagebuf struct with */
217 /* no memory or disk address */ 211 /* no memory or disk address */
218 size_t len, 212 size_t len,
@@ -344,8 +338,6 @@ extern void pagebuf_trace(
344 338
345 339
346 340
347
348
349/* These are just for xfs_syncsub... it sets an internal variable 341/* These are just for xfs_syncsub... it sets an internal variable
350 * then passes it to VOP_FLUSH_PAGES or adds the flags to a newly gotten buf_t 342 * then passes it to VOP_FLUSH_PAGES or adds the flags to a newly gotten buf_t
351 */ 343 */
@@ -452,7 +444,7 @@ extern void pagebuf_trace(
452 444
453#define XFS_BUF_PTR(bp) (xfs_caddr_t)((bp)->pb_addr) 445#define XFS_BUF_PTR(bp) (xfs_caddr_t)((bp)->pb_addr)
454 446
455extern inline xfs_caddr_t xfs_buf_offset(xfs_buf_t *bp, size_t offset) 447static inline xfs_caddr_t xfs_buf_offset(xfs_buf_t *bp, size_t offset)
456{ 448{
457 if (bp->pb_flags & PBF_MAPPED) 449 if (bp->pb_flags & PBF_MAPPED)
458 return XFS_BUF_PTR(bp) + offset; 450 return XFS_BUF_PTR(bp) + offset;
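
For reference, bio_end_io_pagebuf() above switches from a forward index loop over bio->bi_io_vec to a backwards walk that prefetches the next page's flags word. Stripped of the pagebuf-specific handling, and assuming the 2.6-era struct bio layout, the idiom is:

	#include <linux/bio.h>
	#include <linux/prefetch.h>

	static void
	sketch_for_each_bvec_reverse(struct bio *bio)
	{
		/* start at the last segment and walk back to bi_io_vec[0] */
		struct bio_vec *bvec = bio->bi_io_vec + bio->bi_vcnt - 1;

		do {
			struct page *page = bvec->bv_page;

			(void)page;	/* per-page completion work goes here */

			/* step to the previous segment and prefetch the flags
			 * word of the page handled on the next pass */
			if (--bvec >= bio->bi_io_vec)
				prefetchw(&bvec->bv_page->flags);
		} while (bvec >= bio->bi_io_vec);
	}
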
diff --git a/fs/xfs/linux-2.6/xfs_file.c b/fs/xfs/linux-2.6/xfs_file.c
index f1ce4323f56e..3881622bcf08 100644
--- a/fs/xfs/linux-2.6/xfs_file.c
+++ b/fs/xfs/linux-2.6/xfs_file.c
@@ -311,6 +311,31 @@ linvfs_fsync(
311 311
312#define nextdp(dp) ((struct xfs_dirent *)((char *)(dp) + (dp)->d_reclen)) 312#define nextdp(dp) ((struct xfs_dirent *)((char *)(dp) + (dp)->d_reclen))
313 313
314#ifdef CONFIG_XFS_DMAPI
315
316STATIC struct page *
317linvfs_filemap_nopage(
318 struct vm_area_struct *area,
319 unsigned long address,
320 int *type)
321{
322 struct inode *inode = area->vm_file->f_dentry->d_inode;
323 vnode_t *vp = LINVFS_GET_VP(inode);
324 xfs_mount_t *mp = XFS_VFSTOM(vp->v_vfsp);
325 int error;
326
327 ASSERT_ALWAYS(vp->v_vfsp->vfs_flag & VFS_DMI);
328
329 error = XFS_SEND_MMAP(mp, area, 0);
330 if (error)
331 return NULL;
332
333 return filemap_nopage(area, address, type);
334}
335
336#endif /* CONFIG_XFS_DMAPI */
337
338
314STATIC int 339STATIC int
315linvfs_readdir( 340linvfs_readdir(
316 struct file *filp, 341 struct file *filp,
@@ -390,14 +415,6 @@ done:
390 return -error; 415 return -error;
391} 416}
392 417
393#ifdef CONFIG_XFS_DMAPI
394STATIC void
395linvfs_mmap_close(
396 struct vm_area_struct *vma)
397{
398 xfs_dm_mm_put(vma);
399}
400#endif /* CONFIG_XFS_DMAPI */
401 418
402STATIC int 419STATIC int
403linvfs_file_mmap( 420linvfs_file_mmap(
@@ -411,16 +428,11 @@ linvfs_file_mmap(
411 428
412 vma->vm_ops = &linvfs_file_vm_ops; 429 vma->vm_ops = &linvfs_file_vm_ops;
413 430
414 if (vp->v_vfsp->vfs_flag & VFS_DMI) {
415 xfs_mount_t *mp = XFS_VFSTOM(vp->v_vfsp);
416
417 error = -XFS_SEND_MMAP(mp, vma, 0);
418 if (error)
419 return error;
420#ifdef CONFIG_XFS_DMAPI 431#ifdef CONFIG_XFS_DMAPI
432 if (vp->v_vfsp->vfs_flag & VFS_DMI) {
421 vma->vm_ops = &linvfs_dmapi_file_vm_ops; 433 vma->vm_ops = &linvfs_dmapi_file_vm_ops;
422#endif
423 } 434 }
435#endif /* CONFIG_XFS_DMAPI */
424 436
425 VOP_SETATTR(vp, &va, XFS_AT_UPDATIME, NULL, error); 437 VOP_SETATTR(vp, &va, XFS_AT_UPDATIME, NULL, error);
426 if (!error) 438 if (!error)
@@ -474,6 +486,7 @@ linvfs_ioctl_invis(
474 return error; 486 return error;
475} 487}
476 488
489#ifdef CONFIG_XFS_DMAPI
477#ifdef HAVE_VMOP_MPROTECT 490#ifdef HAVE_VMOP_MPROTECT
478STATIC int 491STATIC int
479linvfs_mprotect( 492linvfs_mprotect(
@@ -494,6 +507,7 @@ linvfs_mprotect(
494 return error; 507 return error;
495} 508}
496#endif /* HAVE_VMOP_MPROTECT */ 509#endif /* HAVE_VMOP_MPROTECT */
510#endif /* CONFIG_XFS_DMAPI */
497 511
498#ifdef HAVE_FOP_OPEN_EXEC 512#ifdef HAVE_FOP_OPEN_EXEC
499/* If the user is attempting to execute a file that is offline then 513/* If the user is attempting to execute a file that is offline then
@@ -528,49 +542,10 @@ open_exec_out:
528} 542}
529#endif /* HAVE_FOP_OPEN_EXEC */ 543#endif /* HAVE_FOP_OPEN_EXEC */
530 544
531/*
532 * Temporary workaround to the AIO direct IO write problem.
533 * This code can go and we can revert to do_sync_write once
534 * the writepage(s) rework is merged.
535 */
536STATIC ssize_t
537linvfs_write(
538 struct file *filp,
539 const char __user *buf,
540 size_t len,
541 loff_t *ppos)
542{
543 struct kiocb kiocb;
544 ssize_t ret;
545
546 init_sync_kiocb(&kiocb, filp);
547 kiocb.ki_pos = *ppos;
548 ret = __linvfs_write(&kiocb, buf, 0, len, kiocb.ki_pos);
549 *ppos = kiocb.ki_pos;
550 return ret;
551}
552STATIC ssize_t
553linvfs_write_invis(
554 struct file *filp,
555 const char __user *buf,
556 size_t len,
557 loff_t *ppos)
558{
559 struct kiocb kiocb;
560 ssize_t ret;
561
562 init_sync_kiocb(&kiocb, filp);
563 kiocb.ki_pos = *ppos;
564 ret = __linvfs_write(&kiocb, buf, IO_INVIS, len, kiocb.ki_pos);
565 *ppos = kiocb.ki_pos;
566 return ret;
567}
568
569
570struct file_operations linvfs_file_operations = { 545struct file_operations linvfs_file_operations = {
571 .llseek = generic_file_llseek, 546 .llseek = generic_file_llseek,
572 .read = do_sync_read, 547 .read = do_sync_read,
573 .write = linvfs_write, 548 .write = do_sync_write,
574 .readv = linvfs_readv, 549 .readv = linvfs_readv,
575 .writev = linvfs_writev, 550 .writev = linvfs_writev,
576 .aio_read = linvfs_aio_read, 551 .aio_read = linvfs_aio_read,
@@ -592,7 +567,7 @@ struct file_operations linvfs_file_operations = {
592struct file_operations linvfs_invis_file_operations = { 567struct file_operations linvfs_invis_file_operations = {
593 .llseek = generic_file_llseek, 568 .llseek = generic_file_llseek,
594 .read = do_sync_read, 569 .read = do_sync_read,
595 .write = linvfs_write_invis, 570 .write = do_sync_write,
596 .readv = linvfs_readv_invis, 571 .readv = linvfs_readv_invis,
597 .writev = linvfs_writev_invis, 572 .writev = linvfs_writev_invis,
598 .aio_read = linvfs_aio_read_invis, 573 .aio_read = linvfs_aio_read_invis,
@@ -626,8 +601,7 @@ static struct vm_operations_struct linvfs_file_vm_ops = {
626 601
627#ifdef CONFIG_XFS_DMAPI 602#ifdef CONFIG_XFS_DMAPI
628static struct vm_operations_struct linvfs_dmapi_file_vm_ops = { 603static struct vm_operations_struct linvfs_dmapi_file_vm_ops = {
629 .close = linvfs_mmap_close, 604 .nopage = linvfs_filemap_nopage,
630 .nopage = filemap_nopage,
631 .populate = filemap_populate, 605 .populate = filemap_populate,
632#ifdef HAVE_VMOP_MPROTECT 606#ifdef HAVE_VMOP_MPROTECT
633 .mprotect = linvfs_mprotect, 607 .mprotect = linvfs_mprotect,
diff --git a/fs/xfs/linux-2.6/xfs_ioctl.c b/fs/xfs/linux-2.6/xfs_ioctl.c
index 05a447e51cc0..6a3326bcd8d0 100644
--- a/fs/xfs/linux-2.6/xfs_ioctl.c
+++ b/fs/xfs/linux-2.6/xfs_ioctl.c
@@ -141,13 +141,19 @@ xfs_find_handle(
141 return -XFS_ERROR(EINVAL); 141 return -XFS_ERROR(EINVAL);
142 } 142 }
143 143
144 /* we need the vnode */ 144 switch (inode->i_mode & S_IFMT) {
145 vp = LINVFS_GET_VP(inode); 145 case S_IFREG:
146 if (vp->v_type != VREG && vp->v_type != VDIR && vp->v_type != VLNK) { 146 case S_IFDIR:
147 case S_IFLNK:
148 break;
149 default:
147 iput(inode); 150 iput(inode);
148 return -XFS_ERROR(EBADF); 151 return -XFS_ERROR(EBADF);
149 } 152 }
150 153
154 /* we need the vnode */
155 vp = LINVFS_GET_VP(inode);
156
151 /* now we can grab the fsid */ 157 /* now we can grab the fsid */
152 memcpy(&handle.ha_fsid, vp->v_vfsp->vfs_altfsid, sizeof(xfs_fsid_t)); 158 memcpy(&handle.ha_fsid, vp->v_vfsp->vfs_altfsid, sizeof(xfs_fsid_t));
153 hsize = sizeof(xfs_fsid_t); 159 hsize = sizeof(xfs_fsid_t);
@@ -386,7 +392,7 @@ xfs_readlink_by_handle(
386 return -error; 392 return -error;
387 393
388 /* Restrict this handle operation to symlinks only. */ 394 /* Restrict this handle operation to symlinks only. */
389 if (vp->v_type != VLNK) { 395 if (!S_ISLNK(inode->i_mode)) {
390 VN_RELE(vp); 396 VN_RELE(vp);
391 return -XFS_ERROR(EINVAL); 397 return -XFS_ERROR(EINVAL);
392 } 398 }
@@ -982,10 +988,10 @@ xfs_ioc_space(
982 if (vp->v_inode.i_flags & (S_IMMUTABLE|S_APPEND)) 988 if (vp->v_inode.i_flags & (S_IMMUTABLE|S_APPEND))
983 return -XFS_ERROR(EPERM); 989 return -XFS_ERROR(EPERM);
984 990
985 if (!(filp->f_flags & FMODE_WRITE)) 991 if (!(filp->f_mode & FMODE_WRITE))
986 return -XFS_ERROR(EBADF); 992 return -XFS_ERROR(EBADF);
987 993
988 if (vp->v_type != VREG) 994 if (!VN_ISREG(vp))
989 return -XFS_ERROR(EINVAL); 995 return -XFS_ERROR(EINVAL);
990 996
991 if (copy_from_user(&bf, arg, sizeof(bf))) 997 if (copy_from_user(&bf, arg, sizeof(bf)))
diff --git a/fs/xfs/linux-2.6/xfs_ioctl32.c b/fs/xfs/linux-2.6/xfs_ioctl32.c
index 0f8f1384eb36..4636b7f86f1f 100644
--- a/fs/xfs/linux-2.6/xfs_ioctl32.c
+++ b/fs/xfs/linux-2.6/xfs_ioctl32.c
@@ -47,8 +47,52 @@
47#include "xfs_vnode.h" 47#include "xfs_vnode.h"
48#include "xfs_dfrag.h" 48#include "xfs_dfrag.h"
49 49
50#define _NATIVE_IOC(cmd, type) \
51 _IOC(_IOC_DIR(cmd), _IOC_TYPE(cmd), _IOC_NR(cmd), sizeof(type))
52
50#if defined(CONFIG_IA64) || defined(CONFIG_X86_64) 53#if defined(CONFIG_IA64) || defined(CONFIG_X86_64)
51#define BROKEN_X86_ALIGNMENT 54#define BROKEN_X86_ALIGNMENT
55/* on ia32 l_start is on a 32-bit boundary */
56typedef struct xfs_flock64_32 {
57 __s16 l_type;
58 __s16 l_whence;
59 __s64 l_start __attribute__((packed));
60 /* len == 0 means until end of file */
61 __s64 l_len __attribute__((packed));
62 __s32 l_sysid;
63 __u32 l_pid;
64 __s32 l_pad[4]; /* reserve area */
65} xfs_flock64_32_t;
66
67#define XFS_IOC_ALLOCSP_32 _IOW ('X', 10, struct xfs_flock64_32)
68#define XFS_IOC_FREESP_32 _IOW ('X', 11, struct xfs_flock64_32)
69#define XFS_IOC_ALLOCSP64_32 _IOW ('X', 36, struct xfs_flock64_32)
70#define XFS_IOC_FREESP64_32 _IOW ('X', 37, struct xfs_flock64_32)
71#define XFS_IOC_RESVSP_32 _IOW ('X', 40, struct xfs_flock64_32)
72#define XFS_IOC_UNRESVSP_32 _IOW ('X', 41, struct xfs_flock64_32)
73#define XFS_IOC_RESVSP64_32 _IOW ('X', 42, struct xfs_flock64_32)
74#define XFS_IOC_UNRESVSP64_32 _IOW ('X', 43, struct xfs_flock64_32)
75
76/* just account for different alignment */
77STATIC unsigned long
78xfs_ioctl32_flock(
79 unsigned long arg)
80{
81 xfs_flock64_32_t __user *p32 = (void __user *)arg;
82 xfs_flock64_t __user *p = compat_alloc_user_space(sizeof(*p));
83
84 if (copy_in_user(&p->l_type, &p32->l_type, sizeof(s16)) ||
85 copy_in_user(&p->l_whence, &p32->l_whence, sizeof(s16)) ||
86 copy_in_user(&p->l_start, &p32->l_start, sizeof(s64)) ||
87 copy_in_user(&p->l_len, &p32->l_len, sizeof(s64)) ||
88 copy_in_user(&p->l_sysid, &p32->l_sysid, sizeof(s32)) ||
89 copy_in_user(&p->l_pid, &p32->l_pid, sizeof(u32)) ||
90 copy_in_user(&p->l_pad, &p32->l_pad, 4*sizeof(u32)))
91 return -EFAULT;
92
93 return (unsigned long)p;
94}
95
52#else 96#else
53 97
54typedef struct xfs_fsop_bulkreq32 { 98typedef struct xfs_fsop_bulkreq32 {
@@ -103,7 +147,6 @@ __linvfs_compat_ioctl(int mode, struct file *f, unsigned cmd, unsigned long arg)
103/* not handled 147/* not handled
104 case XFS_IOC_FD_TO_HANDLE: 148 case XFS_IOC_FD_TO_HANDLE:
105 case XFS_IOC_PATH_TO_HANDLE: 149 case XFS_IOC_PATH_TO_HANDLE:
106 case XFS_IOC_PATH_TO_HANDLE:
107 case XFS_IOC_PATH_TO_FSHANDLE: 150 case XFS_IOC_PATH_TO_FSHANDLE:
108 case XFS_IOC_OPEN_BY_HANDLE: 151 case XFS_IOC_OPEN_BY_HANDLE:
109 case XFS_IOC_FSSETDM_BY_HANDLE: 152 case XFS_IOC_FSSETDM_BY_HANDLE:
@@ -124,8 +167,21 @@ __linvfs_compat_ioctl(int mode, struct file *f, unsigned cmd, unsigned long arg)
124 case XFS_IOC_ERROR_CLEARALL: 167 case XFS_IOC_ERROR_CLEARALL:
125 break; 168 break;
126 169
127#ifndef BROKEN_X86_ALIGNMENT 170#ifdef BROKEN_X86_ALIGNMENT
128 /* xfs_flock_t and xfs_bstat_t have wrong u32 vs u64 alignment */ 171 /* xfs_flock_t has wrong u32 vs u64 alignment */
172 case XFS_IOC_ALLOCSP_32:
173 case XFS_IOC_FREESP_32:
174 case XFS_IOC_ALLOCSP64_32:
175 case XFS_IOC_FREESP64_32:
176 case XFS_IOC_RESVSP_32:
177 case XFS_IOC_UNRESVSP_32:
178 case XFS_IOC_RESVSP64_32:
179 case XFS_IOC_UNRESVSP64_32:
180 arg = xfs_ioctl32_flock(arg);
181 cmd = _NATIVE_IOC(cmd, struct xfs_flock64);
182 break;
183
184#else /* These are handled fine if no alignment issues */
129 case XFS_IOC_ALLOCSP: 185 case XFS_IOC_ALLOCSP:
130 case XFS_IOC_FREESP: 186 case XFS_IOC_FREESP:
131 case XFS_IOC_RESVSP: 187 case XFS_IOC_RESVSP:
@@ -134,6 +190,9 @@ __linvfs_compat_ioctl(int mode, struct file *f, unsigned cmd, unsigned long arg)
134 case XFS_IOC_FREESP64: 190 case XFS_IOC_FREESP64:
135 case XFS_IOC_RESVSP64: 191 case XFS_IOC_RESVSP64:
136 case XFS_IOC_UNRESVSP64: 192 case XFS_IOC_UNRESVSP64:
193 break;
194
195 /* xfs_bstat_t still has wrong u32 vs u64 alignment */
137 case XFS_IOC_SWAPEXT: 196 case XFS_IOC_SWAPEXT:
138 break; 197 break;
139 198
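
The 32-bit preallocation ioctls handled above differ from the native ones only in the recorded argument size: ia32 places l_start/l_len at 4-byte alignment, while x86_64 and ia64 pad them out to 8 bytes. Once xfs_ioctl32_flock() has copied the argument into a natively laid out xfs_flock64, _NATIVE_IOC() rebuilds the command number with sizeof(struct xfs_flock64), so for example XFS_IOC_RESVSP_32 collapses onto the regular XFS_IOC_RESVSP ('X', 40). A one-line sketch of that renumbering, assuming the definitions above:

	/* direction, type and number are preserved; only the encoded size of
	 * the argument structure changes */
	static inline unsigned int
	sketch_native_flock_cmd(unsigned int cmd32)
	{
		return _NATIVE_IOC(cmd32, struct xfs_flock64);
	}
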
diff --git a/fs/xfs/linux-2.6/xfs_iops.c b/fs/xfs/linux-2.6/xfs_iops.c
index f252605514eb..77708a8c9f87 100644
--- a/fs/xfs/linux-2.6/xfs_iops.c
+++ b/fs/xfs/linux-2.6/xfs_iops.c
@@ -140,7 +140,6 @@ linvfs_mknod(
140 140
141 memset(&va, 0, sizeof(va)); 141 memset(&va, 0, sizeof(va));
142 va.va_mask = XFS_AT_TYPE|XFS_AT_MODE; 142 va.va_mask = XFS_AT_TYPE|XFS_AT_MODE;
143 va.va_type = IFTOVT(mode);
144 va.va_mode = mode; 143 va.va_mode = mode;
145 144
146 switch (mode & S_IFMT) { 145 switch (mode & S_IFMT) {
@@ -308,14 +307,13 @@ linvfs_symlink(
308 cvp = NULL; 307 cvp = NULL;
309 308
310 memset(&va, 0, sizeof(va)); 309 memset(&va, 0, sizeof(va));
311 va.va_type = VLNK; 310 va.va_mode = S_IFLNK |
312 va.va_mode = irix_symlink_mode ? 0777 & ~current->fs->umask : S_IRWXUGO; 311 (irix_symlink_mode ? 0777 & ~current->fs->umask : S_IRWXUGO);
313 va.va_mask = XFS_AT_TYPE|XFS_AT_MODE; 312 va.va_mask = XFS_AT_TYPE|XFS_AT_MODE;
314 313
315 error = 0; 314 error = 0;
316 VOP_SYMLINK(dvp, dentry, &va, (char *)symname, &cvp, NULL, error); 315 VOP_SYMLINK(dvp, dentry, &va, (char *)symname, &cvp, NULL, error);
317 if (!error && cvp) { 316 if (!error && cvp) {
318 ASSERT(cvp->v_type == VLNK);
319 ip = LINVFS_GET_IP(cvp); 317 ip = LINVFS_GET_IP(cvp);
320 d_instantiate(dentry, ip); 318 d_instantiate(dentry, ip);
321 validate_fields(dir); 319 validate_fields(dir);
@@ -425,9 +423,14 @@ linvfs_follow_link(
425 return NULL; 423 return NULL;
426} 424}
427 425
428static void linvfs_put_link(struct dentry *dentry, struct nameidata *nd, void *p) 426STATIC void
427linvfs_put_link(
428 struct dentry *dentry,
429 struct nameidata *nd,
430 void *p)
429{ 431{
430 char *s = nd_get_link(nd); 432 char *s = nd_get_link(nd);
433
431 if (!IS_ERR(s)) 434 if (!IS_ERR(s))
432 kfree(s); 435 kfree(s);
433} 436}
diff --git a/fs/xfs/linux-2.6/xfs_linux.h b/fs/xfs/linux-2.6/xfs_linux.h
index 42dc5e4662ed..68c5d885ed9c 100644
--- a/fs/xfs/linux-2.6/xfs_linux.h
+++ b/fs/xfs/linux-2.6/xfs_linux.h
@@ -64,7 +64,6 @@
64#include <sema.h> 64#include <sema.h>
65#include <time.h> 65#include <time.h>
66 66
67#include <support/qsort.h>
68#include <support/ktrace.h> 67#include <support/ktrace.h>
69#include <support/debug.h> 68#include <support/debug.h>
70#include <support/move.h> 69#include <support/move.h>
@@ -104,6 +103,7 @@
104#include <xfs_stats.h> 103#include <xfs_stats.h>
105#include <xfs_sysctl.h> 104#include <xfs_sysctl.h>
106#include <xfs_iops.h> 105#include <xfs_iops.h>
106#include <xfs_aops.h>
107#include <xfs_super.h> 107#include <xfs_super.h>
108#include <xfs_globals.h> 108#include <xfs_globals.h>
109#include <xfs_fs_subr.h> 109#include <xfs_fs_subr.h>
@@ -254,11 +254,18 @@ static inline void set_buffer_unwritten_io(struct buffer_head *bh)
254#define MAX(a,b) (max(a,b)) 254#define MAX(a,b) (max(a,b))
255#define howmany(x, y) (((x)+((y)-1))/(y)) 255#define howmany(x, y) (((x)+((y)-1))/(y))
256#define roundup(x, y) ((((x)+((y)-1))/(y))*(y)) 256#define roundup(x, y) ((((x)+((y)-1))/(y))*(y))
257#define qsort(a,n,s,fn) sort(a,n,s,fn,NULL)
257 258
259/*
260 * Various platform dependent calls that don't fit anywhere else
261 */
258#define xfs_stack_trace() dump_stack() 262#define xfs_stack_trace() dump_stack()
259
260#define xfs_itruncate_data(ip, off) \ 263#define xfs_itruncate_data(ip, off) \
261 (-vmtruncate(LINVFS_GET_IP(XFS_ITOV(ip)), (off))) 264 (-vmtruncate(LINVFS_GET_IP(XFS_ITOV(ip)), (off)))
265#define xfs_statvfs_fsid(statp, mp) \
266 ({ u64 id = huge_encode_dev((mp)->m_dev); \
267 __kernel_fsid_t *fsid = &(statp)->f_fsid; \
268 (fsid->val[0] = (u32)id, fsid->val[1] = (u32)(id >> 32)); })
262 269
263 270
264/* Move the kernel do_div definition off to one side */ 271/* Move the kernel do_div definition off to one side */
@@ -371,6 +378,4 @@ static inline __uint64_t roundup_64(__uint64_t x, __uint32_t y)
371 return(x * y); 378 return(x * y);
372} 379}
373 380
374#define qsort(a, n, s, cmp) sort(a, n, s, cmp, NULL)
375
376#endif /* __XFS_LINUX__ */ 381#endif /* __XFS_LINUX__ */
diff --git a/fs/xfs/linux-2.6/xfs_lrw.c b/fs/xfs/linux-2.6/xfs_lrw.c
index acab58c48043..3b5fabe8dae9 100644
--- a/fs/xfs/linux-2.6/xfs_lrw.c
+++ b/fs/xfs/linux-2.6/xfs_lrw.c
@@ -660,9 +660,6 @@ xfs_write(
660 (xip->i_d.di_flags & XFS_DIFLAG_REALTIME) ? 660 (xip->i_d.di_flags & XFS_DIFLAG_REALTIME) ?
661 mp->m_rtdev_targp : mp->m_ddev_targp; 661 mp->m_rtdev_targp : mp->m_ddev_targp;
662 662
663 if (ioflags & IO_ISAIO)
664 return XFS_ERROR(-ENOSYS);
665
666 if ((pos & target->pbr_smask) || (count & target->pbr_smask)) 663 if ((pos & target->pbr_smask) || (count & target->pbr_smask))
667 return XFS_ERROR(-EINVAL); 664 return XFS_ERROR(-EINVAL);
668 665
diff --git a/fs/xfs/linux-2.6/xfs_lrw.h b/fs/xfs/linux-2.6/xfs_lrw.h
index f197a720e394..6294dcdb797c 100644
--- a/fs/xfs/linux-2.6/xfs_lrw.h
+++ b/fs/xfs/linux-2.6/xfs_lrw.h
@@ -70,9 +70,10 @@ struct xfs_iomap;
70#define XFS_SENDFILE_ENTER 21 70#define XFS_SENDFILE_ENTER 21
71#define XFS_WRITEPAGE_ENTER 22 71#define XFS_WRITEPAGE_ENTER 22
72#define XFS_RELEASEPAGE_ENTER 23 72#define XFS_RELEASEPAGE_ENTER 23
73#define XFS_IOMAP_ALLOC_ENTER 24 73#define XFS_INVALIDPAGE_ENTER 24
74#define XFS_IOMAP_ALLOC_MAP 25 74#define XFS_IOMAP_ALLOC_ENTER 25
75#define XFS_IOMAP_UNWRITTEN 26 75#define XFS_IOMAP_ALLOC_MAP 26
76#define XFS_IOMAP_UNWRITTEN 27
76extern void xfs_rw_enter_trace(int, struct xfs_iocore *, 77extern void xfs_rw_enter_trace(int, struct xfs_iocore *,
77 void *, size_t, loff_t, int); 78 void *, size_t, loff_t, int);
78extern void xfs_inval_cached_trace(struct xfs_iocore *, 79extern void xfs_inval_cached_trace(struct xfs_iocore *,
diff --git a/fs/xfs/linux-2.6/xfs_super.c b/fs/xfs/linux-2.6/xfs_super.c
index f6dd7de25927..0da87bfc9999 100644
--- a/fs/xfs/linux-2.6/xfs_super.c
+++ b/fs/xfs/linux-2.6/xfs_super.c
@@ -70,11 +70,15 @@
70#include <linux/namei.h> 70#include <linux/namei.h>
71#include <linux/init.h> 71#include <linux/init.h>
72#include <linux/mount.h> 72#include <linux/mount.h>
73#include <linux/mempool.h>
73#include <linux/writeback.h> 74#include <linux/writeback.h>
75#include <linux/kthread.h>
74 76
75STATIC struct quotactl_ops linvfs_qops; 77STATIC struct quotactl_ops linvfs_qops;
76STATIC struct super_operations linvfs_sops; 78STATIC struct super_operations linvfs_sops;
77STATIC kmem_zone_t *linvfs_inode_zone; 79STATIC kmem_zone_t *xfs_vnode_zone;
80STATIC kmem_zone_t *xfs_ioend_zone;
81mempool_t *xfs_ioend_pool;
78 82
79STATIC struct xfs_mount_args * 83STATIC struct xfs_mount_args *
80xfs_args_allocate( 84xfs_args_allocate(
@@ -138,24 +142,25 @@ STATIC __inline__ void
138xfs_set_inodeops( 142xfs_set_inodeops(
139 struct inode *inode) 143 struct inode *inode)
140{ 144{
141 vnode_t *vp = LINVFS_GET_VP(inode); 145 switch (inode->i_mode & S_IFMT) {
142 146 case S_IFREG:
143 if (vp->v_type == VNON) {
144 vn_mark_bad(vp);
145 } else if (S_ISREG(inode->i_mode)) {
146 inode->i_op = &linvfs_file_inode_operations; 147 inode->i_op = &linvfs_file_inode_operations;
147 inode->i_fop = &linvfs_file_operations; 148 inode->i_fop = &linvfs_file_operations;
148 inode->i_mapping->a_ops = &linvfs_aops; 149 inode->i_mapping->a_ops = &linvfs_aops;
149 } else if (S_ISDIR(inode->i_mode)) { 150 break;
151 case S_IFDIR:
150 inode->i_op = &linvfs_dir_inode_operations; 152 inode->i_op = &linvfs_dir_inode_operations;
151 inode->i_fop = &linvfs_dir_operations; 153 inode->i_fop = &linvfs_dir_operations;
152 } else if (S_ISLNK(inode->i_mode)) { 154 break;
155 case S_IFLNK:
153 inode->i_op = &linvfs_symlink_inode_operations; 156 inode->i_op = &linvfs_symlink_inode_operations;
154 if (inode->i_blocks) 157 if (inode->i_blocks)
155 inode->i_mapping->a_ops = &linvfs_aops; 158 inode->i_mapping->a_ops = &linvfs_aops;
156 } else { 159 break;
160 default:
157 inode->i_op = &linvfs_file_inode_operations; 161 inode->i_op = &linvfs_file_inode_operations;
158 init_special_inode(inode, inode->i_mode, inode->i_rdev); 162 init_special_inode(inode, inode->i_mode, inode->i_rdev);
163 break;
159 } 164 }
160} 165}
161 166
@@ -167,16 +172,23 @@ xfs_revalidate_inode(
167{ 172{
168 struct inode *inode = LINVFS_GET_IP(vp); 173 struct inode *inode = LINVFS_GET_IP(vp);
169 174
170 inode->i_mode = (ip->i_d.di_mode & MODEMASK) | VTTOIF(vp->v_type); 175 inode->i_mode = ip->i_d.di_mode;
171 inode->i_nlink = ip->i_d.di_nlink; 176 inode->i_nlink = ip->i_d.di_nlink;
172 inode->i_uid = ip->i_d.di_uid; 177 inode->i_uid = ip->i_d.di_uid;
173 inode->i_gid = ip->i_d.di_gid; 178 inode->i_gid = ip->i_d.di_gid;
174 if (((1 << vp->v_type) & ((1<<VBLK) | (1<<VCHR))) == 0) { 179
180 switch (inode->i_mode & S_IFMT) {
181 case S_IFBLK:
182 case S_IFCHR:
183 inode->i_rdev =
184 MKDEV(sysv_major(ip->i_df.if_u2.if_rdev) & 0x1ff,
185 sysv_minor(ip->i_df.if_u2.if_rdev));
186 break;
187 default:
175 inode->i_rdev = 0; 188 inode->i_rdev = 0;
176 } else { 189 break;
177 xfs_dev_t dev = ip->i_df.if_u2.if_rdev;
178 inode->i_rdev = MKDEV(sysv_major(dev) & 0x1ff, sysv_minor(dev));
179 } 190 }
191
180 inode->i_blksize = PAGE_CACHE_SIZE; 192 inode->i_blksize = PAGE_CACHE_SIZE;
181 inode->i_generation = ip->i_d.di_gen; 193 inode->i_generation = ip->i_d.di_gen;
182 i_size_write(inode, ip->i_d.di_size); 194 i_size_write(inode, ip->i_d.di_size);
@@ -231,7 +243,6 @@ xfs_initialize_vnode(
231 * finish our work. 243 * finish our work.
232 */ 244 */
233 if (ip->i_d.di_mode != 0 && unlock && (inode->i_state & I_NEW)) { 245 if (ip->i_d.di_mode != 0 && unlock && (inode->i_state & I_NEW)) {
234 vp->v_type = IFTOVT(ip->i_d.di_mode);
235 xfs_revalidate_inode(XFS_BHVTOM(bdp), vp, ip); 246 xfs_revalidate_inode(XFS_BHVTOM(bdp), vp, ip);
236 xfs_set_inodeops(inode); 247 xfs_set_inodeops(inode);
237 248
@@ -274,8 +285,7 @@ linvfs_alloc_inode(
274{ 285{
275 vnode_t *vp; 286 vnode_t *vp;
276 287
277 vp = (vnode_t *)kmem_cache_alloc(linvfs_inode_zone, 288 vp = kmem_cache_alloc(xfs_vnode_zone, kmem_flags_convert(KM_SLEEP));
278 kmem_flags_convert(KM_SLEEP));
279 if (!vp) 289 if (!vp)
280 return NULL; 290 return NULL;
281 return LINVFS_GET_IP(vp); 291 return LINVFS_GET_IP(vp);
@@ -285,11 +295,11 @@ STATIC void
285linvfs_destroy_inode( 295linvfs_destroy_inode(
286 struct inode *inode) 296 struct inode *inode)
287{ 297{
288 kmem_cache_free(linvfs_inode_zone, LINVFS_GET_VP(inode)); 298 kmem_zone_free(xfs_vnode_zone, LINVFS_GET_VP(inode));
289} 299}
290 300
291STATIC void 301STATIC void
292init_once( 302linvfs_inode_init_once(
293 void *data, 303 void *data,
294 kmem_cache_t *cachep, 304 kmem_cache_t *cachep,
295 unsigned long flags) 305 unsigned long flags)
@@ -302,21 +312,41 @@ init_once(
302} 312}
303 313
304STATIC int 314STATIC int
305init_inodecache( void ) 315linvfs_init_zones(void)
306{ 316{
307 linvfs_inode_zone = kmem_cache_create("linvfs_icache", 317 xfs_vnode_zone = kmem_cache_create("xfs_vnode",
308 sizeof(vnode_t), 0, SLAB_RECLAIM_ACCOUNT, 318 sizeof(vnode_t), 0, SLAB_RECLAIM_ACCOUNT,
309 init_once, NULL); 319 linvfs_inode_init_once, NULL);
310 if (linvfs_inode_zone == NULL) 320 if (!xfs_vnode_zone)
311 return -ENOMEM; 321 goto out;
322
323 xfs_ioend_zone = kmem_zone_init(sizeof(xfs_ioend_t), "xfs_ioend");
324 if (!xfs_ioend_zone)
325 goto out_destroy_vnode_zone;
326
327 xfs_ioend_pool = mempool_create(4 * MAX_BUF_PER_PAGE,
328 mempool_alloc_slab, mempool_free_slab,
329 xfs_ioend_zone);
330 if (!xfs_ioend_pool)
331 goto out_free_ioend_zone;
332
312 return 0; 333 return 0;
334
335
336 out_free_ioend_zone:
337 kmem_zone_destroy(xfs_ioend_zone);
338 out_destroy_vnode_zone:
339 kmem_zone_destroy(xfs_vnode_zone);
340 out:
341 return -ENOMEM;
313} 342}
314 343
315STATIC void 344STATIC void
316destroy_inodecache( void ) 345linvfs_destroy_zones(void)
317{ 346{
318 if (kmem_cache_destroy(linvfs_inode_zone)) 347 mempool_destroy(xfs_ioend_pool);
319 printk(KERN_WARNING "%s: cache still in use!\n", __FUNCTION__); 348 kmem_zone_destroy(xfs_vnode_zone);
349 kmem_zone_destroy(xfs_ioend_zone);
320} 350}
321 351
322/* 352/*
@@ -354,17 +384,38 @@ linvfs_clear_inode(
354 struct inode *inode) 384 struct inode *inode)
355{ 385{
356 vnode_t *vp = LINVFS_GET_VP(inode); 386 vnode_t *vp = LINVFS_GET_VP(inode);
387 int error, cache;
357 388
358 if (vp) { 389 vn_trace_entry(vp, "clear_inode", (inst_t *)__return_address);
359 vn_rele(vp); 390
360 vn_trace_entry(vp, __FUNCTION__, (inst_t *)__return_address); 391 XFS_STATS_INC(vn_rele);
361 /* 392 XFS_STATS_INC(vn_remove);
362 * Do all our cleanup, and remove this vnode. 393 XFS_STATS_INC(vn_reclaim);
363 */ 394 XFS_STATS_DEC(vn_active);
364 vn_remove(vp); 395
396 /*
397 * This can happen because xfs_iget_core calls xfs_idestroy if we
398 * find an inode with di_mode == 0 but without IGET_CREATE set.
399 */
400 if (vp->v_fbhv)
401 VOP_INACTIVE(vp, NULL, cache);
402
403 VN_LOCK(vp);
404 vp->v_flag &= ~VMODIFIED;
405 VN_UNLOCK(vp, 0);
406
407 if (vp->v_fbhv) {
408 VOP_RECLAIM(vp, error);
409 if (error)
410 panic("vn_purge: cannot reclaim");
365 } 411 }
366}
367 412
413 ASSERT(vp->v_fbhv == NULL);
414
415#ifdef XFS_VNODE_TRACE
416 ktrace_free(vp->v_trace);
417#endif
418}
368 419
369/* 420/*
370 * Enqueue a work item to be picked up by the vfs xfssyncd thread. 421 * Enqueue a work item to be picked up by the vfs xfssyncd thread.
@@ -466,25 +517,16 @@ xfssyncd(
466{ 517{
467 long timeleft; 518 long timeleft;
468 vfs_t *vfsp = (vfs_t *) arg; 519 vfs_t *vfsp = (vfs_t *) arg;
469 struct list_head tmp;
470 struct vfs_sync_work *work, *n; 520 struct vfs_sync_work *work, *n;
521 LIST_HEAD (tmp);
471 522
472 daemonize("xfssyncd");
473
474 vfsp->vfs_sync_work.w_vfs = vfsp;
475 vfsp->vfs_sync_work.w_syncer = vfs_sync_worker;
476 vfsp->vfs_sync_task = current;
477 wmb();
478 wake_up(&vfsp->vfs_wait_sync_task);
479
480 INIT_LIST_HEAD(&tmp);
481 timeleft = (xfs_syncd_centisecs * HZ) / 100; 523 timeleft = (xfs_syncd_centisecs * HZ) / 100;
482 for (;;) { 524 for (;;) {
483 set_current_state(TASK_INTERRUPTIBLE); 525 set_current_state(TASK_INTERRUPTIBLE);
484 timeleft = schedule_timeout(timeleft); 526 timeleft = schedule_timeout(timeleft);
485 /* swsusp */ 527 /* swsusp */
486 try_to_freeze(); 528 try_to_freeze();
487 if (vfsp->vfs_flag & VFS_UMOUNT) 529 if (kthread_should_stop())
488 break; 530 break;
489 531
490 spin_lock(&vfsp->vfs_sync_lock); 532 spin_lock(&vfsp->vfs_sync_lock);
@@ -513,10 +555,6 @@ xfssyncd(
513 } 555 }
514 } 556 }
515 557
516 vfsp->vfs_sync_task = NULL;
517 wmb();
518 wake_up(&vfsp->vfs_wait_sync_task);
519
520 return 0; 558 return 0;
521} 559}
522 560
@@ -524,13 +562,11 @@ STATIC int
524linvfs_start_syncd( 562linvfs_start_syncd(
525 vfs_t *vfsp) 563 vfs_t *vfsp)
526{ 564{
527 int pid; 565 vfsp->vfs_sync_work.w_syncer = vfs_sync_worker;
528 566 vfsp->vfs_sync_work.w_vfs = vfsp;
529 pid = kernel_thread(xfssyncd, (void *) vfsp, 567 vfsp->vfs_sync_task = kthread_run(xfssyncd, vfsp, "xfssyncd");
530 CLONE_VM | CLONE_FS | CLONE_FILES); 568 if (IS_ERR(vfsp->vfs_sync_task))
531 if (pid < 0) 569 return -PTR_ERR(vfsp->vfs_sync_task);
532 return -pid;
533 wait_event(vfsp->vfs_wait_sync_task, vfsp->vfs_sync_task);
534 return 0; 570 return 0;
535} 571}
536 572
@@ -538,11 +574,7 @@ STATIC void
538linvfs_stop_syncd( 574linvfs_stop_syncd(
539 vfs_t *vfsp) 575 vfs_t *vfsp)
540{ 576{
541 vfsp->vfs_flag |= VFS_UMOUNT; 577 kthread_stop(vfsp->vfs_sync_task);
542 wmb();
543
544 wake_up_process(vfsp->vfs_sync_task);
545 wait_event(vfsp->vfs_wait_sync_task, !vfsp->vfs_sync_task);
546} 578}
547 579
548STATIC void 580STATIC void
@@ -866,9 +898,9 @@ init_xfs_fs( void )
866 898
867 ktrace_init(64); 899 ktrace_init(64);
868 900
869 error = init_inodecache(); 901 error = linvfs_init_zones();
870 if (error < 0) 902 if (error < 0)
871 goto undo_inodecache; 903 goto undo_zones;
872 904
873 error = pagebuf_init(); 905 error = pagebuf_init();
874 if (error < 0) 906 if (error < 0)
@@ -889,9 +921,9 @@ undo_register:
889 pagebuf_terminate(); 921 pagebuf_terminate();
890 922
891undo_pagebuf: 923undo_pagebuf:
892 destroy_inodecache(); 924 linvfs_destroy_zones();
893 925
894undo_inodecache: 926undo_zones:
895 return error; 927 return error;
896} 928}
897 929
@@ -903,7 +935,7 @@ exit_xfs_fs( void )
903 unregister_filesystem(&xfs_fs_type); 935 unregister_filesystem(&xfs_fs_type);
904 xfs_cleanup(); 936 xfs_cleanup();
905 pagebuf_terminate(); 937 pagebuf_terminate();
906 destroy_inodecache(); 938 linvfs_destroy_zones();
907 ktrace_uninit(); 939 ktrace_uninit();
908} 940}
909 941
diff --git a/fs/xfs/linux-2.6/xfs_vfs.c b/fs/xfs/linux-2.6/xfs_vfs.c
index 669c61644959..34cc902ec119 100644
--- a/fs/xfs/linux-2.6/xfs_vfs.c
+++ b/fs/xfs/linux-2.6/xfs_vfs.c
@@ -251,7 +251,6 @@ vfs_allocate( void )
251 bhv_head_init(VFS_BHVHEAD(vfsp), "vfs"); 251 bhv_head_init(VFS_BHVHEAD(vfsp), "vfs");
252 INIT_LIST_HEAD(&vfsp->vfs_sync_list); 252 INIT_LIST_HEAD(&vfsp->vfs_sync_list);
253 spin_lock_init(&vfsp->vfs_sync_lock); 253 spin_lock_init(&vfsp->vfs_sync_lock);
254 init_waitqueue_head(&vfsp->vfs_wait_sync_task);
255 init_waitqueue_head(&vfsp->vfs_wait_single_sync_task); 254 init_waitqueue_head(&vfsp->vfs_wait_single_sync_task);
256 return vfsp; 255 return vfsp;
257} 256}
diff --git a/fs/xfs/linux-2.6/xfs_vfs.h b/fs/xfs/linux-2.6/xfs_vfs.h
index 7ee1f714e9ba..f0ab574fb47a 100644
--- a/fs/xfs/linux-2.6/xfs_vfs.h
+++ b/fs/xfs/linux-2.6/xfs_vfs.h
@@ -65,7 +65,6 @@ typedef struct vfs {
65 spinlock_t vfs_sync_lock; /* work item list lock */ 65 spinlock_t vfs_sync_lock; /* work item list lock */
66 int vfs_sync_seq; /* sync thread generation no. */ 66 int vfs_sync_seq; /* sync thread generation no. */
67 wait_queue_head_t vfs_wait_single_sync_task; 67 wait_queue_head_t vfs_wait_single_sync_task;
68 wait_queue_head_t vfs_wait_sync_task;
69} vfs_t; 68} vfs_t;
70 69
71#define vfs_fbhv vfs_bh.bh_first /* 1st on vfs behavior chain */ 70#define vfs_fbhv vfs_bh.bh_first /* 1st on vfs behavior chain */
@@ -96,7 +95,6 @@ typedef enum {
96#define VFS_RDONLY 0x0001 /* read-only vfs */ 95#define VFS_RDONLY 0x0001 /* read-only vfs */
97#define VFS_GRPID 0x0002 /* group-ID assigned from directory */ 96#define VFS_GRPID 0x0002 /* group-ID assigned from directory */
98#define VFS_DMI 0x0004 /* filesystem has the DMI enabled */ 97#define VFS_DMI 0x0004 /* filesystem has the DMI enabled */
99#define VFS_UMOUNT 0x0008 /* unmount in progress */
100#define VFS_END 0x0008 /* max flag */ 98#define VFS_END 0x0008 /* max flag */
101 99
102#define SYNC_ATTR 0x0001 /* sync attributes */ 100#define SYNC_ATTR 0x0001 /* sync attributes */
diff --git a/fs/xfs/linux-2.6/xfs_vnode.c b/fs/xfs/linux-2.6/xfs_vnode.c
index 250cad54e892..268f45bf6a9a 100644
--- a/fs/xfs/linux-2.6/xfs_vnode.c
+++ b/fs/xfs/linux-2.6/xfs_vnode.c
@@ -42,93 +42,33 @@ DEFINE_SPINLOCK(vnumber_lock);
42 */ 42 */
43#define NVSYNC 37 43#define NVSYNC 37
44#define vptosync(v) (&vsync[((unsigned long)v) % NVSYNC]) 44#define vptosync(v) (&vsync[((unsigned long)v) % NVSYNC])
45sv_t vsync[NVSYNC]; 45STATIC wait_queue_head_t vsync[NVSYNC];
46
47/*
48 * Translate stat(2) file types to vnode types and vice versa.
49 * Aware of numeric order of S_IFMT and vnode type values.
50 */
51enum vtype iftovt_tab[] = {
52 VNON, VFIFO, VCHR, VNON, VDIR, VNON, VBLK, VNON,
53 VREG, VNON, VLNK, VNON, VSOCK, VNON, VNON, VNON
54};
55
56u_short vttoif_tab[] = {
57 0, S_IFREG, S_IFDIR, S_IFBLK, S_IFCHR, S_IFLNK, S_IFIFO, 0, S_IFSOCK
58};
59 46
60 47
61void 48void
62vn_init(void) 49vn_init(void)
63{ 50{
64 register sv_t *svp; 51 int i;
65 register int i;
66 52
67 for (svp = vsync, i = 0; i < NVSYNC; i++, svp++) 53 for (i = 0; i < NVSYNC; i++)
68 init_sv(svp, SV_DEFAULT, "vsy", i); 54 init_waitqueue_head(&vsync[i]);
69} 55}
70 56
71/* 57void
72 * Clean a vnode of filesystem-specific data and prepare it for reuse. 58vn_iowait(
73 */
74STATIC int
75vn_reclaim(
76 struct vnode *vp) 59 struct vnode *vp)
77{ 60{
78 int error; 61 wait_queue_head_t *wq = vptosync(vp);
79 62
80 XFS_STATS_INC(vn_reclaim); 63 wait_event(*wq, (atomic_read(&vp->v_iocount) == 0));
81 vn_trace_entry(vp, "vn_reclaim", (inst_t *)__return_address);
82
83 /*
84 * Only make the VOP_RECLAIM call if there are behaviors
85 * to call.
86 */
87 if (vp->v_fbhv) {
88 VOP_RECLAIM(vp, error);
89 if (error)
90 return -error;
91 }
92 ASSERT(vp->v_fbhv == NULL);
93
94 VN_LOCK(vp);
95 vp->v_flag &= (VRECLM|VWAIT);
96 VN_UNLOCK(vp, 0);
97
98 vp->v_type = VNON;
99 vp->v_fbhv = NULL;
100
101#ifdef XFS_VNODE_TRACE
102 ktrace_free(vp->v_trace);
103 vp->v_trace = NULL;
104#endif
105
106 return 0;
107}
108
109STATIC void
110vn_wakeup(
111 struct vnode *vp)
112{
113 VN_LOCK(vp);
114 if (vp->v_flag & VWAIT)
115 sv_broadcast(vptosync(vp));
116 vp->v_flag &= ~(VRECLM|VWAIT|VMODIFIED);
117 VN_UNLOCK(vp, 0);
118} 64}
119 65
120int 66void
121vn_wait( 67vn_iowake(
122 struct vnode *vp) 68 struct vnode *vp)
123{ 69{
124 VN_LOCK(vp); 70 if (atomic_dec_and_test(&vp->v_iocount))
125 if (vp->v_flag & (VINACT | VRECLM)) { 71 wake_up(vptosync(vp));
126 vp->v_flag |= VWAIT;
127 sv_wait(vptosync(vp), PINOD, &vp->v_lock, 0);
128 return 1;
129 }
130 VN_UNLOCK(vp, 0);
131 return 0;
132} 72}
133 73
134struct vnode * 74struct vnode *
@@ -154,6 +94,8 @@ vn_initialize(
154 /* Initialize the first behavior and the behavior chain head. */ 94 /* Initialize the first behavior and the behavior chain head. */
155 vn_bhv_head_init(VN_BHV_HEAD(vp), "vnode"); 95 vn_bhv_head_init(VN_BHV_HEAD(vp), "vnode");
156 96
97 atomic_set(&vp->v_iocount, 0);
98
157#ifdef XFS_VNODE_TRACE 99#ifdef XFS_VNODE_TRACE
158 vp->v_trace = ktrace_alloc(VNODE_TRACE_SIZE, KM_SLEEP); 100 vp->v_trace = ktrace_alloc(VNODE_TRACE_SIZE, KM_SLEEP);
159#endif /* XFS_VNODE_TRACE */ 101#endif /* XFS_VNODE_TRACE */
@@ -163,30 +105,6 @@ vn_initialize(
163} 105}
164 106
165/* 107/*
166 * Get a reference on a vnode.
167 */
168vnode_t *
169vn_get(
170 struct vnode *vp,
171 vmap_t *vmap)
172{
173 struct inode *inode;
174
175 XFS_STATS_INC(vn_get);
176 inode = LINVFS_GET_IP(vp);
177 if (inode->i_state & I_FREEING)
178 return NULL;
179
180 inode = ilookup(vmap->v_vfsp->vfs_super, vmap->v_ino);
181 if (!inode) /* Inode not present */
182 return NULL;
183
184 vn_trace_exit(vp, "vn_get", (inst_t *)__return_address);
185
186 return vp;
187}
188
189/*
190 * Revalidate the Linux inode from the vattr. 108 * Revalidate the Linux inode from the vattr.
191 * Note: i_size _not_ updated; we must hold the inode 109 * Note: i_size _not_ updated; we must hold the inode
192 * semaphore when doing that - callers responsibility. 110 * semaphore when doing that - callers responsibility.
@@ -198,7 +116,7 @@ vn_revalidate_core(
198{ 116{
199 struct inode *inode = LINVFS_GET_IP(vp); 117 struct inode *inode = LINVFS_GET_IP(vp);
200 118
201 inode->i_mode = VTTOIF(vap->va_type) | vap->va_mode; 119 inode->i_mode = vap->va_mode;
202 inode->i_nlink = vap->va_nlink; 120 inode->i_nlink = vap->va_nlink;
203 inode->i_uid = vap->va_uid; 121 inode->i_uid = vap->va_uid;
204 inode->i_gid = vap->va_gid; 122 inode->i_gid = vap->va_gid;
@@ -247,71 +165,6 @@ vn_revalidate(
247} 165}
248 166
249/* 167/*
250 * purge a vnode from the cache
251 * At this point the vnode is guaranteed to have no references (vn_count == 0)
252 * The caller has to make sure that there are no ways someone could
253 * get a handle (via vn_get) on the vnode (usually done via a mount/vfs lock).
254 */
255void
256vn_purge(
257 struct vnode *vp,
258 vmap_t *vmap)
259{
260 vn_trace_entry(vp, "vn_purge", (inst_t *)__return_address);
261
262again:
263 /*
264 * Check whether vp has already been reclaimed since our caller
265 * sampled its version while holding a filesystem cache lock that
266 * its VOP_RECLAIM function acquires.
267 */
268 VN_LOCK(vp);
269 if (vp->v_number != vmap->v_number) {
270 VN_UNLOCK(vp, 0);
271 return;
272 }
273
274 /*
275 * If vp is being reclaimed or inactivated, wait until it is inert,
276 * then proceed. Can't assume that vnode is actually reclaimed
277 * just because the reclaimed flag is asserted -- a vn_alloc
278 * reclaim can fail.
279 */
280 if (vp->v_flag & (VINACT | VRECLM)) {
281 ASSERT(vn_count(vp) == 0);
282 vp->v_flag |= VWAIT;
283 sv_wait(vptosync(vp), PINOD, &vp->v_lock, 0);
284 goto again;
285 }
286
287 /*
288 * Another process could have raced in and gotten this vnode...
289 */
290 if (vn_count(vp) > 0) {
291 VN_UNLOCK(vp, 0);
292 return;
293 }
294
295 XFS_STATS_DEC(vn_active);
296 vp->v_flag |= VRECLM;
297 VN_UNLOCK(vp, 0);
298
299 /*
300 * Call VOP_RECLAIM and clean vp. The FSYNC_INVAL flag tells
301 * vp's filesystem to flush and invalidate all cached resources.
302 * When vn_reclaim returns, vp should have no private data,
303 * either in a system cache or attached to v_data.
304 */
305 if (vn_reclaim(vp) != 0)
306 panic("vn_purge: cannot reclaim");
307
308 /*
309 * Wakeup anyone waiting for vp to be reclaimed.
310 */
311 vn_wakeup(vp);
312}
313
314/*
315 * Add a reference to a referenced vnode. 168 * Add a reference to a referenced vnode.
316 */ 169 */
317struct vnode * 170struct vnode *
@@ -330,80 +183,6 @@ vn_hold(
330 return vp; 183 return vp;
331} 184}
332 185
333/*
334 * Call VOP_INACTIVE on last reference.
335 */
336void
337vn_rele(
338 struct vnode *vp)
339{
340 int vcnt;
341 int cache;
342
343 XFS_STATS_INC(vn_rele);
344
345 VN_LOCK(vp);
346
347 vn_trace_entry(vp, "vn_rele", (inst_t *)__return_address);
348 vcnt = vn_count(vp);
349
350 /*
351 * Since we always get called from put_inode we know
352 * that i_count won't be decremented after we
353 * return.
354 */
355 if (!vcnt) {
356 /*
357 * As soon as we turn this on, noone can find us in vn_get
358 * until we turn off VINACT or VRECLM
359 */
360 vp->v_flag |= VINACT;
361 VN_UNLOCK(vp, 0);
362
363 /*
364 * Do not make the VOP_INACTIVE call if there
365 * are no behaviors attached to the vnode to call.
366 */
367 if (vp->v_fbhv)
368 VOP_INACTIVE(vp, NULL, cache);
369
370 VN_LOCK(vp);
371 if (vp->v_flag & VWAIT)
372 sv_broadcast(vptosync(vp));
373
374 vp->v_flag &= ~(VINACT|VWAIT|VRECLM|VMODIFIED);
375 }
376
377 VN_UNLOCK(vp, 0);
378
379 vn_trace_exit(vp, "vn_rele", (inst_t *)__return_address);
380}
381
382/*
383 * Finish the removal of a vnode.
384 */
385void
386vn_remove(
387 struct vnode *vp)
388{
389 vmap_t vmap;
390
391 /* Make sure we don't do this to the same vnode twice */
392 if (!(vp->v_fbhv))
393 return;
394
395 XFS_STATS_INC(vn_remove);
396 vn_trace_exit(vp, "vn_remove", (inst_t *)__return_address);
397
398 /*
399 * After the following purge the vnode
400 * will no longer exist.
401 */
402 VMAP(vp, vmap);
403 vn_purge(vp, &vmap);
404}
405
406
407#ifdef XFS_VNODE_TRACE 186#ifdef XFS_VNODE_TRACE
408 187
409#define KTRACE_ENTER(vp, vk, s, line, ra) \ 188#define KTRACE_ENTER(vp, vk, s, line, ra) \
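
The new vn_iowait()/vn_iowake() pair above reuses the small vsync[] hash of wait queues, keyed on the vnode address, to wait for outstanding I/O instead of the removed sv_t-based reclaim handshake. The submission-side callers are not part of this hunk; assuming they take one v_iocount reference per outstanding I/O, the intended protocol is roughly:

	/* sketch of the assumed usage; the real callers live elsewhere in XFS */
	static void
	sketch_submit_io(struct vnode *vp)
	{
		atomic_inc(&vp->v_iocount);	/* paired with vn_iowake() */
		/* ... issue the I/O; its completion path calls vn_iowake(vp),
		 * which drops the count and wakes vptosync(vp) at zero ... */
	}

	static void
	sketch_drain_io(struct vnode *vp)
	{
		/* truncate/reclaim paths drain outstanding I/O before tearing
		 * down the mapping */
		vn_iowait(vp);
	}
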
diff --git a/fs/xfs/linux-2.6/xfs_vnode.h b/fs/xfs/linux-2.6/xfs_vnode.h
index a6e57c647be4..35f306cebb87 100644
--- a/fs/xfs/linux-2.6/xfs_vnode.h
+++ b/fs/xfs/linux-2.6/xfs_vnode.h
@@ -1,5 +1,5 @@
1/* 1/*
2 * Copyright (c) 2000-2003 Silicon Graphics, Inc. All Rights Reserved. 2 * Copyright (c) 2000-2005 Silicon Graphics, Inc. All Rights Reserved.
3 * 3 *
4 * This program is free software; you can redistribute it and/or modify it 4 * This program is free software; you can redistribute it and/or modify it
5 * under the terms of version 2 of the GNU General Public License as 5 * under the terms of version 2 of the GNU General Public License as
@@ -65,10 +65,6 @@ struct vattr;
65struct xfs_iomap; 65struct xfs_iomap;
66struct attrlist_cursor_kern; 66struct attrlist_cursor_kern;
67 67
68/*
69 * Vnode types. VNON means no type.
70 */
71enum vtype { VNON, VREG, VDIR, VBLK, VCHR, VLNK, VFIFO, VBAD, VSOCK };
72 68
73typedef xfs_ino_t vnumber_t; 69typedef xfs_ino_t vnumber_t;
74typedef struct dentry vname_t; 70typedef struct dentry vname_t;
@@ -77,15 +73,14 @@ typedef bhv_head_t vn_bhv_head_t;
77/* 73/*
78 * MP locking protocols: 74 * MP locking protocols:
79 * v_flag, v_vfsp VN_LOCK/VN_UNLOCK 75 * v_flag, v_vfsp VN_LOCK/VN_UNLOCK
80 * v_type read-only or fs-dependent
81 */ 76 */
82typedef struct vnode { 77typedef struct vnode {
83 __u32 v_flag; /* vnode flags (see below) */ 78 __u32 v_flag; /* vnode flags (see below) */
84 enum vtype v_type; /* vnode type */
85 struct vfs *v_vfsp; /* ptr to containing VFS */ 79 struct vfs *v_vfsp; /* ptr to containing VFS */
86 vnumber_t v_number; /* in-core vnode number */ 80 vnumber_t v_number; /* in-core vnode number */
87 vn_bhv_head_t v_bh; /* behavior head */ 81 vn_bhv_head_t v_bh; /* behavior head */
88 spinlock_t v_lock; /* VN_LOCK/VN_UNLOCK */ 82 spinlock_t v_lock; /* VN_LOCK/VN_UNLOCK */
83 atomic_t v_iocount; /* outstanding I/O count */
89#ifdef XFS_VNODE_TRACE 84#ifdef XFS_VNODE_TRACE
90 struct ktrace *v_trace; /* trace header structure */ 85 struct ktrace *v_trace; /* trace header structure */
91#endif 86#endif
@@ -93,6 +88,12 @@ typedef struct vnode {
93 /* inode MUST be last */ 88 /* inode MUST be last */
94} vnode_t; 89} vnode_t;
95 90
91#define VN_ISLNK(vp) S_ISLNK((vp)->v_inode.i_mode)
92#define VN_ISREG(vp) S_ISREG((vp)->v_inode.i_mode)
93#define VN_ISDIR(vp) S_ISDIR((vp)->v_inode.i_mode)
94#define VN_ISCHR(vp) S_ISCHR((vp)->v_inode.i_mode)
95#define VN_ISBLK(vp) S_ISBLK((vp)->v_inode.i_mode)
96
96#define v_fbhv v_bh.bh_first /* first behavior */ 97#define v_fbhv v_bh.bh_first /* first behavior */
97#define v_fops v_bh.bh_first->bd_ops /* first behavior ops */ 98#define v_fops v_bh.bh_first->bd_ops /* first behavior ops */
98 99
@@ -133,22 +134,8 @@ typedef enum {
133#define LINVFS_GET_IP(vp) (&(vp)->v_inode) 134#define LINVFS_GET_IP(vp) (&(vp)->v_inode)
134 135
135/* 136/*
136 * Convert between vnode types and inode formats (since POSIX.1
137 * defines mode word of stat structure in terms of inode formats).
138 */
139extern enum vtype iftovt_tab[];
140extern u_short vttoif_tab[];
141#define IFTOVT(mode) (iftovt_tab[((mode) & S_IFMT) >> 12])
142#define VTTOIF(indx) (vttoif_tab[(int)(indx)])
143#define MAKEIMODE(indx, mode) (int)(VTTOIF(indx) | (mode))
144
145
146/*
147 * Vnode flags. 137 * Vnode flags.
148 */ 138 */
149#define VINACT 0x1 /* vnode is being inactivated */
150#define VRECLM 0x2 /* vnode is being reclaimed */
151#define VWAIT 0x4 /* waiting for VINACT/VRECLM to end */
152#define VMODIFIED 0x8 /* XFS inode state possibly differs */ 139#define VMODIFIED 0x8 /* XFS inode state possibly differs */
153 /* to the Linux inode state. */ 140 /* to the Linux inode state. */
154 141
@@ -408,7 +395,6 @@ typedef struct vnodeops {
408 */ 395 */
409typedef struct vattr { 396typedef struct vattr {
410 int va_mask; /* bit-mask of attributes present */ 397 int va_mask; /* bit-mask of attributes present */
411 enum vtype va_type; /* vnode type (for create) */
412 mode_t va_mode; /* file access mode and type */ 398 mode_t va_mode; /* file access mode and type */
413 xfs_nlink_t va_nlink; /* number of references to file */ 399 xfs_nlink_t va_nlink; /* number of references to file */
414 uid_t va_uid; /* owner user id */ 400 uid_t va_uid; /* owner user id */
@@ -498,27 +484,12 @@ typedef struct vattr {
498 * Check whether mandatory file locking is enabled. 484 * Check whether mandatory file locking is enabled.
499 */ 485 */
500#define MANDLOCK(vp, mode) \ 486#define MANDLOCK(vp, mode) \
501 ((vp)->v_type == VREG && ((mode) & (VSGID|(VEXEC>>3))) == VSGID) 487 (VN_ISREG(vp) && ((mode) & (VSGID|(VEXEC>>3))) == VSGID)
502 488
503extern void vn_init(void); 489extern void vn_init(void);
504extern int vn_wait(struct vnode *);
505extern vnode_t *vn_initialize(struct inode *); 490extern vnode_t *vn_initialize(struct inode *);
506 491
507/* 492/*
508 * Acquiring and invalidating vnodes:
509 *
510 * if (vn_get(vp, version, 0))
511 * ...;
512 * vn_purge(vp, version);
513 *
514 * vn_get and vn_purge must be called with vmap_t arguments, sampled
515 * while a lock that the vnode's VOP_RECLAIM function acquires is
516 * held, to ensure that the vnode sampled with the lock held isn't
517 * recycled (VOP_RECLAIMed) or deallocated between the release of the lock
518 * and the subsequent vn_get or vn_purge.
519 */
520
521/*
522 * vnode_map structures _must_ match vn_epoch and vnode structure sizes. 493 * vnode_map structures _must_ match vn_epoch and vnode structure sizes.
523 */ 494 */
524typedef struct vnode_map { 495typedef struct vnode_map {
@@ -531,11 +502,11 @@ typedef struct vnode_map {
531 (vmap).v_number = (vp)->v_number, \ 502 (vmap).v_number = (vp)->v_number, \
532 (vmap).v_ino = (vp)->v_inode.i_ino; } 503 (vmap).v_ino = (vp)->v_inode.i_ino; }
533 504
534extern void vn_purge(struct vnode *, vmap_t *);
535extern vnode_t *vn_get(struct vnode *, vmap_t *);
536extern int vn_revalidate(struct vnode *); 505extern int vn_revalidate(struct vnode *);
537extern void vn_revalidate_core(struct vnode *, vattr_t *); 506extern void vn_revalidate_core(struct vnode *, vattr_t *);
538extern void vn_remove(struct vnode *); 507
508extern void vn_iowait(struct vnode *vp);
509extern void vn_iowake(struct vnode *vp);
539 510
540static inline int vn_count(struct vnode *vp) 511static inline int vn_count(struct vnode *vp)
541{ 512{
@@ -546,7 +517,6 @@ static inline int vn_count(struct vnode *vp)
546 * Vnode reference counting functions (and macros for compatibility). 517 * Vnode reference counting functions (and macros for compatibility).
547 */ 518 */
548extern vnode_t *vn_hold(struct vnode *); 519extern vnode_t *vn_hold(struct vnode *);
549extern void vn_rele(struct vnode *);
550 520
551#if defined(XFS_VNODE_TRACE) 521#if defined(XFS_VNODE_TRACE)
552#define VN_HOLD(vp) \ 522#define VN_HOLD(vp) \
@@ -560,6 +530,12 @@ extern void vn_rele(struct vnode *);
560#define VN_RELE(vp) (iput(LINVFS_GET_IP(vp))) 530#define VN_RELE(vp) (iput(LINVFS_GET_IP(vp)))
561#endif 531#endif
562 532
533static inline struct vnode *vn_grab(struct vnode *vp)
534{
535 struct inode *inode = igrab(LINVFS_GET_IP(vp));
536 return inode ? LINVFS_GET_VP(inode) : NULL;
537}
538
563/* 539/*
564 * Vname handling macros. 540 * Vname handling macros.
565 */ 541 */
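
The xfs_vnode.h changes above drop XFS's private enum vtype and the cached v_type field, deriving the file type directly from the Linux inode's i_mode via the new VN_IS*() macros, and replace the vn_get()/vmap_t revalidation protocol with an igrab()-based vn_grab(). A minimal user-space sketch of the mode-based type test, using stub structs that merely stand in for the kernel types, might look like this:

/* Sketch only: stub types standing in for the kernel vnode/inode. */
#include <stdio.h>
#include <sys/stat.h>

struct stub_inode { unsigned int i_mode; };
struct stub_vnode { struct stub_inode v_inode; };

/* Same shape as the new macros: the type comes straight from i_mode,
 * so there is no separate enum vtype field to keep in sync. */
#define VN_ISDIR(vp)  S_ISDIR((vp)->v_inode.i_mode)
#define VN_ISREG(vp)  S_ISREG((vp)->v_inode.i_mode)

int main(void)
{
	struct stub_vnode dir = { { S_IFDIR | 0755 } };
	struct stub_vnode reg = { { S_IFREG | 0644 } };

	printf("dir: isdir=%d isreg=%d\n", VN_ISDIR(&dir) != 0, VN_ISREG(&dir) != 0);
	printf("reg: isdir=%d isreg=%d\n", VN_ISDIR(&reg) != 0, VN_ISREG(&reg) != 0);
	return 0;
}
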
diff --git a/fs/xfs/quota/Makefile b/fs/xfs/quota/Makefile
new file mode 100644
index 000000000000..7a4f725b2824
--- /dev/null
+++ b/fs/xfs/quota/Makefile
@@ -0,0 +1 @@
include $(TOPDIR)/fs/xfs/quota/Makefile-linux-$(VERSION).$(PATCHLEVEL)
diff --git a/fs/xfs/quota/Makefile-linux-2.6 b/fs/xfs/quota/Makefile-linux-2.6
new file mode 100644
index 000000000000..8b7b676718b9
--- /dev/null
+++ b/fs/xfs/quota/Makefile-linux-2.6
@@ -0,0 +1,53 @@
1#
2# Copyright (c) 2000-2003 Silicon Graphics, Inc. All Rights Reserved.
3#
4# This program is free software; you can redistribute it and/or modify it
5# under the terms of version 2 of the GNU General Public License as
6# published by the Free Software Foundation.
7#
8# This program is distributed in the hope that it would be useful, but
9# WITHOUT ANY WARRANTY; without even the implied warranty of
10# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
11#
12# Further, this software is distributed without any warranty that it is
13# free of the rightful claim of any third person regarding infringement
14# or the like. Any license provided herein, whether implied or
15# otherwise, applies only to this software file. Patent licenses, if
16# any, provided herein do not apply to combinations of this program with
17# other software, or any other product whatsoever.
18#
19# You should have received a copy of the GNU General Public License along
20# with this program; if not, write the Free Software Foundation, Inc., 59
21# Temple Place - Suite 330, Boston MA 02111-1307, USA.
22#
23# Contact information: Silicon Graphics, Inc., 1600 Amphitheatre Pkwy,
24# Mountain View, CA 94043, or:
25#
26# http://www.sgi.com
27#
28# For further information regarding this notice, see:
29#
30# http://oss.sgi.com/projects/GenInfo/SGIGPLNoticeExplan/
31#
32
33EXTRA_CFLAGS += -I $(TOPDIR)/fs/xfs -I $(TOPDIR)/fs/xfs/linux-2.6
34
35ifeq ($(CONFIG_XFS_DEBUG),y)
36 EXTRA_CFLAGS += -g -DDEBUG
37 #EXTRA_CFLAGS += -DQUOTADEBUG
38endif
39ifeq ($(CONFIG_XFS_TRACE),y)
40 EXTRA_CFLAGS += -DXFS_DQUOT_TRACE
41 EXTRA_CFLAGS += -DXFS_VNODE_TRACE
42endif
43
44obj-$(CONFIG_XFS_QUOTA) += xfs_quota.o
45
46xfs_quota-y += xfs_dquot.o \
47 xfs_dquot_item.o \
48 xfs_trans_dquot.o \
49 xfs_qm_syscalls.o \
50 xfs_qm_bhv.o \
51 xfs_qm.o
52
53xfs_quota-$(CONFIG_PROC_FS) += xfs_qm_stats.o
diff --git a/fs/xfs/quota/xfs_dquot.c b/fs/xfs/quota/xfs_dquot.c
index 46ce1e3ce1d6..e2e8d35fa4d0 100644
--- a/fs/xfs/quota/xfs_dquot.c
+++ b/fs/xfs/quota/xfs_dquot.c
@@ -421,7 +421,7 @@ xfs_qm_init_dquot_blk(
421 */ 421 */
422STATIC int 422STATIC int
423xfs_qm_dqalloc( 423xfs_qm_dqalloc(
424 xfs_trans_t *tp, 424 xfs_trans_t **tpp,
425 xfs_mount_t *mp, 425 xfs_mount_t *mp,
426 xfs_dquot_t *dqp, 426 xfs_dquot_t *dqp,
427 xfs_inode_t *quotip, 427 xfs_inode_t *quotip,
@@ -433,6 +433,7 @@ xfs_qm_dqalloc(
433 xfs_bmbt_irec_t map; 433 xfs_bmbt_irec_t map;
434 int nmaps, error, committed; 434 int nmaps, error, committed;
435 xfs_buf_t *bp; 435 xfs_buf_t *bp;
436 xfs_trans_t *tp = *tpp;
436 437
437 ASSERT(tp != NULL); 438 ASSERT(tp != NULL);
438 xfs_dqtrace_entry(dqp, "DQALLOC"); 439 xfs_dqtrace_entry(dqp, "DQALLOC");
@@ -492,10 +493,32 @@ xfs_qm_dqalloc(
492 xfs_qm_init_dquot_blk(tp, mp, INT_GET(dqp->q_core.d_id, ARCH_CONVERT), 493 xfs_qm_init_dquot_blk(tp, mp, INT_GET(dqp->q_core.d_id, ARCH_CONVERT),
493 dqp->dq_flags & XFS_DQ_ALLTYPES, bp); 494 dqp->dq_flags & XFS_DQ_ALLTYPES, bp);
494 495
495 if ((error = xfs_bmap_finish(&tp, &flist, firstblock, &committed))) { 496 /*
497 * xfs_bmap_finish() may commit the current transaction and
498 * start a second transaction if the freelist is not empty.
499 *
500 * Since we still want to modify this buffer, we need to
501 * ensure that the buffer is not released on commit of
502 * the first transaction and ensure the buffer is added to the
503 * second transaction.
504 *
505 * If there is only one transaction then don't stop the buffer
506 * from being released when it commits later on.
507 */
508
509 xfs_trans_bhold(tp, bp);
510
511 if ((error = xfs_bmap_finish(tpp, &flist, firstblock, &committed))) {
496 goto error1; 512 goto error1;
497 } 513 }
498 514
515 if (committed) {
516 tp = *tpp;
517 xfs_trans_bjoin(tp, bp);
518 } else {
519 xfs_trans_bhold_release(tp, bp);
520 }
521
499 *O_bpp = bp; 522 *O_bpp = bp;
500 return 0; 523 return 0;
501 524
@@ -514,7 +537,7 @@ xfs_qm_dqalloc(
514 */ 537 */
515STATIC int 538STATIC int
516xfs_qm_dqtobp( 539xfs_qm_dqtobp(
517 xfs_trans_t *tp, 540 xfs_trans_t **tpp,
518 xfs_dquot_t *dqp, 541 xfs_dquot_t *dqp,
519 xfs_disk_dquot_t **O_ddpp, 542 xfs_disk_dquot_t **O_ddpp,
520 xfs_buf_t **O_bpp, 543 xfs_buf_t **O_bpp,
@@ -528,6 +551,7 @@ xfs_qm_dqtobp(
528 xfs_disk_dquot_t *ddq; 551 xfs_disk_dquot_t *ddq;
529 xfs_dqid_t id; 552 xfs_dqid_t id;
530 boolean_t newdquot; 553 boolean_t newdquot;
554 xfs_trans_t *tp = (tpp ? *tpp : NULL);
531 555
532 mp = dqp->q_mount; 556 mp = dqp->q_mount;
533 id = INT_GET(dqp->q_core.d_id, ARCH_CONVERT); 557 id = INT_GET(dqp->q_core.d_id, ARCH_CONVERT);
@@ -579,9 +603,10 @@ xfs_qm_dqtobp(
579 return (ENOENT); 603 return (ENOENT);
580 604
581 ASSERT(tp); 605 ASSERT(tp);
582 if ((error = xfs_qm_dqalloc(tp, mp, dqp, quotip, 606 if ((error = xfs_qm_dqalloc(tpp, mp, dqp, quotip,
583 dqp->q_fileoffset, &bp))) 607 dqp->q_fileoffset, &bp)))
584 return (error); 608 return (error);
609 tp = *tpp;
585 newdquot = B_TRUE; 610 newdquot = B_TRUE;
586 } else { 611 } else {
587 /* 612 /*
@@ -645,7 +670,7 @@ xfs_qm_dqtobp(
645/* ARGSUSED */ 670/* ARGSUSED */
646STATIC int 671STATIC int
647xfs_qm_dqread( 672xfs_qm_dqread(
648 xfs_trans_t *tp, 673 xfs_trans_t **tpp,
649 xfs_dqid_t id, 674 xfs_dqid_t id,
650 xfs_dquot_t *dqp, /* dquot to get filled in */ 675 xfs_dquot_t *dqp, /* dquot to get filled in */
651 uint flags) 676 uint flags)
@@ -653,15 +678,19 @@ xfs_qm_dqread(
653 xfs_disk_dquot_t *ddqp; 678 xfs_disk_dquot_t *ddqp;
654 xfs_buf_t *bp; 679 xfs_buf_t *bp;
655 int error; 680 int error;
681 xfs_trans_t *tp;
682
683 ASSERT(tpp);
656 684
657 /* 685 /*
658 * get a pointer to the on-disk dquot and the buffer containing it 686 * get a pointer to the on-disk dquot and the buffer containing it
659 * dqp already knows its own type (GROUP/USER). 687 * dqp already knows its own type (GROUP/USER).
660 */ 688 */
661 xfs_dqtrace_entry(dqp, "DQREAD"); 689 xfs_dqtrace_entry(dqp, "DQREAD");
662 if ((error = xfs_qm_dqtobp(tp, dqp, &ddqp, &bp, flags))) { 690 if ((error = xfs_qm_dqtobp(tpp, dqp, &ddqp, &bp, flags))) {
663 return (error); 691 return (error);
664 } 692 }
693 tp = *tpp;
665 694
666 /* copy everything from disk dquot to the incore dquot */ 695 /* copy everything from disk dquot to the incore dquot */
667 memcpy(&dqp->q_core, ddqp, sizeof(xfs_disk_dquot_t)); 696 memcpy(&dqp->q_core, ddqp, sizeof(xfs_disk_dquot_t));
@@ -740,7 +769,7 @@ xfs_qm_idtodq(
740 * Read it from disk; xfs_dqread() takes care of 769 * Read it from disk; xfs_dqread() takes care of
741 * all the necessary initialization of dquot's fields (locks, etc) 770 * all the necessary initialization of dquot's fields (locks, etc)
742 */ 771 */
743 if ((error = xfs_qm_dqread(tp, id, dqp, flags))) { 772 if ((error = xfs_qm_dqread(&tp, id, dqp, flags))) {
744 /* 773 /*
745 * This can happen if quotas got turned off (ESRCH), 774 * This can happen if quotas got turned off (ESRCH),
746 * or if the dquot didn't exist on disk and we ask to 775 * or if the dquot didn't exist on disk and we ask to
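
The comment added in xfs_qm_dqalloc() describes why the buffer is held across xfs_bmap_finish(): that call may commit the current transaction and open a second one, so the buffer must survive the first commit and then be joined to the new transaction. A simplified, self-contained sketch of that control flow follows; every type and helper below is a stub for illustration, not the real XFS transaction API.

/* Hold a buffer across a possible mid-operation commit, then rejoin it. */
#include <stdio.h>

struct buf   { int held; int joined_to; };
struct trans { int id; };

/* Stand-in for xfs_bmap_finish(): may commit *tpp and hand back a new
 * transaction.  Here we always pretend the free list forced a commit. */
static int bmap_finish(struct trans **tpp, int *committed)
{
	static struct trans second = { 2 };

	*committed = 1;
	*tpp = &second;
	return 0;
}

int main(void)
{
	struct trans first = { 1 }, *tp = &first;
	struct buf bp = { 0, 0 };
	int committed = 0;

	bp.held = 1;                    /* like xfs_trans_bhold(): keep bp across a commit   */
	if (bmap_finish(&tp, &committed))
		return 1;

	if (committed)
		bp.joined_to = tp->id;  /* like xfs_trans_bjoin(): continue in the new trans */
	else
		bp.held = 0;            /* like xfs_trans_bhold_release(): single-trans case */

	printf("buffer held=%d joined_to=%d (now in trans %d)\n",
	       bp.held, bp.joined_to, tp->id);
	return 0;
}
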
diff --git a/fs/xfs/quota/xfs_dquot.h b/fs/xfs/quota/xfs_dquot.h
index 39175103c8e0..8ebc87176c78 100644
--- a/fs/xfs/quota/xfs_dquot.h
+++ b/fs/xfs/quota/xfs_dquot.h
@@ -1,5 +1,5 @@
1/* 1/*
2 * Copyright (c) 2000-2002 Silicon Graphics, Inc. All Rights Reserved. 2 * Copyright (c) 2000-2005 Silicon Graphics, Inc. All Rights Reserved.
3 * 3 *
4 * This program is free software; you can redistribute it and/or modify it 4 * This program is free software; you can redistribute it and/or modify it
5 * under the terms of version 2 of the GNU General Public License as 5 * under the terms of version 2 of the GNU General Public License as
@@ -113,20 +113,6 @@ typedef struct xfs_dquot {
113 113
114#define XFS_DQHOLD(dqp) ((dqp)->q_nrefs++) 114#define XFS_DQHOLD(dqp) ((dqp)->q_nrefs++)
115 115
116/*
117 * Quota Accounting/Enforcement flags
118 */
119#define XFS_ALL_QUOTA_ACCT \
120 (XFS_UQUOTA_ACCT | XFS_GQUOTA_ACCT | XFS_PQUOTA_ACCT)
121#define XFS_ALL_QUOTA_ENFD (XFS_UQUOTA_ENFD | XFS_OQUOTA_ENFD)
122#define XFS_ALL_QUOTA_CHKD (XFS_UQUOTA_CHKD | XFS_OQUOTA_CHKD)
123
124#define XFS_IS_QUOTA_RUNNING(mp) ((mp)->m_qflags & XFS_ALL_QUOTA_ACCT)
125#define XFS_IS_QUOTA_ENFORCED(mp) ((mp)->m_qflags & XFS_ALL_QUOTA_ENFD)
126#define XFS_IS_UQUOTA_RUNNING(mp) ((mp)->m_qflags & XFS_UQUOTA_ACCT)
127#define XFS_IS_PQUOTA_RUNNING(mp) ((mp)->m_qflags & XFS_PQUOTA_ACCT)
128#define XFS_IS_GQUOTA_RUNNING(mp) ((mp)->m_qflags & XFS_GQUOTA_ACCT)
129
130#ifdef DEBUG 116#ifdef DEBUG
131static inline int 117static inline int
132XFS_DQ_IS_LOCKED(xfs_dquot_t *dqp) 118XFS_DQ_IS_LOCKED(xfs_dquot_t *dqp)
diff --git a/fs/xfs/quota/xfs_dquot_item.c b/fs/xfs/quota/xfs_dquot_item.c
index f5271b7b1e84..e74eaa7dd1bc 100644
--- a/fs/xfs/quota/xfs_dquot_item.c
+++ b/fs/xfs/quota/xfs_dquot_item.c
@@ -509,6 +509,7 @@ xfs_qm_qoff_logitem_format(xfs_qoff_logitem_t *qf,
509 509
510 log_vector->i_addr = (xfs_caddr_t)&(qf->qql_format); 510 log_vector->i_addr = (xfs_caddr_t)&(qf->qql_format);
511 log_vector->i_len = sizeof(xfs_qoff_logitem_t); 511 log_vector->i_len = sizeof(xfs_qoff_logitem_t);
512 XLOG_VEC_SET_TYPE(log_vector, XLOG_REG_TYPE_QUOTAOFF);
512 qf->qql_format.qf_size = 1; 513 qf->qql_format.qf_size = 1;
513} 514}
514 515
diff --git a/fs/xfs/quota/xfs_qm.c b/fs/xfs/quota/xfs_qm.c
index f665ca8f9e96..efde16e0a913 100644
--- a/fs/xfs/quota/xfs_qm.c
+++ b/fs/xfs/quota/xfs_qm.c
@@ -1,5 +1,5 @@
1/* 1/*
2 * Copyright (c) 2000-2004 Silicon Graphics, Inc. All Rights Reserved. 2 * Copyright (c) 2000-2005 Silicon Graphics, Inc. All Rights Reserved.
3 * 3 *
4 * This program is free software; you can redistribute it and/or modify it 4 * This program is free software; you can redistribute it and/or modify it
5 * under the terms of version 2 of the GNU General Public License as 5 * under the terms of version 2 of the GNU General Public License as
@@ -365,16 +365,6 @@ xfs_qm_mount_quotas(
365 int error = 0; 365 int error = 0;
366 uint sbf; 366 uint sbf;
367 367
368 /*
369 * If a file system had quotas running earlier, but decided to
370 * mount without -o uquota/pquota/gquota options, revoke the
371 * quotachecked license, and bail out.
372 */
373 if (! XFS_IS_QUOTA_ON(mp) &&
374 (mp->m_sb.sb_qflags & XFS_ALL_QUOTA_ACCT)) {
375 mp->m_qflags = 0;
376 goto write_changes;
377 }
378 368
379 /* 369 /*
380 * If quotas on realtime volumes is not supported, we disable 370 * If quotas on realtime volumes is not supported, we disable
@@ -388,11 +378,8 @@ xfs_qm_mount_quotas(
388 goto write_changes; 378 goto write_changes;
389 } 379 }
390 380
391#if defined(DEBUG) && defined(XFS_LOUD_RECOVERY)
392 cmn_err(CE_NOTE, "Attempting to turn on disk quotas.");
393#endif
394
395 ASSERT(XFS_IS_QUOTA_RUNNING(mp)); 381 ASSERT(XFS_IS_QUOTA_RUNNING(mp));
382
396 /* 383 /*
397 * Allocate the quotainfo structure inside the mount struct, and 384 * Allocate the quotainfo structure inside the mount struct, and
398 * create quotainode(s), and change/rev superblock if necessary. 385 * create quotainode(s), and change/rev superblock if necessary.
@@ -410,19 +397,14 @@ xfs_qm_mount_quotas(
410 */ 397 */
411 if (XFS_QM_NEED_QUOTACHECK(mp) && 398 if (XFS_QM_NEED_QUOTACHECK(mp) &&
412 !(mfsi_flags & XFS_MFSI_NO_QUOTACHECK)) { 399 !(mfsi_flags & XFS_MFSI_NO_QUOTACHECK)) {
413#ifdef DEBUG
414 cmn_err(CE_NOTE, "Doing a quotacheck. Please wait.");
415#endif
416 if ((error = xfs_qm_quotacheck(mp))) { 400 if ((error = xfs_qm_quotacheck(mp))) {
417 /* Quotacheck has failed and quotas have 401 /* Quotacheck has failed and quotas have
418 * been disabled. 402 * been disabled.
419 */ 403 */
420 return XFS_ERROR(error); 404 return XFS_ERROR(error);
421 } 405 }
422#ifdef DEBUG
423 cmn_err(CE_NOTE, "Done quotacheck.");
424#endif
425 } 406 }
407
426 write_changes: 408 write_changes:
427 /* 409 /*
428 * We actually don't have to acquire the SB_LOCK at all. 410 * We actually don't have to acquire the SB_LOCK at all.
@@ -2010,7 +1992,7 @@ xfs_qm_quotacheck(
2010 ASSERT(mp->m_quotainfo != NULL); 1992 ASSERT(mp->m_quotainfo != NULL);
2011 ASSERT(xfs_Gqm != NULL); 1993 ASSERT(xfs_Gqm != NULL);
2012 xfs_qm_destroy_quotainfo(mp); 1994 xfs_qm_destroy_quotainfo(mp);
2013 xfs_mount_reset_sbqflags(mp); 1995 (void)xfs_mount_reset_sbqflags(mp);
2014 } else { 1996 } else {
2015 cmn_err(CE_NOTE, "XFS quotacheck %s: Done.", mp->m_fsname); 1997 cmn_err(CE_NOTE, "XFS quotacheck %s: Done.", mp->m_fsname);
2016 } 1998 }
diff --git a/fs/xfs/quota/xfs_qm.h b/fs/xfs/quota/xfs_qm.h
index b03eecf3b6cb..0b00b3c67015 100644
--- a/fs/xfs/quota/xfs_qm.h
+++ b/fs/xfs/quota/xfs_qm.h
@@ -184,8 +184,6 @@ typedef struct xfs_dquot_acct {
184#define XFS_QM_HOLD(xqm) ((xqm)->qm_nrefs++) 184#define XFS_QM_HOLD(xqm) ((xqm)->qm_nrefs++)
185#define XFS_QM_RELE(xqm) ((xqm)->qm_nrefs--) 185#define XFS_QM_RELE(xqm) ((xqm)->qm_nrefs--)
186 186
187extern void xfs_mount_reset_sbqflags(xfs_mount_t *);
188
189extern void xfs_qm_destroy_quotainfo(xfs_mount_t *); 187extern void xfs_qm_destroy_quotainfo(xfs_mount_t *);
190extern int xfs_qm_mount_quotas(xfs_mount_t *, int); 188extern int xfs_qm_mount_quotas(xfs_mount_t *, int);
191extern void xfs_qm_mount_quotainit(xfs_mount_t *, uint); 189extern void xfs_qm_mount_quotainit(xfs_mount_t *, uint);
diff --git a/fs/xfs/quota/xfs_qm_bhv.c b/fs/xfs/quota/xfs_qm_bhv.c
index dc3c37a1e158..8890a18a99d8 100644
--- a/fs/xfs/quota/xfs_qm_bhv.c
+++ b/fs/xfs/quota/xfs_qm_bhv.c
@@ -1,5 +1,5 @@
1/* 1/*
2 * Copyright (c) 2000-2004 Silicon Graphics, Inc. All Rights Reserved. 2 * Copyright (c) 2000-2005 Silicon Graphics, Inc. All Rights Reserved.
3 * 3 *
4 * This program is free software; you can redistribute it and/or modify it 4 * This program is free software; you can redistribute it and/or modify it
5 * under the terms of version 2 of the GNU General Public License as 5 * under the terms of version 2 of the GNU General Public License as
@@ -229,48 +229,6 @@ xfs_qm_syncall(
229 return error; 229 return error;
230} 230}
231 231
232/*
233 * Clear the quotaflags in memory and in the superblock.
234 */
235void
236xfs_mount_reset_sbqflags(
237 xfs_mount_t *mp)
238{
239 xfs_trans_t *tp;
240 unsigned long s;
241
242 mp->m_qflags = 0;
243 /*
244 * It is OK to look at sb_qflags here in mount path,
245 * without SB_LOCK.
246 */
247 if (mp->m_sb.sb_qflags == 0)
248 return;
249 s = XFS_SB_LOCK(mp);
250 mp->m_sb.sb_qflags = 0;
251 XFS_SB_UNLOCK(mp, s);
252
253 /*
254 * if the fs is readonly, let the incore superblock run
255 * with quotas off but don't flush the update out to disk
256 */
257 if (XFS_MTOVFS(mp)->vfs_flag & VFS_RDONLY)
258 return;
259#ifdef QUOTADEBUG
260 xfs_fs_cmn_err(CE_NOTE, mp, "Writing superblock quota changes");
261#endif
262 tp = xfs_trans_alloc(mp, XFS_TRANS_QM_SBCHANGE);
263 if (xfs_trans_reserve(tp, 0, mp->m_sb.sb_sectsize + 128, 0, 0,
264 XFS_DEFAULT_LOG_COUNT)) {
265 xfs_trans_cancel(tp, 0);
266 xfs_fs_cmn_err(CE_ALERT, mp,
267 "xfs_mount_reset_sbqflags: Superblock update failed!");
268 return;
269 }
270 xfs_mod_sb(tp, XFS_SB_QFLAGS);
271 xfs_trans_commit(tp, 0, NULL);
272}
273
274STATIC int 232STATIC int
275xfs_qm_newmount( 233xfs_qm_newmount(
276 xfs_mount_t *mp, 234 xfs_mount_t *mp,
diff --git a/fs/xfs/quota/xfs_qm_syscalls.c b/fs/xfs/quota/xfs_qm_syscalls.c
index 68e98962dbef..15e02e8a9d4f 100644
--- a/fs/xfs/quota/xfs_qm_syscalls.c
+++ b/fs/xfs/quota/xfs_qm_syscalls.c
@@ -1053,7 +1053,6 @@ xfs_qm_dqrele_all_inodes(
1053 struct xfs_mount *mp, 1053 struct xfs_mount *mp,
1054 uint flags) 1054 uint flags)
1055{ 1055{
1056 vmap_t vmap;
1057 xfs_inode_t *ip, *topino; 1056 xfs_inode_t *ip, *topino;
1058 uint ireclaims; 1057 uint ireclaims;
1059 vnode_t *vp; 1058 vnode_t *vp;
@@ -1061,8 +1060,8 @@ xfs_qm_dqrele_all_inodes(
1061 1060
1062 ASSERT(mp->m_quotainfo); 1061 ASSERT(mp->m_quotainfo);
1063 1062
1064again:
1065 XFS_MOUNT_ILOCK(mp); 1063 XFS_MOUNT_ILOCK(mp);
1064again:
1066 ip = mp->m_inodes; 1065 ip = mp->m_inodes;
1067 if (ip == NULL) { 1066 if (ip == NULL) {
1068 XFS_MOUNT_IUNLOCK(mp); 1067 XFS_MOUNT_IUNLOCK(mp);
@@ -1090,18 +1089,14 @@ again:
1090 } 1089 }
1091 vnode_refd = B_FALSE; 1090 vnode_refd = B_FALSE;
1092 if (xfs_ilock_nowait(ip, XFS_ILOCK_EXCL) == 0) { 1091 if (xfs_ilock_nowait(ip, XFS_ILOCK_EXCL) == 0) {
1093 /*
1094 * Sample vp mapping while holding the mplock, lest
1095 * we come across a non-existent vnode.
1096 */
1097 VMAP(vp, vmap);
1098 ireclaims = mp->m_ireclaims; 1092 ireclaims = mp->m_ireclaims;
1099 topino = mp->m_inodes; 1093 topino = mp->m_inodes;
1100 XFS_MOUNT_IUNLOCK(mp); 1094 vp = vn_grab(vp);
1095 if (!vp)
1096 goto again;
1101 1097
1098 XFS_MOUNT_IUNLOCK(mp);
1102 /* XXX restart limit ? */ 1099 /* XXX restart limit ? */
1103 if ( ! (vp = vn_get(vp, &vmap)))
1104 goto again;
1105 xfs_ilock(ip, XFS_ILOCK_EXCL); 1100 xfs_ilock(ip, XFS_ILOCK_EXCL);
1106 vnode_refd = B_TRUE; 1101 vnode_refd = B_TRUE;
1107 } else { 1102 } else {
@@ -1137,7 +1132,6 @@ again:
1137 */ 1132 */
1138 if (topino != mp->m_inodes || mp->m_ireclaims != ireclaims) { 1133 if (topino != mp->m_inodes || mp->m_ireclaims != ireclaims) {
1139 /* XXX use a sentinel */ 1134 /* XXX use a sentinel */
1140 XFS_MOUNT_IUNLOCK(mp);
1141 goto again; 1135 goto again;
1142 } 1136 }
1143 ip = ip->i_mnext; 1137 ip = ip->i_mnext;
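
The xfs_qm_dqrele_all_inodes() change replaces the sampled vmap_t/vn_get() revalidation with a vn_grab() taken while the mount's inode-list lock is still held; if the grab fails, the walk simply restarts from the head of the list. A rough sketch of that walk-with-restart shape, with a stub list and the locking implied only by comments, is shown below.

/* Walk a list, pinning each element before dropping the list lock;
 * restart from the head whenever an element cannot be pinned. */
#include <stdio.h>

struct node { int id; int busy; int released; struct node *next; };

/* Fails while the node is "busy" (e.g. being reclaimed), much like
 * vn_grab() returning NULL when igrab() cannot pin the inode. */
static int try_grab(struct node *n)
{
	if (n->busy > 0) {
		n->busy--;
		return 0;
	}
	return 1;
}

int main(void)
{
	struct node c = { 3, 0, 0, NULL };
	struct node b = { 2, 1, 0, &c };    /* busy once: forces one restart */
	struct node a = { 1, 0, 0, &b };
	struct node *head = &a, *n;

again:
	/* lock(list) */
	for (n = head; n != NULL; n = n->next) {
		if (n->released)
			continue;
		if (!try_grab(n))
			goto again;         /* could not pin it: restart the walk */
		/* unlock(list); do the per-inode work with a reference held */
		n->released = 1;
		printf("released dquots for inode %d\n", n->id);
		/* lock(list) again before continuing the walk */
	}
	/* unlock(list) */
	return 0;
}
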
diff --git a/fs/xfs/support/debug.c b/fs/xfs/support/debug.c
index 4ed7b6928cd7..4e1a5ec22fa3 100644
--- a/fs/xfs/support/debug.c
+++ b/fs/xfs/support/debug.c
@@ -31,6 +31,7 @@
31 */ 31 */
32 32
33#include "debug.h" 33#include "debug.h"
34#include "spin.h"
34 35
35#include <asm/page.h> 36#include <asm/page.h>
36#include <linux/sched.h> 37#include <linux/sched.h>
diff --git a/fs/xfs/xfs_acl.c b/fs/xfs/xfs_acl.c
index 8d01dce8c532..92fd1d67f878 100644
--- a/fs/xfs/xfs_acl.c
+++ b/fs/xfs/xfs_acl.c
@@ -85,7 +85,7 @@ xfs_acl_vhasacl_default(
85{ 85{
86 int error; 86 int error;
87 87
88 if (vp->v_type != VDIR) 88 if (!VN_ISDIR(vp))
89 return 0; 89 return 0;
90 xfs_acl_get_attr(vp, NULL, _ACL_TYPE_DEFAULT, ATTR_KERNOVAL, &error); 90 xfs_acl_get_attr(vp, NULL, _ACL_TYPE_DEFAULT, ATTR_KERNOVAL, &error);
91 return (error == 0); 91 return (error == 0);
@@ -389,7 +389,7 @@ xfs_acl_allow_set(
389 389
390 if (vp->v_inode.i_flags & (S_IMMUTABLE|S_APPEND)) 390 if (vp->v_inode.i_flags & (S_IMMUTABLE|S_APPEND))
391 return EPERM; 391 return EPERM;
392 if (kind == _ACL_TYPE_DEFAULT && vp->v_type != VDIR) 392 if (kind == _ACL_TYPE_DEFAULT && !VN_ISDIR(vp))
393 return ENOTDIR; 393 return ENOTDIR;
394 if (vp->v_vfsp->vfs_flag & VFS_RDONLY) 394 if (vp->v_vfsp->vfs_flag & VFS_RDONLY)
395 return EROFS; 395 return EROFS;
@@ -750,7 +750,7 @@ xfs_acl_inherit(
750 * If the new file is a directory, its default ACL is a copy of 750 * If the new file is a directory, its default ACL is a copy of
751 * the containing directory's default ACL. 751 * the containing directory's default ACL.
752 */ 752 */
753 if (vp->v_type == VDIR) 753 if (VN_ISDIR(vp))
754 xfs_acl_set_attr(vp, pdaclp, _ACL_TYPE_DEFAULT, &error); 754 xfs_acl_set_attr(vp, pdaclp, _ACL_TYPE_DEFAULT, &error);
755 if (!error && !basicperms) 755 if (!error && !basicperms)
756 xfs_acl_set_attr(vp, cacl, _ACL_TYPE_ACCESS, &error); 756 xfs_acl_set_attr(vp, cacl, _ACL_TYPE_ACCESS, &error);
diff --git a/fs/xfs/xfs_bmap.c b/fs/xfs/xfs_bmap.c
index 6f5d283888aa..3e76def1283d 100644
--- a/fs/xfs/xfs_bmap.c
+++ b/fs/xfs/xfs_bmap.c
@@ -4754,10 +4754,20 @@ xfs_bmapi(
4754 error = xfs_mod_incore_sb(mp, 4754 error = xfs_mod_incore_sb(mp,
4755 XFS_SBS_FDBLOCKS, 4755 XFS_SBS_FDBLOCKS,
4756 -(alen), rsvd); 4756 -(alen), rsvd);
4757 if (!error) 4757 if (!error) {
4758 error = xfs_mod_incore_sb(mp, 4758 error = xfs_mod_incore_sb(mp,
4759 XFS_SBS_FDBLOCKS, 4759 XFS_SBS_FDBLOCKS,
4760 -(indlen), rsvd); 4760 -(indlen), rsvd);
4761 if (error && rt) {
4762 xfs_mod_incore_sb(ip->i_mount,
4763 XFS_SBS_FREXTENTS,
4764 extsz, rsvd);
4765 } else if (error) {
4766 xfs_mod_incore_sb(ip->i_mount,
4767 XFS_SBS_FDBLOCKS,
4768 alen, rsvd);
4769 }
4770 }
4761 4771
4762 if (error) { 4772 if (error) {
4763 if (XFS_IS_QUOTA_ON(ip->i_mount)) 4773 if (XFS_IS_QUOTA_ON(ip->i_mount))
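
The xfs_bmapi() hunk makes the delayed-allocation reservation undo itself: if the second in-core superblock modification (for the indirect blocks) fails, the first one (data blocks or realtime extents) is returned before the error path runs. A generic sketch of that reserve-two-undo-one pattern, using purely hypothetical counters rather than the XFS superblock fields, follows.

/* Reserve from two counters; if the second reservation fails, give back
 * the first so nothing leaks.  Illustrative counters only. */
#include <stdio.h>

static long data_blocks   = 10;
static long indirect_blks = 0;          /* nothing left: second step fails */

static int mod_counter(long *ctr, long delta)
{
	if (*ctr + delta < 0)
		return -1;                  /* would go negative: reject */
	*ctr += delta;
	return 0;
}

static int reserve(long alen, long indlen)
{
	int error = mod_counter(&data_blocks, -alen);

	if (!error) {
		error = mod_counter(&indirect_blks, -indlen);
		if (error)
			mod_counter(&data_blocks, alen);   /* undo the first step */
	}
	return error;
}

int main(void)
{
	int error = reserve(4, 2);

	printf("error=%d data_blocks=%ld indirect_blks=%ld\n",
	       error, data_blocks, indirect_blks);        /* -1, 10, 0 */
	return 0;
}
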
diff --git a/fs/xfs/xfs_buf_item.c b/fs/xfs/xfs_buf_item.c
index 30b8285ad476..a264657acfd9 100644
--- a/fs/xfs/xfs_buf_item.c
+++ b/fs/xfs/xfs_buf_item.c
@@ -274,6 +274,7 @@ xfs_buf_item_format(
274 ((bip->bli_format.blf_map_size - 1) * sizeof(uint))); 274 ((bip->bli_format.blf_map_size - 1) * sizeof(uint)));
275 vecp->i_addr = (xfs_caddr_t)&bip->bli_format; 275 vecp->i_addr = (xfs_caddr_t)&bip->bli_format;
276 vecp->i_len = base_size; 276 vecp->i_len = base_size;
277 XLOG_VEC_SET_TYPE(vecp, XLOG_REG_TYPE_BFORMAT);
277 vecp++; 278 vecp++;
278 nvecs = 1; 279 nvecs = 1;
279 280
@@ -320,12 +321,14 @@ xfs_buf_item_format(
320 buffer_offset = first_bit * XFS_BLI_CHUNK; 321 buffer_offset = first_bit * XFS_BLI_CHUNK;
321 vecp->i_addr = xfs_buf_offset(bp, buffer_offset); 322 vecp->i_addr = xfs_buf_offset(bp, buffer_offset);
322 vecp->i_len = nbits * XFS_BLI_CHUNK; 323 vecp->i_len = nbits * XFS_BLI_CHUNK;
324 XLOG_VEC_SET_TYPE(vecp, XLOG_REG_TYPE_BCHUNK);
323 nvecs++; 325 nvecs++;
324 break; 326 break;
325 } else if (next_bit != last_bit + 1) { 327 } else if (next_bit != last_bit + 1) {
326 buffer_offset = first_bit * XFS_BLI_CHUNK; 328 buffer_offset = first_bit * XFS_BLI_CHUNK;
327 vecp->i_addr = xfs_buf_offset(bp, buffer_offset); 329 vecp->i_addr = xfs_buf_offset(bp, buffer_offset);
328 vecp->i_len = nbits * XFS_BLI_CHUNK; 330 vecp->i_len = nbits * XFS_BLI_CHUNK;
331 XLOG_VEC_SET_TYPE(vecp, XLOG_REG_TYPE_BCHUNK);
329 nvecs++; 332 nvecs++;
330 vecp++; 333 vecp++;
331 first_bit = next_bit; 334 first_bit = next_bit;
@@ -337,6 +340,7 @@ xfs_buf_item_format(
337 buffer_offset = first_bit * XFS_BLI_CHUNK; 340 buffer_offset = first_bit * XFS_BLI_CHUNK;
338 vecp->i_addr = xfs_buf_offset(bp, buffer_offset); 341 vecp->i_addr = xfs_buf_offset(bp, buffer_offset);
339 vecp->i_len = nbits * XFS_BLI_CHUNK; 342 vecp->i_len = nbits * XFS_BLI_CHUNK;
343 XLOG_VEC_SET_TYPE(vecp, XLOG_REG_TYPE_BCHUNK);
340/* You would think we need to bump the nvecs here too, but we do not; 344/* You would think we need to bump the nvecs here too, but we do not;
341 * this number is used by recovery, and it gets confused by the boundary 345 * this number is used by recovery, and it gets confused by the boundary
342 * split here 346 * split here
diff --git a/fs/xfs/xfs_dmapi.h b/fs/xfs/xfs_dmapi.h
index 55c17adaaa37..19e872856f6b 100644
--- a/fs/xfs/xfs_dmapi.h
+++ b/fs/xfs/xfs_dmapi.h
@@ -1,5 +1,5 @@
1/* 1/*
2 * Copyright (c) 2000-2004 Silicon Graphics, Inc. All Rights Reserved. 2 * Copyright (c) 2000-2005 Silicon Graphics, Inc. All Rights Reserved.
3 * 3 *
4 * This program is free software; you can redistribute it and/or modify it 4 * This program is free software; you can redistribute it and/or modify it
5 * under the terms of version 2 of the GNU General Public License as 5 * under the terms of version 2 of the GNU General Public License as
diff --git a/fs/xfs/xfs_extfree_item.c b/fs/xfs/xfs_extfree_item.c
index db7cbd1bc857..cc7d1494a45d 100644
--- a/fs/xfs/xfs_extfree_item.c
+++ b/fs/xfs/xfs_extfree_item.c
@@ -107,6 +107,7 @@ xfs_efi_item_format(xfs_efi_log_item_t *efip,
107 107
108 log_vector->i_addr = (xfs_caddr_t)&(efip->efi_format); 108 log_vector->i_addr = (xfs_caddr_t)&(efip->efi_format);
109 log_vector->i_len = size; 109 log_vector->i_len = size;
110 XLOG_VEC_SET_TYPE(log_vector, XLOG_REG_TYPE_EFI_FORMAT);
110 ASSERT(size >= sizeof(xfs_efi_log_format_t)); 111 ASSERT(size >= sizeof(xfs_efi_log_format_t));
111} 112}
112 113
@@ -426,6 +427,7 @@ xfs_efd_item_format(xfs_efd_log_item_t *efdp,
426 427
427 log_vector->i_addr = (xfs_caddr_t)&(efdp->efd_format); 428 log_vector->i_addr = (xfs_caddr_t)&(efdp->efd_format);
428 log_vector->i_len = size; 429 log_vector->i_len = size;
430 XLOG_VEC_SET_TYPE(log_vector, XLOG_REG_TYPE_EFD_FORMAT);
429 ASSERT(size >= sizeof(xfs_efd_log_format_t)); 431 ASSERT(size >= sizeof(xfs_efd_log_format_t));
430} 432}
431 433
diff --git a/fs/xfs/xfs_iget.c b/fs/xfs/xfs_iget.c
index d3da00045f26..0d9ae8fb4138 100644
--- a/fs/xfs/xfs_iget.c
+++ b/fs/xfs/xfs_iget.c
@@ -30,6 +30,8 @@
30 * http://oss.sgi.com/projects/GenInfo/SGIGPLNoticeExplan/ 30 * http://oss.sgi.com/projects/GenInfo/SGIGPLNoticeExplan/
31 */ 31 */
32 32
33#include <linux/delay.h>
34
33#include "xfs.h" 35#include "xfs.h"
34 36
35#include "xfs_macros.h" 37#include "xfs_macros.h"
@@ -505,17 +507,15 @@ xfs_iget(
505 vnode_t *vp = NULL; 507 vnode_t *vp = NULL;
506 int error; 508 int error;
507 509
508retry:
509 XFS_STATS_INC(xs_ig_attempts); 510 XFS_STATS_INC(xs_ig_attempts);
510 511
512retry:
511 if ((inode = iget_locked(XFS_MTOVFS(mp)->vfs_super, ino))) { 513 if ((inode = iget_locked(XFS_MTOVFS(mp)->vfs_super, ino))) {
512 bhv_desc_t *bdp; 514 bhv_desc_t *bdp;
513 xfs_inode_t *ip; 515 xfs_inode_t *ip;
514 int newnode;
515 516
516 vp = LINVFS_GET_VP(inode); 517 vp = LINVFS_GET_VP(inode);
517 if (inode->i_state & I_NEW) { 518 if (inode->i_state & I_NEW) {
518inode_allocate:
519 vn_initialize(inode); 519 vn_initialize(inode);
520 error = xfs_iget_core(vp, mp, tp, ino, flags, 520 error = xfs_iget_core(vp, mp, tp, ino, flags,
521 lock_flags, ipp, bno); 521 lock_flags, ipp, bno);
@@ -526,32 +526,25 @@ inode_allocate:
526 iput(inode); 526 iput(inode);
527 } 527 }
528 } else { 528 } else {
529 /* These are true if the inode is in inactive or 529 /*
530 * reclaim. The linux inode is about to go away, 530 * If the inode is not fully constructed due to
531 * wait for that path to finish, and try again. 531 * filehandle mismatches, wait for the inode to go
531 * wait for that path to finish, and try again. 531 * filehandle mismatches, wait for the inode to go
532 * away and try again.
533 *
534 * iget_locked will call __wait_on_freeing_inode
535 * to wait for the inode to go away.
532 */ 536 */
533 if (vp->v_flag & (VINACT | VRECLM)) { 537 if (is_bad_inode(inode) ||
534 vn_wait(vp); 538 ((bdp = vn_bhv_lookup(VN_BHV_HEAD(vp),
539 &xfs_vnodeops)) == NULL)) {
535 iput(inode); 540 iput(inode);
541 delay(1);
536 goto retry; 542 goto retry;
537 } 543 }
538 544
539 if (is_bad_inode(inode)) {
540 iput(inode);
541 return EIO;
542 }
543
544 bdp = vn_bhv_lookup(VN_BHV_HEAD(vp), &xfs_vnodeops);
545 if (bdp == NULL) {
546 XFS_STATS_INC(xs_ig_dup);
547 goto inode_allocate;
548 }
549 ip = XFS_BHVTOI(bdp); 545 ip = XFS_BHVTOI(bdp);
550 if (lock_flags != 0) 546 if (lock_flags != 0)
551 xfs_ilock(ip, lock_flags); 547 xfs_ilock(ip, lock_flags);
552 newnode = (ip->i_d.di_mode == 0);
553 if (newnode)
554 xfs_iocore_inode_reinit(ip);
555 XFS_STATS_INC(xs_ig_found); 548 XFS_STATS_INC(xs_ig_found);
556 *ipp = ip; 549 *ipp = ip;
557 error = 0; 550 error = 0;
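
In xfs_iget(), a half-constructed or bad in-core inode is no longer special-cased through vn_wait() and the inode_allocate label; the lookup just drops its reference, sleeps for a tick, and retries until iget_locked() hands back a usable inode. A toy version of that retry loop is sketched below; the usleep() call merely stands in for the kernel's delay(1), and all the lookup machinery is stubbed.

/* Toy retry loop: keep looking an object up until it is fully set up,
 * backing off briefly between attempts. */
#include <stdio.h>
#include <unistd.h>

struct obj { int constructed; };

/* Pretend the object becomes usable on the third lookup. */
static struct obj *lookup(void)
{
	static struct obj o = { 0 };
	static int calls;

	if (++calls >= 3)
		o.constructed = 1;
	return &o;
}

int main(void)
{
	struct obj *o;
	int attempts = 0;

retry:
	o = lookup();
	if (!o->constructed) {
		attempts++;
		usleep(1000);           /* like delay(1): give the owner time to finish */
		goto retry;
	}
	printf("usable after %d retries\n", attempts);
	return 0;
}
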
diff --git a/fs/xfs/xfs_inode.c b/fs/xfs/xfs_inode.c
index 34bdf5909687..db43308aae93 100644
--- a/fs/xfs/xfs_inode.c
+++ b/fs/xfs/xfs_inode.c
@@ -1128,7 +1128,6 @@ xfs_ialloc(
1128 ASSERT(ip != NULL); 1128 ASSERT(ip != NULL);
1129 1129
1130 vp = XFS_ITOV(ip); 1130 vp = XFS_ITOV(ip);
1131 vp->v_type = IFTOVT(mode);
1132 ip->i_d.di_mode = (__uint16_t)mode; 1131 ip->i_d.di_mode = (__uint16_t)mode;
1133 ip->i_d.di_onlink = 0; 1132 ip->i_d.di_onlink = 0;
1134 ip->i_d.di_nlink = nlink; 1133 ip->i_d.di_nlink = nlink;
@@ -1250,7 +1249,7 @@ xfs_ialloc(
1250 */ 1249 */
1251 xfs_trans_log_inode(tp, ip, flags); 1250 xfs_trans_log_inode(tp, ip, flags);
1252 1251
1253 /* now that we have a v_type we can set Linux inode ops (& unlock) */ 1252 /* now that we have an i_mode we can set Linux inode ops (& unlock) */
1254 VFS_INIT_VNODE(XFS_MTOVFS(tp->t_mountp), vp, XFS_ITOBHV(ip), 1); 1253 VFS_INIT_VNODE(XFS_MTOVFS(tp->t_mountp), vp, XFS_ITOBHV(ip), 1);
1255 1254
1256 *ipp = ip; 1255 *ipp = ip;
diff --git a/fs/xfs/xfs_inode_item.c b/fs/xfs/xfs_inode_item.c
index 0eed30f5cb19..276ec70eb7f9 100644
--- a/fs/xfs/xfs_inode_item.c
+++ b/fs/xfs/xfs_inode_item.c
@@ -248,6 +248,7 @@ xfs_inode_item_format(
248 248
249 vecp->i_addr = (xfs_caddr_t)&iip->ili_format; 249 vecp->i_addr = (xfs_caddr_t)&iip->ili_format;
250 vecp->i_len = sizeof(xfs_inode_log_format_t); 250 vecp->i_len = sizeof(xfs_inode_log_format_t);
251 XLOG_VEC_SET_TYPE(vecp, XLOG_REG_TYPE_IFORMAT);
251 vecp++; 252 vecp++;
252 nvecs = 1; 253 nvecs = 1;
253 254
@@ -292,6 +293,7 @@ xfs_inode_item_format(
292 293
293 vecp->i_addr = (xfs_caddr_t)&ip->i_d; 294 vecp->i_addr = (xfs_caddr_t)&ip->i_d;
294 vecp->i_len = sizeof(xfs_dinode_core_t); 295 vecp->i_len = sizeof(xfs_dinode_core_t);
296 XLOG_VEC_SET_TYPE(vecp, XLOG_REG_TYPE_ICORE);
295 vecp++; 297 vecp++;
296 nvecs++; 298 nvecs++;
297 iip->ili_format.ilf_fields |= XFS_ILOG_CORE; 299 iip->ili_format.ilf_fields |= XFS_ILOG_CORE;
@@ -349,6 +351,7 @@ xfs_inode_item_format(
349 vecp->i_addr = 351 vecp->i_addr =
350 (char *)(ip->i_df.if_u1.if_extents); 352 (char *)(ip->i_df.if_u1.if_extents);
351 vecp->i_len = ip->i_df.if_bytes; 353 vecp->i_len = ip->i_df.if_bytes;
354 XLOG_VEC_SET_TYPE(vecp, XLOG_REG_TYPE_IEXT);
352 } else 355 } else
353#endif 356#endif
354 { 357 {
@@ -367,6 +370,7 @@ xfs_inode_item_format(
367 vecp->i_addr = (xfs_caddr_t)ext_buffer; 370 vecp->i_addr = (xfs_caddr_t)ext_buffer;
368 vecp->i_len = xfs_iextents_copy(ip, ext_buffer, 371 vecp->i_len = xfs_iextents_copy(ip, ext_buffer,
369 XFS_DATA_FORK); 372 XFS_DATA_FORK);
373 XLOG_VEC_SET_TYPE(vecp, XLOG_REG_TYPE_IEXT);
370 } 374 }
371 ASSERT(vecp->i_len <= ip->i_df.if_bytes); 375 ASSERT(vecp->i_len <= ip->i_df.if_bytes);
372 iip->ili_format.ilf_dsize = vecp->i_len; 376 iip->ili_format.ilf_dsize = vecp->i_len;
@@ -384,6 +388,7 @@ xfs_inode_item_format(
384 ASSERT(ip->i_df.if_broot != NULL); 388 ASSERT(ip->i_df.if_broot != NULL);
385 vecp->i_addr = (xfs_caddr_t)ip->i_df.if_broot; 389 vecp->i_addr = (xfs_caddr_t)ip->i_df.if_broot;
386 vecp->i_len = ip->i_df.if_broot_bytes; 390 vecp->i_len = ip->i_df.if_broot_bytes;
391 XLOG_VEC_SET_TYPE(vecp, XLOG_REG_TYPE_IBROOT);
387 vecp++; 392 vecp++;
388 nvecs++; 393 nvecs++;
389 iip->ili_format.ilf_dsize = ip->i_df.if_broot_bytes; 394 iip->ili_format.ilf_dsize = ip->i_df.if_broot_bytes;
@@ -409,6 +414,7 @@ xfs_inode_item_format(
409 ASSERT((ip->i_df.if_real_bytes == 0) || 414 ASSERT((ip->i_df.if_real_bytes == 0) ||
410 (ip->i_df.if_real_bytes == data_bytes)); 415 (ip->i_df.if_real_bytes == data_bytes));
411 vecp->i_len = (int)data_bytes; 416 vecp->i_len = (int)data_bytes;
417 XLOG_VEC_SET_TYPE(vecp, XLOG_REG_TYPE_ILOCAL);
412 vecp++; 418 vecp++;
413 nvecs++; 419 nvecs++;
414 iip->ili_format.ilf_dsize = (unsigned)data_bytes; 420 iip->ili_format.ilf_dsize = (unsigned)data_bytes;
@@ -486,6 +492,7 @@ xfs_inode_item_format(
486 vecp->i_len = xfs_iextents_copy(ip, ext_buffer, 492 vecp->i_len = xfs_iextents_copy(ip, ext_buffer,
487 XFS_ATTR_FORK); 493 XFS_ATTR_FORK);
488#endif 494#endif
495 XLOG_VEC_SET_TYPE(vecp, XLOG_REG_TYPE_IATTR_EXT);
489 iip->ili_format.ilf_asize = vecp->i_len; 496 iip->ili_format.ilf_asize = vecp->i_len;
490 vecp++; 497 vecp++;
491 nvecs++; 498 nvecs++;
@@ -500,6 +507,7 @@ xfs_inode_item_format(
500 ASSERT(ip->i_afp->if_broot != NULL); 507 ASSERT(ip->i_afp->if_broot != NULL);
501 vecp->i_addr = (xfs_caddr_t)ip->i_afp->if_broot; 508 vecp->i_addr = (xfs_caddr_t)ip->i_afp->if_broot;
502 vecp->i_len = ip->i_afp->if_broot_bytes; 509 vecp->i_len = ip->i_afp->if_broot_bytes;
510 XLOG_VEC_SET_TYPE(vecp, XLOG_REG_TYPE_IATTR_BROOT);
503 vecp++; 511 vecp++;
504 nvecs++; 512 nvecs++;
505 iip->ili_format.ilf_asize = ip->i_afp->if_broot_bytes; 513 iip->ili_format.ilf_asize = ip->i_afp->if_broot_bytes;
@@ -523,6 +531,7 @@ xfs_inode_item_format(
523 ASSERT((ip->i_afp->if_real_bytes == 0) || 531 ASSERT((ip->i_afp->if_real_bytes == 0) ||
524 (ip->i_afp->if_real_bytes == data_bytes)); 532 (ip->i_afp->if_real_bytes == data_bytes));
525 vecp->i_len = (int)data_bytes; 533 vecp->i_len = (int)data_bytes;
534 XLOG_VEC_SET_TYPE(vecp, XLOG_REG_TYPE_IATTR_LOCAL);
526 vecp++; 535 vecp++;
527 nvecs++; 536 nvecs++;
528 iip->ili_format.ilf_asize = (unsigned)data_bytes; 537 iip->ili_format.ilf_asize = (unsigned)data_bytes;
diff --git a/fs/xfs/xfs_iomap.c b/fs/xfs/xfs_iomap.c
index 2edd6769e5d3..d0f5be63cddb 100644
--- a/fs/xfs/xfs_iomap.c
+++ b/fs/xfs/xfs_iomap.c
@@ -226,13 +226,12 @@ xfs_iomap(
226 xfs_iomap_enter_trace(XFS_IOMAP_READ_ENTER, io, offset, count); 226 xfs_iomap_enter_trace(XFS_IOMAP_READ_ENTER, io, offset, count);
227 lockmode = XFS_LCK_MAP_SHARED(mp, io); 227 lockmode = XFS_LCK_MAP_SHARED(mp, io);
228 bmapi_flags = XFS_BMAPI_ENTIRE; 228 bmapi_flags = XFS_BMAPI_ENTIRE;
229 if (flags & BMAPI_IGNSTATE)
230 bmapi_flags |= XFS_BMAPI_IGSTATE;
231 break; 229 break;
232 case BMAPI_WRITE: 230 case BMAPI_WRITE:
233 xfs_iomap_enter_trace(XFS_IOMAP_WRITE_ENTER, io, offset, count); 231 xfs_iomap_enter_trace(XFS_IOMAP_WRITE_ENTER, io, offset, count);
234 lockmode = XFS_ILOCK_EXCL|XFS_EXTSIZE_WR; 232 lockmode = XFS_ILOCK_EXCL|XFS_EXTSIZE_WR;
235 bmapi_flags = 0; 233 if (flags & BMAPI_IGNSTATE)
234 bmapi_flags |= XFS_BMAPI_IGSTATE|XFS_BMAPI_ENTIRE;
236 XFS_ILOCK(mp, io, lockmode); 235 XFS_ILOCK(mp, io, lockmode);
237 break; 236 break;
238 case BMAPI_ALLOCATE: 237 case BMAPI_ALLOCATE:
@@ -391,9 +390,9 @@ xfs_iomap_write_direct(
391 xfs_bmbt_irec_t imap[XFS_WRITE_IMAPS], *imapp; 390 xfs_bmbt_irec_t imap[XFS_WRITE_IMAPS], *imapp;
392 xfs_bmap_free_t free_list; 391 xfs_bmap_free_t free_list;
393 int aeof; 392 int aeof;
394 xfs_filblks_t datablocks, qblocks, resblks; 393 xfs_filblks_t qblocks, resblks;
395 int committed; 394 int committed;
396 int numrtextents; 395 int resrtextents;
397 396
398 /* 397 /*
399 * Make sure that the dquots are there. This doesn't hold 398 * Make sure that the dquots are there. This doesn't hold
@@ -434,14 +433,14 @@ xfs_iomap_write_direct(
434 433
435 if (!(extsz = ip->i_d.di_extsize)) 434 if (!(extsz = ip->i_d.di_extsize))
436 extsz = mp->m_sb.sb_rextsize; 435 extsz = mp->m_sb.sb_rextsize;
437 numrtextents = qblocks = (count_fsb + extsz - 1); 436 resrtextents = qblocks = (count_fsb + extsz - 1);
438 do_div(numrtextents, mp->m_sb.sb_rextsize); 437 do_div(resrtextents, mp->m_sb.sb_rextsize);
438 resblks = XFS_DIOSTRAT_SPACE_RES(mp, 0);
439 quota_flag = XFS_QMOPT_RES_RTBLKS; 439 quota_flag = XFS_QMOPT_RES_RTBLKS;
440 datablocks = 0;
441 } else { 440 } else {
442 datablocks = qblocks = count_fsb; 441 resrtextents = 0;
442 resblks = qblocks = XFS_DIOSTRAT_SPACE_RES(mp, count_fsb);
443 quota_flag = XFS_QMOPT_RES_REGBLKS; 443 quota_flag = XFS_QMOPT_RES_REGBLKS;
444 numrtextents = 0;
445 } 444 }
446 445
447 /* 446 /*
@@ -449,9 +448,8 @@ xfs_iomap_write_direct(
449 */ 448 */
450 xfs_iunlock(ip, XFS_ILOCK_EXCL); 449 xfs_iunlock(ip, XFS_ILOCK_EXCL);
451 tp = xfs_trans_alloc(mp, XFS_TRANS_DIOSTRAT); 450 tp = xfs_trans_alloc(mp, XFS_TRANS_DIOSTRAT);
452 resblks = XFS_DIOSTRAT_SPACE_RES(mp, datablocks);
453 error = xfs_trans_reserve(tp, resblks, 451 error = xfs_trans_reserve(tp, resblks,
454 XFS_WRITE_LOG_RES(mp), numrtextents, 452 XFS_WRITE_LOG_RES(mp), resrtextents,
455 XFS_TRANS_PERM_LOG_RES, 453 XFS_TRANS_PERM_LOG_RES,
456 XFS_WRITE_LOG_COUNT); 454 XFS_WRITE_LOG_COUNT);
457 455
diff --git a/fs/xfs/xfs_log.c b/fs/xfs/xfs_log.c
index 1cd2ac163877..54a6f1142403 100644
--- a/fs/xfs/xfs_log.c
+++ b/fs/xfs/xfs_log.c
@@ -159,11 +159,15 @@ xfs_buftarg_t *xlog_target;
159void 159void
160xlog_trace_loggrant(xlog_t *log, xlog_ticket_t *tic, xfs_caddr_t string) 160xlog_trace_loggrant(xlog_t *log, xlog_ticket_t *tic, xfs_caddr_t string)
161{ 161{
162 if (! log->l_grant_trace) { 162 unsigned long cnts;
163 log->l_grant_trace = ktrace_alloc(1024, KM_NOSLEEP); 163
164 if (! log->l_grant_trace) 164 if (!log->l_grant_trace) {
165 log->l_grant_trace = ktrace_alloc(2048, KM_NOSLEEP);
166 if (!log->l_grant_trace)
165 return; 167 return;
166 } 168 }
169 /* ticket counts are 1 byte each */
170 cnts = ((unsigned long)tic->t_ocnt) | ((unsigned long)tic->t_cnt) << 8;
167 171
168 ktrace_enter(log->l_grant_trace, 172 ktrace_enter(log->l_grant_trace,
169 (void *)tic, 173 (void *)tic,
@@ -178,10 +182,10 @@ xlog_trace_loggrant(xlog_t *log, xlog_ticket_t *tic, xfs_caddr_t string)
178 (void *)((unsigned long)CYCLE_LSN(log->l_tail_lsn)), 182 (void *)((unsigned long)CYCLE_LSN(log->l_tail_lsn)),
179 (void *)((unsigned long)BLOCK_LSN(log->l_tail_lsn)), 183 (void *)((unsigned long)BLOCK_LSN(log->l_tail_lsn)),
180 (void *)string, 184 (void *)string,
181 (void *)((unsigned long)13), 185 (void *)((unsigned long)tic->t_trans_type),
182 (void *)((unsigned long)14), 186 (void *)cnts,
183 (void *)((unsigned long)15), 187 (void *)((unsigned long)tic->t_curr_res),
184 (void *)((unsigned long)16)); 188 (void *)((unsigned long)tic->t_unit_res));
185} 189}
186 190
187void 191void
@@ -274,9 +278,11 @@ xfs_log_done(xfs_mount_t *mp,
274 * Release ticket if not permanent reservation or a specific 278 * Release ticket if not permanent reservation or a specific
275 * request has been made to release a permanent reservation. 279 * request has been made to release a permanent reservation.
276 */ 280 */
281 xlog_trace_loggrant(log, ticket, "xfs_log_done: (non-permanent)");
277 xlog_ungrant_log_space(log, ticket); 282 xlog_ungrant_log_space(log, ticket);
278 xlog_state_put_ticket(log, ticket); 283 xlog_state_put_ticket(log, ticket);
279 } else { 284 } else {
285 xlog_trace_loggrant(log, ticket, "xfs_log_done: (permanent)");
280 xlog_regrant_reserve_log_space(log, ticket); 286 xlog_regrant_reserve_log_space(log, ticket);
281 } 287 }
282 288
@@ -399,7 +405,8 @@ xfs_log_reserve(xfs_mount_t *mp,
399 int cnt, 405 int cnt,
400 xfs_log_ticket_t *ticket, 406 xfs_log_ticket_t *ticket,
401 __uint8_t client, 407 __uint8_t client,
402 uint flags) 408 uint flags,
409 uint t_type)
403{ 410{
404 xlog_t *log = mp->m_log; 411 xlog_t *log = mp->m_log;
405 xlog_ticket_t *internal_ticket; 412 xlog_ticket_t *internal_ticket;
@@ -421,13 +428,19 @@ xfs_log_reserve(xfs_mount_t *mp,
421 if (*ticket != NULL) { 428 if (*ticket != NULL) {
422 ASSERT(flags & XFS_LOG_PERM_RESERV); 429 ASSERT(flags & XFS_LOG_PERM_RESERV);
423 internal_ticket = (xlog_ticket_t *)*ticket; 430 internal_ticket = (xlog_ticket_t *)*ticket;
431 xlog_trace_loggrant(log, internal_ticket, "xfs_log_reserve: existing ticket (permanent trans)");
424 xlog_grant_push_ail(mp, internal_ticket->t_unit_res); 432 xlog_grant_push_ail(mp, internal_ticket->t_unit_res);
425 retval = xlog_regrant_write_log_space(log, internal_ticket); 433 retval = xlog_regrant_write_log_space(log, internal_ticket);
426 } else { 434 } else {
427 /* may sleep if need to allocate more tickets */ 435 /* may sleep if need to allocate more tickets */
428 internal_ticket = xlog_ticket_get(log, unit_bytes, cnt, 436 internal_ticket = xlog_ticket_get(log, unit_bytes, cnt,
429 client, flags); 437 client, flags);
438 internal_ticket->t_trans_type = t_type;
430 *ticket = internal_ticket; 439 *ticket = internal_ticket;
440 xlog_trace_loggrant(log, internal_ticket,
441 (internal_ticket->t_flags & XLOG_TIC_PERM_RESERV) ?
442 "xfs_log_reserve: create new ticket (permanent trans)" :
443 "xfs_log_reserve: create new ticket");
431 xlog_grant_push_ail(mp, 444 xlog_grant_push_ail(mp,
432 (internal_ticket->t_unit_res * 445 (internal_ticket->t_unit_res *
433 internal_ticket->t_cnt)); 446 internal_ticket->t_cnt));
@@ -601,8 +614,9 @@ xfs_log_unmount_write(xfs_mount_t *mp)
601 if (! (XLOG_FORCED_SHUTDOWN(log))) { 614 if (! (XLOG_FORCED_SHUTDOWN(log))) {
602 reg[0].i_addr = (void*)&magic; 615 reg[0].i_addr = (void*)&magic;
603 reg[0].i_len = sizeof(magic); 616 reg[0].i_len = sizeof(magic);
617 XLOG_VEC_SET_TYPE(&reg[0], XLOG_REG_TYPE_UNMOUNT);
604 618
605 error = xfs_log_reserve(mp, 600, 1, &tic, XFS_LOG, 0); 619 error = xfs_log_reserve(mp, 600, 1, &tic, XFS_LOG, 0, 0);
606 if (!error) { 620 if (!error) {
607 /* remove inited flag */ 621 /* remove inited flag */
608 ((xlog_ticket_t *)tic)->t_flags = 0; 622 ((xlog_ticket_t *)tic)->t_flags = 0;
@@ -1272,6 +1286,7 @@ xlog_commit_record(xfs_mount_t *mp,
1272 1286
1273 reg[0].i_addr = NULL; 1287 reg[0].i_addr = NULL;
1274 reg[0].i_len = 0; 1288 reg[0].i_len = 0;
1289 XLOG_VEC_SET_TYPE(&reg[0], XLOG_REG_TYPE_COMMIT);
1275 1290
1276 ASSERT_ALWAYS(iclog); 1291 ASSERT_ALWAYS(iclog);
1277 if ((error = xlog_write(mp, reg, 1, ticket, commitlsnp, 1292 if ((error = xlog_write(mp, reg, 1, ticket, commitlsnp,
@@ -1605,6 +1620,117 @@ xlog_state_finish_copy(xlog_t *log,
1605 1620
1606 1621
1607/* 1622/*
1623 * print out info relating to regions written which consume
1624 * the reservation
1625 */
1626#if defined(XFS_LOG_RES_DEBUG)
1627STATIC void
1628xlog_print_tic_res(xfs_mount_t *mp, xlog_ticket_t *ticket)
1629{
1630 uint i;
1631 uint ophdr_spc = ticket->t_res_num_ophdrs * (uint)sizeof(xlog_op_header_t);
1632
1633 /* match with XLOG_REG_TYPE_* in xfs_log.h */
1634 static char *res_type_str[XLOG_REG_TYPE_MAX] = {
1635 "bformat",
1636 "bchunk",
1637 "efi_format",
1638 "efd_format",
1639 "iformat",
1640 "icore",
1641 "iext",
1642 "ibroot",
1643 "ilocal",
1644 "iattr_ext",
1645 "iattr_broot",
1646 "iattr_local",
1647 "qformat",
1648 "dquot",
1649 "quotaoff",
1650 "LR header",
1651 "unmount",
1652 "commit",
1653 "trans header"
1654 };
1655 static char *trans_type_str[XFS_TRANS_TYPE_MAX] = {
1656 "SETATTR_NOT_SIZE",
1657 "SETATTR_SIZE",
1658 "INACTIVE",
1659 "CREATE",
1660 "CREATE_TRUNC",
1661 "TRUNCATE_FILE",
1662 "REMOVE",
1663 "LINK",
1664 "RENAME",
1665 "MKDIR",
1666 "RMDIR",
1667 "SYMLINK",
1668 "SET_DMATTRS",
1669 "GROWFS",
1670 "STRAT_WRITE",
1671 "DIOSTRAT",
1672 "WRITE_SYNC",
1673 "WRITEID",
1674 "ADDAFORK",
1675 "ATTRINVAL",
1676 "ATRUNCATE",
1677 "ATTR_SET",
1678 "ATTR_RM",
1679 "ATTR_FLAG",
1680 "CLEAR_AGI_BUCKET",
1681 "QM_SBCHANGE",
1682 "DUMMY1",
1683 "DUMMY2",
1684 "QM_QUOTAOFF",
1685 "QM_DQALLOC",
1686 "QM_SETQLIM",
1687 "QM_DQCLUSTER",
1688 "QM_QINOCREATE",
1689 "QM_QUOTAOFF_END",
1690 "SB_UNIT",
1691 "FSYNC_TS",
1692 "GROWFSRT_ALLOC",
1693 "GROWFSRT_ZERO",
1694 "GROWFSRT_FREE",
1695 "SWAPEXT"
1696 };
1697
1698 xfs_fs_cmn_err(CE_WARN, mp,
1699 "xfs_log_write: reservation summary:\n"
1700 " trans type = %s (%u)\n"
1701 " unit res = %d bytes\n"
1702 " current res = %d bytes\n"
1703 " total reg = %u bytes (o/flow = %u bytes)\n"
1704 " ophdrs = %u (ophdr space = %u bytes)\n"
1705 " ophdr + reg = %u bytes\n"
1706 " num regions = %u\n",
1707 ((ticket->t_trans_type <= 0 ||
1708 ticket->t_trans_type > XFS_TRANS_TYPE_MAX) ?
1709 "bad-trans-type" : trans_type_str[ticket->t_trans_type-1]),
1710 ticket->t_trans_type,
1711 ticket->t_unit_res,
1712 ticket->t_curr_res,
1713 ticket->t_res_arr_sum, ticket->t_res_o_flow,
1714 ticket->t_res_num_ophdrs, ophdr_spc,
1715 ticket->t_res_arr_sum +
1716 ticket->t_res_o_flow + ophdr_spc,
1717 ticket->t_res_num);
1718
1719 for (i = 0; i < ticket->t_res_num; i++) {
1720 uint r_type = ticket->t_res_arr[i].r_type;
1721 cmn_err(CE_WARN,
1722 "region[%u]: %s - %u bytes\n",
1723 i,
1724 ((r_type <= 0 || r_type > XLOG_REG_TYPE_MAX) ?
1725 "bad-rtype" : res_type_str[r_type-1]),
1726 ticket->t_res_arr[i].r_len);
1727 }
1728}
1729#else
1730#define xlog_print_tic_res(mp, ticket)
1731#endif
1732
1733/*
1608 * Write some region out to in-core log 1734 * Write some region out to in-core log
1609 * 1735 *
1610 * This will be called when writing externally provided regions or when 1736 * This will be called when writing externally provided regions or when
@@ -1677,16 +1803,21 @@ xlog_write(xfs_mount_t * mp,
1677 * xlog_op_header_t and may need to be double word aligned. 1803 * xlog_op_header_t and may need to be double word aligned.
1678 */ 1804 */
1679 len = 0; 1805 len = 0;
1680 if (ticket->t_flags & XLOG_TIC_INITED) /* acct for start rec of xact */ 1806 if (ticket->t_flags & XLOG_TIC_INITED) { /* acct for start rec of xact */
1681 len += sizeof(xlog_op_header_t); 1807 len += sizeof(xlog_op_header_t);
1808 XLOG_TIC_ADD_OPHDR(ticket);
1809 }
1682 1810
1683 for (index = 0; index < nentries; index++) { 1811 for (index = 0; index < nentries; index++) {
1684 len += sizeof(xlog_op_header_t); /* each region gets >= 1 */ 1812 len += sizeof(xlog_op_header_t); /* each region gets >= 1 */
1813 XLOG_TIC_ADD_OPHDR(ticket);
1685 len += reg[index].i_len; 1814 len += reg[index].i_len;
1815 XLOG_TIC_ADD_REGION(ticket, reg[index].i_len, reg[index].i_type);
1686 } 1816 }
1687 contwr = *start_lsn = 0; 1817 contwr = *start_lsn = 0;
1688 1818
1689 if (ticket->t_curr_res < len) { 1819 if (ticket->t_curr_res < len) {
1820 xlog_print_tic_res(mp, ticket);
1690#ifdef DEBUG 1821#ifdef DEBUG
1691 xlog_panic( 1822 xlog_panic(
1692 "xfs_log_write: reservation ran out. Need to up reservation"); 1823 "xfs_log_write: reservation ran out. Need to up reservation");
@@ -1790,6 +1921,7 @@ xlog_write(xfs_mount_t * mp,
1790 len += sizeof(xlog_op_header_t); /* from splitting of region */ 1921 len += sizeof(xlog_op_header_t); /* from splitting of region */
1791 /* account for new log op header */ 1922 /* account for new log op header */
1792 ticket->t_curr_res -= sizeof(xlog_op_header_t); 1923 ticket->t_curr_res -= sizeof(xlog_op_header_t);
1924 XLOG_TIC_ADD_OPHDR(ticket);
1793 } 1925 }
1794 xlog_verify_dest_ptr(log, ptr); 1926 xlog_verify_dest_ptr(log, ptr);
1795 1927
@@ -2282,6 +2414,9 @@ restart:
2282 */ 2414 */
2283 if (log_offset == 0) { 2415 if (log_offset == 0) {
2284 ticket->t_curr_res -= log->l_iclog_hsize; 2416 ticket->t_curr_res -= log->l_iclog_hsize;
2417 XLOG_TIC_ADD_REGION(ticket,
2418 log->l_iclog_hsize,
2419 XLOG_REG_TYPE_LRHEADER);
2285 INT_SET(head->h_cycle, ARCH_CONVERT, log->l_curr_cycle); 2420 INT_SET(head->h_cycle, ARCH_CONVERT, log->l_curr_cycle);
2286 ASSIGN_LSN(head->h_lsn, log); 2421 ASSIGN_LSN(head->h_lsn, log);
2287 ASSERT(log->l_curr_block >= 0); 2422 ASSERT(log->l_curr_block >= 0);
@@ -2468,6 +2603,7 @@ xlog_regrant_write_log_space(xlog_t *log,
2468#endif 2603#endif
2469 2604
2470 tic->t_curr_res = tic->t_unit_res; 2605 tic->t_curr_res = tic->t_unit_res;
2606 XLOG_TIC_RESET_RES(tic);
2471 2607
2472 if (tic->t_cnt > 0) 2608 if (tic->t_cnt > 0)
2473 return (0); 2609 return (0);
@@ -2608,6 +2744,7 @@ xlog_regrant_reserve_log_space(xlog_t *log,
2608 XLOG_GRANT_SUB_SPACE(log, ticket->t_curr_res, 'w'); 2744 XLOG_GRANT_SUB_SPACE(log, ticket->t_curr_res, 'w');
2609 XLOG_GRANT_SUB_SPACE(log, ticket->t_curr_res, 'r'); 2745 XLOG_GRANT_SUB_SPACE(log, ticket->t_curr_res, 'r');
2610 ticket->t_curr_res = ticket->t_unit_res; 2746 ticket->t_curr_res = ticket->t_unit_res;
2747 XLOG_TIC_RESET_RES(ticket);
2611 xlog_trace_loggrant(log, ticket, 2748 xlog_trace_loggrant(log, ticket,
2612 "xlog_regrant_reserve_log_space: sub current res"); 2749 "xlog_regrant_reserve_log_space: sub current res");
2613 xlog_verify_grant_head(log, 1); 2750 xlog_verify_grant_head(log, 1);
@@ -2624,6 +2761,7 @@ xlog_regrant_reserve_log_space(xlog_t *log,
2624 xlog_verify_grant_head(log, 0); 2761 xlog_verify_grant_head(log, 0);
2625 GRANT_UNLOCK(log, s); 2762 GRANT_UNLOCK(log, s);
2626 ticket->t_curr_res = ticket->t_unit_res; 2763 ticket->t_curr_res = ticket->t_unit_res;
2764 XLOG_TIC_RESET_RES(ticket);
2627} /* xlog_regrant_reserve_log_space */ 2765} /* xlog_regrant_reserve_log_space */
2628 2766
2629 2767
@@ -3179,29 +3317,57 @@ xlog_ticket_get(xlog_t *log,
3179 * and their unit amount is the total amount of space required. 3317 * and their unit amount is the total amount of space required.
3180 * 3318 *
3181 * The following lines of code account for non-transaction data 3319 * The following lines of code account for non-transaction data
3182 * which occupy space in the on-disk log. 3320 * which occupy space in the on-disk log.
3321 *
3322 * Normal form of a transaction is:
3323 * <oph><trans-hdr><start-oph><reg1-oph><reg1><reg2-oph>...<commit-oph>
3324 * and then there are LR hdrs, split-recs and roundoff at end of syncs.
3325 *
3326 * We need to account for all the leadup data and trailer data
3327 * around the transaction data.
3328 * And then we need to account for the worst case in terms of using
3329 * more space.
3330 * The worst case will happen if:
3331 * - the placement of the transaction happens to be such that the
3332 * roundoff is at its maximum
3333 * - the transaction data is synced before the commit record is synced
3334 * i.e. <transaction-data><roundoff> | <commit-rec><roundoff>
3335 * Therefore the commit record is in its own Log Record.
3336 * This can happen as the commit record is called with its
3337 * own region to xlog_write().
3338 * This then means that in the worst case, roundoff can happen for
3339 * the commit-rec as well.
3340 * The commit-rec is smaller than padding in this scenario and so it is
3341 * not added separately.
3183 */ 3342 */
3184 3343
3344 /* for trans header */
3345 unit_bytes += sizeof(xlog_op_header_t);
3346 unit_bytes += sizeof(xfs_trans_header_t);
3347
3185 /* for start-rec */ 3348 /* for start-rec */
3186 unit_bytes += sizeof(xlog_op_header_t); 3349 unit_bytes += sizeof(xlog_op_header_t);
3350
3351 /* for LR headers */
3352 num_headers = ((unit_bytes + log->l_iclog_size-1) >> log->l_iclog_size_log);
3353 unit_bytes += log->l_iclog_hsize * num_headers;
3354
3355 /* for commit-rec LR header - note: padding will subsume the ophdr */
3356 unit_bytes += log->l_iclog_hsize;
3357
3358 /* for split-recs - ophdrs added when data split over LRs */
3359 unit_bytes += sizeof(xlog_op_header_t) * num_headers;
3187 3360
3188 /* for padding */ 3361 /* for roundoff padding for transaction data and one for commit record */
3189 if (XFS_SB_VERSION_HASLOGV2(&log->l_mp->m_sb) && 3362 if (XFS_SB_VERSION_HASLOGV2(&log->l_mp->m_sb) &&
3190 log->l_mp->m_sb.sb_logsunit > 1) { 3363 log->l_mp->m_sb.sb_logsunit > 1) {
3191 /* log su roundoff */ 3364 /* log su roundoff */
3192 unit_bytes += log->l_mp->m_sb.sb_logsunit; 3365 unit_bytes += 2*log->l_mp->m_sb.sb_logsunit;
3193 } else { 3366 } else {
3194 /* BB roundoff */ 3367 /* BB roundoff */
3195 unit_bytes += BBSIZE; 3368 unit_bytes += 2*BBSIZE;
3196 } 3369 }
3197 3370
3198 /* for commit-rec */
3199 unit_bytes += sizeof(xlog_op_header_t);
3200
3201 /* for LR headers */
3202 num_headers = ((unit_bytes + log->l_iclog_size-1) >> log->l_iclog_size_log);
3203 unit_bytes += log->l_iclog_hsize * num_headers;
3204
3205 tic->t_unit_res = unit_bytes; 3371 tic->t_unit_res = unit_bytes;
3206 tic->t_curr_res = unit_bytes; 3372 tic->t_curr_res = unit_bytes;
3207 tic->t_cnt = cnt; 3373 tic->t_cnt = cnt;
@@ -3209,10 +3375,13 @@ xlog_ticket_get(xlog_t *log,
3209 tic->t_tid = (xlog_tid_t)((__psint_t)tic & 0xffffffff); 3375 tic->t_tid = (xlog_tid_t)((__psint_t)tic & 0xffffffff);
3210 tic->t_clientid = client; 3376 tic->t_clientid = client;
3211 tic->t_flags = XLOG_TIC_INITED; 3377 tic->t_flags = XLOG_TIC_INITED;
3378 tic->t_trans_type = 0;
3212 if (xflags & XFS_LOG_PERM_RESERV) 3379 if (xflags & XFS_LOG_PERM_RESERV)
3213 tic->t_flags |= XLOG_TIC_PERM_RESERV; 3380 tic->t_flags |= XLOG_TIC_PERM_RESERV;
3214 sv_init(&(tic->t_sema), SV_DEFAULT, "logtick"); 3381 sv_init(&(tic->t_sema), SV_DEFAULT, "logtick");
3215 3382
3383 XLOG_TIC_RESET_RES(tic);
3384
3216 return tic; 3385 return tic;
3217} /* xlog_ticket_get */ 3386} /* xlog_ticket_get */
3218 3387
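
The rewritten sizing code in xlog_ticket_get() now charges up front for the transaction header, the start record, one log-record header per iclog the reservation can span, a separate header for the commit record, per-split op headers, and two rounds of round-off (one for the transaction data, one for the commit record). The arithmetic is easy to follow with concrete numbers; the sketch below uses made-up sizes purely for illustration, not the real on-disk values.

/* Worked example of the worst-case reservation sizing above. */
#include <stdio.h>

int main(void)
{
	unsigned int unit_bytes   = 4096;  /* caller's own region data          */
	unsigned int op_hdr       = 32;    /* sizeof(xlog_op_header_t), say     */
	unsigned int trans_hdr    = 16;    /* sizeof(xfs_trans_header_t), say   */
	unsigned int iclog_size   = 32768; /* in-core log buffer size           */
	unsigned int iclog_hsize  = 512;   /* log record (LR) header size       */
	unsigned int roundoff     = 512;   /* BBSIZE or stripe-unit round-off   */
	unsigned int num_headers;

	unit_bytes += op_hdr + trans_hdr;          /* transaction header         */
	unit_bytes += op_hdr;                      /* start record               */

	/* one LR header per iclog the reservation might span */
	num_headers = (unit_bytes + iclog_size - 1) / iclog_size;
	unit_bytes += iclog_hsize * num_headers;

	unit_bytes += iclog_hsize;                 /* commit record's own LR     */
	unit_bytes += op_hdr * num_headers;        /* op headers for split recs  */
	unit_bytes += 2 * roundoff;                /* data + commit-rec roundoff */

	printf("unit reservation = %u bytes (%u LR headers)\n",
	       unit_bytes, num_headers);
	return 0;
}
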
diff --git a/fs/xfs/xfs_log.h b/fs/xfs/xfs_log.h
index 0db122ddda3f..18961119fc65 100644
--- a/fs/xfs/xfs_log.h
+++ b/fs/xfs/xfs_log.h
@@ -114,9 +114,44 @@ xfs_lsn_t _lsn_cmp(xfs_lsn_t lsn1, xfs_lsn_t lsn2)
114#define XFS_VOLUME 0x2 114#define XFS_VOLUME 0x2
115#define XFS_LOG 0xaa 115#define XFS_LOG 0xaa
116 116
117
118/* Region types for iovec's i_type */
119#if defined(XFS_LOG_RES_DEBUG)
120#define XLOG_REG_TYPE_BFORMAT 1
121#define XLOG_REG_TYPE_BCHUNK 2
122#define XLOG_REG_TYPE_EFI_FORMAT 3
123#define XLOG_REG_TYPE_EFD_FORMAT 4
124#define XLOG_REG_TYPE_IFORMAT 5
125#define XLOG_REG_TYPE_ICORE 6
126#define XLOG_REG_TYPE_IEXT 7
127#define XLOG_REG_TYPE_IBROOT 8
128#define XLOG_REG_TYPE_ILOCAL 9
129#define XLOG_REG_TYPE_IATTR_EXT 10
130#define XLOG_REG_TYPE_IATTR_BROOT 11
131#define XLOG_REG_TYPE_IATTR_LOCAL 12
132#define XLOG_REG_TYPE_QFORMAT 13
133#define XLOG_REG_TYPE_DQUOT 14
134#define XLOG_REG_TYPE_QUOTAOFF 15
135#define XLOG_REG_TYPE_LRHEADER 16
136#define XLOG_REG_TYPE_UNMOUNT 17
137#define XLOG_REG_TYPE_COMMIT 18
138#define XLOG_REG_TYPE_TRANSHDR 19
139#define XLOG_REG_TYPE_MAX 19
140#endif
141
142#if defined(XFS_LOG_RES_DEBUG)
143#define XLOG_VEC_SET_TYPE(vecp, t) ((vecp)->i_type = (t))
144#else
145#define XLOG_VEC_SET_TYPE(vecp, t)
146#endif
147
148
117typedef struct xfs_log_iovec { 149typedef struct xfs_log_iovec {
118 xfs_caddr_t i_addr; /* beginning address of region */ 150 xfs_caddr_t i_addr; /* beginning address of region */
119 int i_len; /* length in bytes of region */ 151 int i_len; /* length in bytes of region */
152#if defined(XFS_LOG_RES_DEBUG)
153 uint i_type; /* type of region */
154#endif
120} xfs_log_iovec_t; 155} xfs_log_iovec_t;
121 156
122typedef void* xfs_log_ticket_t; 157typedef void* xfs_log_ticket_t;
@@ -159,7 +194,8 @@ int xfs_log_reserve(struct xfs_mount *mp,
159 int count, 194 int count,
160 xfs_log_ticket_t *ticket, 195 xfs_log_ticket_t *ticket,
161 __uint8_t clientid, 196 __uint8_t clientid,
162 uint flags); 197 uint flags,
198 uint t_type);
163int xfs_log_write(struct xfs_mount *mp, 199int xfs_log_write(struct xfs_mount *mp,
164 xfs_log_iovec_t region[], 200 xfs_log_iovec_t region[],
165 int nentries, 201 int nentries,
diff --git a/fs/xfs/xfs_log_priv.h b/fs/xfs/xfs_log_priv.h
index 1a1d452f15f9..eb7fdc6ebc32 100644
--- a/fs/xfs/xfs_log_priv.h
+++ b/fs/xfs/xfs_log_priv.h
@@ -335,18 +335,66 @@ typedef __uint32_t xlog_tid_t;
335 335
336#define XLOG_COVER_OPS 5 336#define XLOG_COVER_OPS 5
337 337
338
339/* Ticket reservation region accounting */
340#if defined(XFS_LOG_RES_DEBUG)
341#define XLOG_TIC_LEN_MAX 15
342#define XLOG_TIC_RESET_RES(t) ((t)->t_res_num = \
343 (t)->t_res_arr_sum = (t)->t_res_num_ophdrs = 0)
344#define XLOG_TIC_ADD_OPHDR(t) ((t)->t_res_num_ophdrs++)
345#define XLOG_TIC_ADD_REGION(t, len, type) \
346 do { \
347 if ((t)->t_res_num == XLOG_TIC_LEN_MAX) { \
348 /* add to overflow and start again */ \
349 (t)->t_res_o_flow += (t)->t_res_arr_sum; \
350 (t)->t_res_num = 0; \
351 (t)->t_res_arr_sum = 0; \
352 } \
353 (t)->t_res_arr[(t)->t_res_num].r_len = (len); \
354 (t)->t_res_arr[(t)->t_res_num].r_type = (type); \
355 (t)->t_res_arr_sum += (len); \
356 (t)->t_res_num++; \
357 } while (0)
358
359/*
360 * Reservation region
361 * As would be stored in xfs_log_iovec but without the i_addr which
362 * we don't care about.
363 */
364typedef struct xlog_res {
365 uint r_len;
366 uint r_type;
367} xlog_res_t;
368#else
369#define XLOG_TIC_RESET_RES(t)
370#define XLOG_TIC_ADD_OPHDR(t)
371#define XLOG_TIC_ADD_REGION(t, len, type)
372#endif
373
374
338typedef struct xlog_ticket { 375typedef struct xlog_ticket {
339 sv_t t_sema; /* sleep on this semaphore :20 */ 376 sv_t t_sema; /* sleep on this semaphore : 20 */
340 struct xlog_ticket *t_next; /* : 4 */ 377 struct xlog_ticket *t_next; /* :4|8 */
341 struct xlog_ticket *t_prev; /* : 4 */ 378 struct xlog_ticket *t_prev; /* :4|8 */
342 xlog_tid_t t_tid; /* transaction identifier : 4 */ 379 xlog_tid_t t_tid; /* transaction identifier : 4 */
343 int t_curr_res; /* current reservation in bytes : 4 */ 380 int t_curr_res; /* current reservation in bytes : 4 */
344 int t_unit_res; /* unit reservation in bytes : 4 */ 381 int t_unit_res; /* unit reservation in bytes : 4 */
345 __uint8_t t_ocnt; /* original count : 1 */ 382 char t_ocnt; /* original count : 1 */
346 __uint8_t t_cnt; /* current count : 1 */ 383 char t_cnt; /* current count : 1 */
347 __uint8_t t_clientid; /* who does this belong to; : 1 */ 384 char t_clientid; /* who does this belong to; : 1 */
348 __uint8_t t_flags; /* properties of reservation : 1 */ 385 char t_flags; /* properties of reservation : 1 */
386 uint t_trans_type; /* transaction type : 4 */
387
388#if defined (XFS_LOG_RES_DEBUG)
389 /* reservation array fields */
390 uint t_res_num; /* num in array : 4 */
391 xlog_res_t t_res_arr[XLOG_TIC_LEN_MAX]; /* array of res : X */
392 uint t_res_num_ophdrs; /* num op hdrs : 4 */
393 uint t_res_arr_sum; /* array sum : 4 */
394 uint t_res_o_flow; /* sum overflow : 4 */
395#endif
349} xlog_ticket_t; 396} xlog_ticket_t;
397
350#endif 398#endif
351 399
352 400
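A minimal userspace sketch of the XFS_LOG_RES_DEBUG accounting pattern added above: a fixed array of (length, type) records per ticket, with the running sum folded into an overflow counter once the array fills. The struct and function names here are invented for the demo:

    #include <stdio.h>

    #define RES_LEN_MAX 15

    struct res { unsigned int len, type; };

    struct ticket {
        unsigned int res_num;
        unsigned int res_arr_sum;
        unsigned int res_o_flow;
        struct res   res_arr[RES_LEN_MAX];
    };

    static void add_region(struct ticket *t, unsigned int len, unsigned int type)
    {
        if (t->res_num == RES_LEN_MAX) {
            /* array is full: fold the running sum into the overflow counter */
            t->res_o_flow += t->res_arr_sum;
            t->res_num = 0;
            t->res_arr_sum = 0;
        }
        t->res_arr[t->res_num].len = len;
        t->res_arr[t->res_num].type = type;
        t->res_arr_sum += len;
        t->res_num++;
    }

    int main(void)
    {
        struct ticket t = { 0 };

        for (unsigned int i = 0; i < 40; i++)
            add_region(&t, 100 + i, i % 5);
        printf("num=%u sum=%u overflow=%u\n",
               t.res_num, t.res_arr_sum, t.res_o_flow);
        return 0;
    }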
diff --git a/fs/xfs/xfs_log_recover.c b/fs/xfs/xfs_log_recover.c
index 0aac28ddb81c..14faabaabf29 100644
--- a/fs/xfs/xfs_log_recover.c
+++ b/fs/xfs/xfs_log_recover.c
@@ -1387,7 +1387,7 @@ xlog_recover_add_to_cont_trans(
1387 old_ptr = item->ri_buf[item->ri_cnt-1].i_addr; 1387 old_ptr = item->ri_buf[item->ri_cnt-1].i_addr;
1388 old_len = item->ri_buf[item->ri_cnt-1].i_len; 1388 old_len = item->ri_buf[item->ri_cnt-1].i_len;
1389 1389
1390 ptr = kmem_realloc(old_ptr, len+old_len, old_len, 0); 1390 ptr = kmem_realloc(old_ptr, len+old_len, old_len, 0u);
1391 memcpy(&ptr[old_len], dp, len); /* d, s, l */ 1391 memcpy(&ptr[old_len], dp, len); /* d, s, l */
1392 item->ri_buf[item->ri_cnt-1].i_len += len; 1392 item->ri_buf[item->ri_cnt-1].i_len += len;
1393 item->ri_buf[item->ri_cnt-1].i_addr = ptr; 1393 item->ri_buf[item->ri_cnt-1].i_addr = ptr;
diff --git a/fs/xfs/xfs_qmops.c b/fs/xfs/xfs_qmops.c
index 4f40c92863d5..a6cd6324e946 100644
--- a/fs/xfs/xfs_qmops.c
+++ b/fs/xfs/xfs_qmops.c
@@ -1,5 +1,5 @@
1/* 1/*
2 * Copyright (c) 2000-2003 Silicon Graphics, Inc. All Rights Reserved. 2 * Copyright (c) 2000-2005 Silicon Graphics, Inc. All Rights Reserved.
3 * 3 *
4 * This program is free software; you can redistribute it and/or modify it 4 * This program is free software; you can redistribute it and/or modify it
5 * under the terms of version 2 of the GNU General Public License as 5 * under the terms of version 2 of the GNU General Public License as
@@ -42,7 +42,8 @@
42#include "xfs_dir2.h" 42#include "xfs_dir2.h"
43#include "xfs_dmapi.h" 43#include "xfs_dmapi.h"
44#include "xfs_mount.h" 44#include "xfs_mount.h"
45 45#include "xfs_quota.h"
46#include "xfs_error.h"
46 47
47STATIC struct xfs_dquot * 48STATIC struct xfs_dquot *
48xfs_dqvopchown_default( 49xfs_dqvopchown_default(
@@ -54,8 +55,79 @@ xfs_dqvopchown_default(
54 return NULL; 55 return NULL;
55} 56}
56 57
58/*
59 * Clear the quotaflags in memory and in the superblock.
60 */
61int
62xfs_mount_reset_sbqflags(xfs_mount_t *mp)
63{
64 int error;
65 xfs_trans_t *tp;
66 unsigned long s;
67
68 mp->m_qflags = 0;
69 /*
70 * It is OK to look at sb_qflags here in mount path,
71 * without SB_LOCK.
72 */
73 if (mp->m_sb.sb_qflags == 0)
74 return 0;
75 s = XFS_SB_LOCK(mp);
76 mp->m_sb.sb_qflags = 0;
77 XFS_SB_UNLOCK(mp, s);
78
79 /*
80 * if the fs is readonly, let the incore superblock run
81 * with quotas off but don't flush the update out to disk
82 */
83 if (XFS_MTOVFS(mp)->vfs_flag & VFS_RDONLY)
84 return 0;
85#ifdef QUOTADEBUG
86 xfs_fs_cmn_err(CE_NOTE, mp, "Writing superblock quota changes");
87#endif
88 tp = xfs_trans_alloc(mp, XFS_TRANS_QM_SBCHANGE);
89 if ((error = xfs_trans_reserve(tp, 0, mp->m_sb.sb_sectsize + 128, 0, 0,
90 XFS_DEFAULT_LOG_COUNT))) {
91 xfs_trans_cancel(tp, 0);
92 xfs_fs_cmn_err(CE_ALERT, mp,
93 "xfs_mount_reset_sbqflags: Superblock update failed!");
94 return error;
95 }
96 xfs_mod_sb(tp, XFS_SB_QFLAGS);
97 error = xfs_trans_commit(tp, 0, NULL);
98 return error;
99}
100
101STATIC int
102xfs_noquota_init(
103 xfs_mount_t *mp,
104 uint *needquotamount,
105 uint *quotaflags)
106{
107 int error = 0;
108
109 *quotaflags = 0;
110 *needquotamount = B_FALSE;
111
112 ASSERT(!XFS_IS_QUOTA_ON(mp));
113
114 /*
115 * If a file system had quotas running earlier, but decided to
116 * mount without -o uquota/pquota/gquota options, revoke the
117 * quotachecked license.
118 */
119 if (mp->m_sb.sb_qflags & XFS_ALL_QUOTA_ACCT) {
120 cmn_err(CE_NOTE,
121 "XFS resetting qflags for filesystem %s",
122 mp->m_fsname);
123
124 error = xfs_mount_reset_sbqflags(mp);
125 }
126 return error;
127}
128
57xfs_qmops_t xfs_qmcore_stub = { 129xfs_qmops_t xfs_qmcore_stub = {
58 .xfs_qminit = (xfs_qminit_t) fs_noerr, 130 .xfs_qminit = (xfs_qminit_t) xfs_noquota_init,
59 .xfs_qmdone = (xfs_qmdone_t) fs_noerr, 131 .xfs_qmdone = (xfs_qmdone_t) fs_noerr,
60 .xfs_qmmount = (xfs_qmmount_t) fs_noerr, 132 .xfs_qmmount = (xfs_qmmount_t) fs_noerr,
61 .xfs_qmunmount = (xfs_qmunmount_t) fs_noerr, 133 .xfs_qmunmount = (xfs_qmunmount_t) fs_noerr,
diff --git a/fs/xfs/xfs_quota.h b/fs/xfs/xfs_quota.h
index 7134576ae7fa..32cb79752d5d 100644
--- a/fs/xfs/xfs_quota.h
+++ b/fs/xfs/xfs_quota.h
@@ -1,5 +1,5 @@
1/* 1/*
2 * Copyright (c) 2000-2003 Silicon Graphics, Inc. All Rights Reserved. 2 * Copyright (c) 2000-2005 Silicon Graphics, Inc. All Rights Reserved.
3 * 3 *
4 * This program is free software; you can redistribute it and/or modify it 4 * This program is free software; you can redistribute it and/or modify it
5 * under the terms of version 2 of the GNU General Public License as 5 * under the terms of version 2 of the GNU General Public License as
@@ -160,6 +160,20 @@ typedef struct xfs_qoff_logformat {
160#define XFS_GQUOTA_ACCT 0x0040 /* group quota accounting ON */ 160#define XFS_GQUOTA_ACCT 0x0040 /* group quota accounting ON */
161 161
162/* 162/*
163 * Quota Accounting/Enforcement flags
164 */
165#define XFS_ALL_QUOTA_ACCT \
166 (XFS_UQUOTA_ACCT | XFS_GQUOTA_ACCT | XFS_PQUOTA_ACCT)
167#define XFS_ALL_QUOTA_ENFD (XFS_UQUOTA_ENFD | XFS_OQUOTA_ENFD)
168#define XFS_ALL_QUOTA_CHKD (XFS_UQUOTA_CHKD | XFS_OQUOTA_CHKD)
169
170#define XFS_IS_QUOTA_RUNNING(mp) ((mp)->m_qflags & XFS_ALL_QUOTA_ACCT)
171#define XFS_IS_QUOTA_ENFORCED(mp) ((mp)->m_qflags & XFS_ALL_QUOTA_ENFD)
172#define XFS_IS_UQUOTA_RUNNING(mp) ((mp)->m_qflags & XFS_UQUOTA_ACCT)
173#define XFS_IS_PQUOTA_RUNNING(mp) ((mp)->m_qflags & XFS_PQUOTA_ACCT)
174#define XFS_IS_GQUOTA_RUNNING(mp) ((mp)->m_qflags & XFS_GQUOTA_ACCT)
175
176/*
163 * Incore only flags for quotaoff - these bits get cleared when quota(s) 177 * Incore only flags for quotaoff - these bits get cleared when quota(s)
164 * are in the process of getting turned off. These flags are in m_qflags but 178 * are in the process of getting turned off. These flags are in m_qflags but
165 * never in sb_qflags. 179 * never in sb_qflags.
@@ -362,6 +376,7 @@ typedef struct xfs_dqtrxops {
362 f | XFS_QMOPT_RES_REGBLKS) 376 f | XFS_QMOPT_RES_REGBLKS)
363 377
364extern int xfs_qm_dqcheck(xfs_disk_dquot_t *, xfs_dqid_t, uint, uint, char *); 378extern int xfs_qm_dqcheck(xfs_disk_dquot_t *, xfs_dqid_t, uint, uint, char *);
379extern int xfs_mount_reset_sbqflags(struct xfs_mount *);
365 380
366extern struct bhv_vfsops xfs_qmops; 381extern struct bhv_vfsops xfs_qmops;
367 382
diff --git a/fs/xfs/xfs_trans.c b/fs/xfs/xfs_trans.c
index 06dfca531f79..92efe272b83d 100644
--- a/fs/xfs/xfs_trans.c
+++ b/fs/xfs/xfs_trans.c
@@ -276,7 +276,7 @@ xfs_trans_reserve(
276 276
277 error = xfs_log_reserve(tp->t_mountp, logspace, logcount, 277 error = xfs_log_reserve(tp->t_mountp, logspace, logcount,
278 &tp->t_ticket, 278 &tp->t_ticket,
279 XFS_TRANSACTION, log_flags); 279 XFS_TRANSACTION, log_flags, tp->t_type);
280 if (error) { 280 if (error) {
281 goto undo_blocks; 281 goto undo_blocks;
282 } 282 }
@@ -1032,6 +1032,7 @@ xfs_trans_fill_vecs(
1032 tp->t_header.th_num_items = nitems; 1032 tp->t_header.th_num_items = nitems;
1033 log_vector->i_addr = (xfs_caddr_t)&tp->t_header; 1033 log_vector->i_addr = (xfs_caddr_t)&tp->t_header;
1034 log_vector->i_len = sizeof(xfs_trans_header_t); 1034 log_vector->i_len = sizeof(xfs_trans_header_t);
1035 XLOG_VEC_SET_TYPE(log_vector, XLOG_REG_TYPE_TRANSHDR);
1035} 1036}
1036 1037
1037 1038
diff --git a/fs/xfs/xfs_trans.h b/fs/xfs/xfs_trans.h
index ec541d66fa2a..a263aec8b3a6 100644
--- a/fs/xfs/xfs_trans.h
+++ b/fs/xfs/xfs_trans.h
@@ -112,6 +112,7 @@ typedef struct xfs_trans_header {
112#define XFS_TRANS_GROWFSRT_ZERO 38 112#define XFS_TRANS_GROWFSRT_ZERO 38
113#define XFS_TRANS_GROWFSRT_FREE 39 113#define XFS_TRANS_GROWFSRT_FREE 39
114#define XFS_TRANS_SWAPEXT 40 114#define XFS_TRANS_SWAPEXT 40
115#define XFS_TRANS_TYPE_MAX 40
115/* new transaction types need to be reflected in xfs_logprint(8) */ 116/* new transaction types need to be reflected in xfs_logprint(8) */
116 117
117 118
@@ -998,6 +999,7 @@ struct xfs_buf *xfs_trans_getsb(xfs_trans_t *, struct xfs_mount *, int);
998void xfs_trans_brelse(xfs_trans_t *, struct xfs_buf *); 999void xfs_trans_brelse(xfs_trans_t *, struct xfs_buf *);
999void xfs_trans_bjoin(xfs_trans_t *, struct xfs_buf *); 1000void xfs_trans_bjoin(xfs_trans_t *, struct xfs_buf *);
1000void xfs_trans_bhold(xfs_trans_t *, struct xfs_buf *); 1001void xfs_trans_bhold(xfs_trans_t *, struct xfs_buf *);
1002void xfs_trans_bhold_release(xfs_trans_t *, struct xfs_buf *);
1001void xfs_trans_binval(xfs_trans_t *, struct xfs_buf *); 1003void xfs_trans_binval(xfs_trans_t *, struct xfs_buf *);
1002void xfs_trans_inode_buf(xfs_trans_t *, struct xfs_buf *); 1004void xfs_trans_inode_buf(xfs_trans_t *, struct xfs_buf *);
1003void xfs_trans_inode_buf(xfs_trans_t *, struct xfs_buf *); 1005void xfs_trans_inode_buf(xfs_trans_t *, struct xfs_buf *);
diff --git a/fs/xfs/xfs_trans_ail.c b/fs/xfs/xfs_trans_ail.c
index 7bc5eab4c2c1..2a71b4f91bfa 100644
--- a/fs/xfs/xfs_trans_ail.c
+++ b/fs/xfs/xfs_trans_ail.c
@@ -379,8 +379,8 @@ xfs_trans_delete_ail(
379 else { 379 else {
380 xfs_cmn_err(XFS_PTAG_AILDELETE, CE_ALERT, mp, 380 xfs_cmn_err(XFS_PTAG_AILDELETE, CE_ALERT, mp,
381 "xfs_trans_delete_ail: attempting to delete a log item that is not in the AIL"); 381 "xfs_trans_delete_ail: attempting to delete a log item that is not in the AIL");
382 xfs_force_shutdown(mp, XFS_CORRUPT_INCORE);
383 AIL_UNLOCK(mp, s); 382 AIL_UNLOCK(mp, s);
383 xfs_force_shutdown(mp, XFS_CORRUPT_INCORE);
384 } 384 }
385 } 385 }
386} 386}
diff --git a/fs/xfs/xfs_trans_buf.c b/fs/xfs/xfs_trans_buf.c
index 144da7a85466..e733293dd7f4 100644
--- a/fs/xfs/xfs_trans_buf.c
+++ b/fs/xfs/xfs_trans_buf.c
@@ -714,6 +714,29 @@ xfs_trans_bhold(xfs_trans_t *tp,
714} 714}
715 715
716/* 716/*
717 * Cancel the previous buffer hold request made on this buffer
718 * for this transaction.
719 */
720void
721xfs_trans_bhold_release(xfs_trans_t *tp,
722 xfs_buf_t *bp)
723{
724 xfs_buf_log_item_t *bip;
725
726 ASSERT(XFS_BUF_ISBUSY(bp));
727 ASSERT(XFS_BUF_FSPRIVATE2(bp, xfs_trans_t *) == tp);
728 ASSERT(XFS_BUF_FSPRIVATE(bp, void *) != NULL);
729
730 bip = XFS_BUF_FSPRIVATE(bp, xfs_buf_log_item_t *);
731 ASSERT(!(bip->bli_flags & XFS_BLI_STALE));
732 ASSERT(!(bip->bli_format.blf_flags & XFS_BLI_CANCEL));
733 ASSERT(atomic_read(&bip->bli_refcount) > 0);
734 ASSERT(bip->bli_flags & XFS_BLI_HOLD);
735 bip->bli_flags &= ~XFS_BLI_HOLD;
736 xfs_buf_item_trace("BHOLD RELEASE", bip);
737}
738
739/*
717 * This is called to mark bytes first through last inclusive of the given 740 * This is called to mark bytes first through last inclusive of the given
718 * buffer as needing to be logged when the transaction is committed. 741 * buffer as needing to be logged when the transaction is committed.
719 * The buffer must already be associated with the given transaction. 742 * The buffer must already be associated with the given transaction.
diff --git a/fs/xfs/xfs_vfsops.c b/fs/xfs/xfs_vfsops.c
index 42bcc0215203..f1a904e23ade 100644
--- a/fs/xfs/xfs_vfsops.c
+++ b/fs/xfs/xfs_vfsops.c
@@ -795,7 +795,6 @@ xfs_statvfs(
795 xfs_mount_t *mp; 795 xfs_mount_t *mp;
796 xfs_sb_t *sbp; 796 xfs_sb_t *sbp;
797 unsigned long s; 797 unsigned long s;
798 u64 id;
799 798
800 mp = XFS_BHVTOM(bdp); 799 mp = XFS_BHVTOM(bdp);
801 sbp = &(mp->m_sb); 800 sbp = &(mp->m_sb);
@@ -823,9 +822,7 @@ xfs_statvfs(
823 statp->f_ffree = statp->f_files - (sbp->sb_icount - sbp->sb_ifree); 822 statp->f_ffree = statp->f_files - (sbp->sb_icount - sbp->sb_ifree);
824 XFS_SB_UNLOCK(mp, s); 823 XFS_SB_UNLOCK(mp, s);
825 824
826 id = huge_encode_dev(mp->m_dev); 825 xfs_statvfs_fsid(statp, mp);
827 statp->f_fsid.val[0] = (u32)id;
828 statp->f_fsid.val[1] = (u32)(id >> 32);
829 statp->f_namelen = MAXNAMELEN - 1; 826 statp->f_namelen = MAXNAMELEN - 1;
830 827
831 return 0; 828 return 0;
@@ -906,7 +903,6 @@ xfs_sync_inodes(
906 xfs_inode_t *ip_next; 903 xfs_inode_t *ip_next;
907 xfs_buf_t *bp; 904 xfs_buf_t *bp;
908 vnode_t *vp = NULL; 905 vnode_t *vp = NULL;
909 vmap_t vmap;
910 int error; 906 int error;
911 int last_error; 907 int last_error;
912 uint64_t fflag; 908 uint64_t fflag;
@@ -1101,48 +1097,21 @@ xfs_sync_inodes(
1101 * lock in xfs_ireclaim() after the inode is pulled from 1097 * lock in xfs_ireclaim() after the inode is pulled from
1102 * the mount list will sleep until we release it here. 1098 * the mount list will sleep until we release it here.
1103 * This keeps the vnode from being freed while we reference 1099 * This keeps the vnode from being freed while we reference
1104 * it. It is also cheaper and simpler than actually doing 1100 * it.
1105 * a vn_get() for every inode we touch here.
1106 */ 1101 */
1107 if (xfs_ilock_nowait(ip, lock_flags) == 0) { 1102 if (xfs_ilock_nowait(ip, lock_flags) == 0) {
1108
1109 if ((flags & SYNC_BDFLUSH) || (vp == NULL)) { 1103 if ((flags & SYNC_BDFLUSH) || (vp == NULL)) {
1110 ip = ip->i_mnext; 1104 ip = ip->i_mnext;
1111 continue; 1105 continue;
1112 } 1106 }
1113 1107
1114 /* 1108 vp = vn_grab(vp);
1115 * We need to unlock the inode list lock in order
1116 * to lock the inode. Insert a marker record into
1117 * the inode list to remember our position, dropping
1118 * the lock is now done inside the IPOINTER_INSERT
1119 * macro.
1120 *
1121 * We also use the inode list lock to protect us
1122 * in taking a snapshot of the vnode version number
1123 * for use in calling vn_get().
1124 */
1125 VMAP(vp, vmap);
1126 IPOINTER_INSERT(ip, mp);
1127
1128 vp = vn_get(vp, &vmap);
1129 if (vp == NULL) { 1109 if (vp == NULL) {
1130 /* 1110 ip = ip->i_mnext;
1131 * The vnode was reclaimed once we let go
1132 * of the inode list lock. Skip to the
1133 * next list entry. Remove the marker.
1134 */
1135
1136 XFS_MOUNT_ILOCK(mp);
1137
1138 mount_locked = B_TRUE;
1139 vnode_refed = B_FALSE;
1140
1141 IPOINTER_REMOVE(ip, mp);
1142
1143 continue; 1111 continue;
1144 } 1112 }
1145 1113
1114 IPOINTER_INSERT(ip, mp);
1146 xfs_ilock(ip, lock_flags); 1115 xfs_ilock(ip, lock_flags);
1147 1116
1148 ASSERT(vp == XFS_ITOV(ip)); 1117 ASSERT(vp == XFS_ITOV(ip));
@@ -1533,7 +1502,10 @@ xfs_syncsub(
1533 * eventually kicked out of the cache. 1502 * eventually kicked out of the cache.
1534 */ 1503 */
1535 if (flags & SYNC_REFCACHE) { 1504 if (flags & SYNC_REFCACHE) {
1536 xfs_refcache_purge_some(mp); 1505 if (flags & SYNC_WAIT)
1506 xfs_refcache_purge_mp(mp);
1507 else
1508 xfs_refcache_purge_some(mp);
1537 } 1509 }
1538 1510
1539 /* 1511 /*
@@ -1649,6 +1621,10 @@ xfs_vget(
1649#define MNTOPT_SWIDTH "swidth" /* data volume stripe width */ 1621#define MNTOPT_SWIDTH "swidth" /* data volume stripe width */
1650#define MNTOPT_NOUUID "nouuid" /* ignore filesystem UUID */ 1622#define MNTOPT_NOUUID "nouuid" /* ignore filesystem UUID */
1651#define MNTOPT_MTPT "mtpt" /* filesystem mount point */ 1623#define MNTOPT_MTPT "mtpt" /* filesystem mount point */
1624#define MNTOPT_GRPID "grpid" /* group-ID from parent directory */
1625#define MNTOPT_NOGRPID "nogrpid" /* group-ID from current process */
1626#define MNTOPT_BSDGROUPS "bsdgroups" /* group-ID from parent directory */
1627#define MNTOPT_SYSVGROUPS "sysvgroups" /* group-ID from current process */
1652#define MNTOPT_ALLOCSIZE "allocsize" /* preferred allocation size */ 1628#define MNTOPT_ALLOCSIZE "allocsize" /* preferred allocation size */
1653#define MNTOPT_IHASHSIZE "ihashsize" /* size of inode hash table */ 1629#define MNTOPT_IHASHSIZE "ihashsize" /* size of inode hash table */
1654#define MNTOPT_NORECOVERY "norecovery" /* don't run XFS recovery */ 1630#define MNTOPT_NORECOVERY "norecovery" /* don't run XFS recovery */
@@ -1769,6 +1745,12 @@ xfs_parseargs(
1769 } 1745 }
1770 args->flags |= XFSMNT_IHASHSIZE; 1746 args->flags |= XFSMNT_IHASHSIZE;
1771 args->ihashsize = simple_strtoul(value, &eov, 10); 1747 args->ihashsize = simple_strtoul(value, &eov, 10);
1748 } else if (!strcmp(this_char, MNTOPT_GRPID) ||
1749 !strcmp(this_char, MNTOPT_BSDGROUPS)) {
1750 vfsp->vfs_flag |= VFS_GRPID;
1751 } else if (!strcmp(this_char, MNTOPT_NOGRPID) ||
1752 !strcmp(this_char, MNTOPT_SYSVGROUPS)) {
1753 vfsp->vfs_flag &= ~VFS_GRPID;
1772 } else if (!strcmp(this_char, MNTOPT_WSYNC)) { 1754 } else if (!strcmp(this_char, MNTOPT_WSYNC)) {
1773 args->flags |= XFSMNT_WSYNC; 1755 args->flags |= XFSMNT_WSYNC;
1774 } else if (!strcmp(this_char, MNTOPT_OSYNCISOSYNC)) { 1756 } else if (!strcmp(this_char, MNTOPT_OSYNCISOSYNC)) {
@@ -1890,6 +1872,7 @@ xfs_showargs(
1890 }; 1872 };
1891 struct proc_xfs_info *xfs_infop; 1873 struct proc_xfs_info *xfs_infop;
1892 struct xfs_mount *mp = XFS_BHVTOM(bhv); 1874 struct xfs_mount *mp = XFS_BHVTOM(bhv);
1875 struct vfs *vfsp = XFS_MTOVFS(mp);
1893 1876
1894 for (xfs_infop = xfs_info; xfs_infop->flag; xfs_infop++) { 1877 for (xfs_infop = xfs_info; xfs_infop->flag; xfs_infop++) {
1895 if (mp->m_flags & xfs_infop->flag) 1878 if (mp->m_flags & xfs_infop->flag)
@@ -1926,7 +1909,10 @@ xfs_showargs(
1926 1909
1927 if (!(mp->m_flags & XFS_MOUNT_32BITINOOPT)) 1910 if (!(mp->m_flags & XFS_MOUNT_32BITINOOPT))
1928 seq_printf(m, "," MNTOPT_64BITINODE); 1911 seq_printf(m, "," MNTOPT_64BITINODE);
1929 1912
1913 if (vfsp->vfs_flag & VFS_GRPID)
1914 seq_printf(m, "," MNTOPT_GRPID);
1915
1930 return 0; 1916 return 0;
1931} 1917}
1932 1918
diff --git a/fs/xfs/xfs_vnodeops.c b/fs/xfs/xfs_vnodeops.c
index 1377c868f3f4..58bfe629b933 100644
--- a/fs/xfs/xfs_vnodeops.c
+++ b/fs/xfs/xfs_vnodeops.c
@@ -104,7 +104,7 @@ xfs_open(
104 * If it's a directory with any blocks, read-ahead block 0 104 * If it's a directory with any blocks, read-ahead block 0
105 * as we're almost certain to have the next operation be a read there. 105 * as we're almost certain to have the next operation be a read there.
106 */ 106 */
107 if (vp->v_type == VDIR && ip->i_d.di_nextents > 0) { 107 if (VN_ISDIR(vp) && ip->i_d.di_nextents > 0) {
108 mode = xfs_ilock_map_shared(ip); 108 mode = xfs_ilock_map_shared(ip);
109 if (ip->i_d.di_nextents > 0) 109 if (ip->i_d.di_nextents > 0)
110 (void)xfs_da_reada_buf(NULL, ip, 0, XFS_DATA_FORK); 110 (void)xfs_da_reada_buf(NULL, ip, 0, XFS_DATA_FORK);
@@ -163,18 +163,21 @@ xfs_getattr(
163 /* 163 /*
164 * Copy from in-core inode. 164 * Copy from in-core inode.
165 */ 165 */
166 vap->va_type = vp->v_type; 166 vap->va_mode = ip->i_d.di_mode;
167 vap->va_mode = ip->i_d.di_mode & MODEMASK;
168 vap->va_uid = ip->i_d.di_uid; 167 vap->va_uid = ip->i_d.di_uid;
169 vap->va_gid = ip->i_d.di_gid; 168 vap->va_gid = ip->i_d.di_gid;
170 vap->va_projid = ip->i_d.di_projid; 169 vap->va_projid = ip->i_d.di_projid;
171 170
172 /* 171 /*
173 * Check vnode type block/char vs. everything else. 172 * Check vnode type block/char vs. everything else.
174 * Do it with bitmask because that's faster than looking
175 * for multiple values individually.
176 */ 173 */
177 if (((1 << vp->v_type) & ((1<<VBLK) | (1<<VCHR))) == 0) { 174 switch (ip->i_d.di_mode & S_IFMT) {
175 case S_IFBLK:
176 case S_IFCHR:
177 vap->va_rdev = ip->i_df.if_u2.if_rdev;
178 vap->va_blocksize = BLKDEV_IOSIZE;
179 break;
180 default:
178 vap->va_rdev = 0; 181 vap->va_rdev = 0;
179 182
180 if (!(ip->i_d.di_flags & XFS_DIFLAG_REALTIME)) { 183 if (!(ip->i_d.di_flags & XFS_DIFLAG_REALTIME)) {
@@ -224,9 +227,7 @@ xfs_getattr(
224 (ip->i_d.di_extsize << mp->m_sb.sb_blocklog) : 227 (ip->i_d.di_extsize << mp->m_sb.sb_blocklog) :
225 (mp->m_sb.sb_rextsize << mp->m_sb.sb_blocklog); 228 (mp->m_sb.sb_rextsize << mp->m_sb.sb_blocklog);
226 } 229 }
227 } else { 230 break;
228 vap->va_rdev = ip->i_df.if_u2.if_rdev;
229 vap->va_blocksize = BLKDEV_IOSIZE;
230 } 231 }
231 232
232 vap->va_atime.tv_sec = ip->i_d.di_atime.t_sec; 233 vap->va_atime.tv_sec = ip->i_d.di_atime.t_sec;
@@ -468,7 +469,7 @@ xfs_setattr(
468 m |= S_ISGID; 469 m |= S_ISGID;
469#if 0 470#if 0
470 /* Linux allows this, Irix doesn't. */ 471 /* Linux allows this, Irix doesn't. */
471 if ((vap->va_mode & S_ISVTX) && vp->v_type != VDIR) 472 if ((vap->va_mode & S_ISVTX) && !VN_ISDIR(vp))
472 m |= S_ISVTX; 473 m |= S_ISVTX;
473#endif 474#endif
474 if (m && !capable(CAP_FSETID)) 475 if (m && !capable(CAP_FSETID))
@@ -546,10 +547,10 @@ xfs_setattr(
546 goto error_return; 547 goto error_return;
547 } 548 }
548 549
549 if (vp->v_type == VDIR) { 550 if (VN_ISDIR(vp)) {
550 code = XFS_ERROR(EISDIR); 551 code = XFS_ERROR(EISDIR);
551 goto error_return; 552 goto error_return;
552 } else if (vp->v_type != VREG) { 553 } else if (!VN_ISREG(vp)) {
553 code = XFS_ERROR(EINVAL); 554 code = XFS_ERROR(EINVAL);
554 goto error_return; 555 goto error_return;
555 } 556 }
@@ -1567,7 +1568,7 @@ xfs_release(
1567 vp = BHV_TO_VNODE(bdp); 1568 vp = BHV_TO_VNODE(bdp);
1568 ip = XFS_BHVTOI(bdp); 1569 ip = XFS_BHVTOI(bdp);
1569 1570
1570 if ((vp->v_type != VREG) || (ip->i_d.di_mode == 0)) { 1571 if (!VN_ISREG(vp) || (ip->i_d.di_mode == 0)) {
1571 return 0; 1572 return 0;
1572 } 1573 }
1573 1574
@@ -1895,7 +1896,7 @@ xfs_create(
1895 dp = XFS_BHVTOI(dir_bdp); 1896 dp = XFS_BHVTOI(dir_bdp);
1896 mp = dp->i_mount; 1897 mp = dp->i_mount;
1897 1898
1898 dm_di_mode = vap->va_mode|VTTOIF(vap->va_type); 1899 dm_di_mode = vap->va_mode;
1899 namelen = VNAMELEN(dentry); 1900 namelen = VNAMELEN(dentry);
1900 1901
1901 if (DM_EVENT_ENABLED(dir_vp->v_vfsp, dp, DM_EVENT_CREATE)) { 1902 if (DM_EVENT_ENABLED(dir_vp->v_vfsp, dp, DM_EVENT_CREATE)) {
@@ -1973,8 +1974,7 @@ xfs_create(
1973 (error = XFS_DIR_CANENTER(mp, tp, dp, name, namelen))) 1974 (error = XFS_DIR_CANENTER(mp, tp, dp, name, namelen)))
1974 goto error_return; 1975 goto error_return;
1975 rdev = (vap->va_mask & XFS_AT_RDEV) ? vap->va_rdev : 0; 1976 rdev = (vap->va_mask & XFS_AT_RDEV) ? vap->va_rdev : 0;
1976 error = xfs_dir_ialloc(&tp, dp, 1977 error = xfs_dir_ialloc(&tp, dp, vap->va_mode, 1,
1977 MAKEIMODE(vap->va_type,vap->va_mode), 1,
1978 rdev, credp, prid, resblks > 0, 1978 rdev, credp, prid, resblks > 0,
1979 &ip, &committed); 1979 &ip, &committed);
1980 if (error) { 1980 if (error) {
@@ -2620,7 +2620,7 @@ xfs_link(
2620 vn_trace_entry(src_vp, __FUNCTION__, (inst_t *)__return_address); 2620 vn_trace_entry(src_vp, __FUNCTION__, (inst_t *)__return_address);
2621 2621
2622 target_namelen = VNAMELEN(dentry); 2622 target_namelen = VNAMELEN(dentry);
2623 if (src_vp->v_type == VDIR) 2623 if (VN_ISDIR(src_vp))
2624 return XFS_ERROR(EPERM); 2624 return XFS_ERROR(EPERM);
2625 2625
2626 src_bdp = vn_bhv_lookup_unlocked(VN_BHV_HEAD(src_vp), &xfs_vnodeops); 2626 src_bdp = vn_bhv_lookup_unlocked(VN_BHV_HEAD(src_vp), &xfs_vnodeops);
@@ -2805,7 +2805,7 @@ xfs_mkdir(
2805 2805
2806 tp = NULL; 2806 tp = NULL;
2807 dp_joined_to_trans = B_FALSE; 2807 dp_joined_to_trans = B_FALSE;
2808 dm_di_mode = vap->va_mode|VTTOIF(vap->va_type); 2808 dm_di_mode = vap->va_mode;
2809 2809
2810 if (DM_EVENT_ENABLED(dir_vp->v_vfsp, dp, DM_EVENT_CREATE)) { 2810 if (DM_EVENT_ENABLED(dir_vp->v_vfsp, dp, DM_EVENT_CREATE)) {
2811 error = XFS_SEND_NAMESP(mp, DM_EVENT_CREATE, 2811 error = XFS_SEND_NAMESP(mp, DM_EVENT_CREATE,
@@ -2879,8 +2879,7 @@ xfs_mkdir(
2879 /* 2879 /*
2880 * create the directory inode. 2880 * create the directory inode.
2881 */ 2881 */
2882 error = xfs_dir_ialloc(&tp, dp, 2882 error = xfs_dir_ialloc(&tp, dp, vap->va_mode, 2,
2883 MAKEIMODE(vap->va_type,vap->va_mode), 2,
2884 0, credp, prid, resblks > 0, 2883 0, credp, prid, resblks > 0,
2885 &cdp, NULL); 2884 &cdp, NULL);
2886 if (error) { 2885 if (error) {
@@ -3650,7 +3649,7 @@ xfs_rwlock(
3650 vnode_t *vp; 3649 vnode_t *vp;
3651 3650
3652 vp = BHV_TO_VNODE(bdp); 3651 vp = BHV_TO_VNODE(bdp);
3653 if (vp->v_type == VDIR) 3652 if (VN_ISDIR(vp))
3654 return 1; 3653 return 1;
3655 ip = XFS_BHVTOI(bdp); 3654 ip = XFS_BHVTOI(bdp);
3656 if (locktype == VRWLOCK_WRITE) { 3655 if (locktype == VRWLOCK_WRITE) {
@@ -3681,7 +3680,7 @@ xfs_rwunlock(
3681 vnode_t *vp; 3680 vnode_t *vp;
3682 3681
3683 vp = BHV_TO_VNODE(bdp); 3682 vp = BHV_TO_VNODE(bdp);
3684 if (vp->v_type == VDIR) 3683 if (VN_ISDIR(vp))
3685 return; 3684 return;
3686 ip = XFS_BHVTOI(bdp); 3685 ip = XFS_BHVTOI(bdp);
3687 if (locktype == VRWLOCK_WRITE) { 3686 if (locktype == VRWLOCK_WRITE) {
@@ -3847,51 +3846,10 @@ xfs_reclaim(
3847 return 0; 3846 return 0;
3848 } 3847 }
3849 3848
3850 if ((ip->i_d.di_mode & S_IFMT) == S_IFREG) { 3849 vn_iowait(vp);
3851 if (ip->i_d.di_size > 0) {
3852 /*
3853 * Flush and invalidate any data left around that is
3854 * a part of this file.
3855 *
3856 * Get the inode's i/o lock so that buffers are pushed
3857 * out while holding the proper lock. We can't hold
3858 * the inode lock here since flushing out buffers may
3859 * cause us to try to get the lock in xfs_strategy().
3860 *
3861 * We don't have to call remapf() here, because there
3862 * cannot be any mapped file references to this vnode
3863 * since it is being reclaimed.
3864 */
3865 xfs_ilock(ip, XFS_IOLOCK_EXCL);
3866
3867 /*
3868 * If we hit an IO error, we need to make sure that the
3869 * buffer and page caches of file data for
3870 * the file are tossed away. We don't want to use
3871 * VOP_FLUSHINVAL_PAGES here because we don't want dirty
3872 * pages to stay attached to the vnode, but be
3873 * marked P_BAD. pdflush/vnode_pagebad
3874 * hates that.
3875 */
3876 if (!XFS_FORCED_SHUTDOWN(ip->i_mount)) {
3877 VOP_FLUSHINVAL_PAGES(vp, 0, -1, FI_NONE);
3878 } else {
3879 VOP_TOSS_PAGES(vp, 0, -1, FI_NONE);
3880 }
3881 3850
3882 ASSERT(VN_CACHED(vp) == 0); 3851 ASSERT(XFS_FORCED_SHUTDOWN(ip->i_mount) || ip->i_delayed_blks == 0);
3883 ASSERT(XFS_FORCED_SHUTDOWN(ip->i_mount) || 3852 ASSERT(VN_CACHED(vp) == 0);
3884 ip->i_delayed_blks == 0);
3885 xfs_iunlock(ip, XFS_IOLOCK_EXCL);
3886 } else if (XFS_FORCED_SHUTDOWN(ip->i_mount)) {
3887 /*
3888 * di_size field may not be quite accurate if we're
3889 * shutting down.
3890 */
3891 VOP_TOSS_PAGES(vp, 0, -1, FI_NONE);
3892 ASSERT(VN_CACHED(vp) == 0);
3893 }
3894 }
3895 3853
3896 /* If we have nothing to flush with this inode then complete the 3854 /* If we have nothing to flush with this inode then complete the
3897 * teardown now, otherwise break the link between the xfs inode 3855 * teardown now, otherwise break the link between the xfs inode
@@ -4567,7 +4525,7 @@ xfs_change_file_space(
4567 /* 4525 /*
4568 * must be a regular file and have write permission 4526 * must be a regular file and have write permission
4569 */ 4527 */
4570 if (vp->v_type != VREG) 4528 if (!VN_ISREG(vp))
4571 return XFS_ERROR(EINVAL); 4529 return XFS_ERROR(EINVAL);
4572 4530
4573 xfs_ilock(ip, XFS_ILOCK_SHARED); 4531 xfs_ilock(ip, XFS_ILOCK_SHARED);
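Most of the xfs_vnodeops.c changes above replace vnode-type checks (vp->v_type == VREG/VDIR/VBLK) with checks on the inode mode bits. The standalone program below, using only <sys/stat.h>, shows the same S_IFMT classification the new switch statement relies on; the sample mode value is arbitrary:

    #include <stdio.h>
    #include <sys/stat.h>

    static const char *classify(mode_t mode)
    {
        switch (mode & S_IFMT) {
        case S_IFBLK: return "block device";
        case S_IFCHR: return "char device";
        case S_IFDIR: return "directory";
        case S_IFREG: return "regular file";
        default:      return "other";
        }
    }

    int main(void)
    {
        mode_t sample = S_IFCHR | 0644;   /* arbitrary sample mode */

        printf("%06o -> %s\n", (unsigned)sample, classify(sample));
        printf("S_ISREG=%d S_ISDIR=%d\n", S_ISREG(sample), S_ISDIR(sample));
        return 0;
    }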
diff --git a/include/asm-alpha/auxvec.h b/include/asm-alpha/auxvec.h
new file mode 100644
index 000000000000..e96fe880e310
--- /dev/null
+++ b/include/asm-alpha/auxvec.h
@@ -0,0 +1,24 @@
1#ifndef __ASM_ALPHA_AUXVEC_H
2#define __ASM_ALPHA_AUXVEC_H
3
4/* Reserve these numbers for any future use of a VDSO. */
5#if 0
6#define AT_SYSINFO 32
7#define AT_SYSINFO_EHDR 33
8#endif
9
10/* More complete cache descriptions than AT_[DIU]CACHEBSIZE. If the
11 value is -1, then the cache doesn't exist. Otherwise:
12
13 bit 0-3: Cache set-associativity; 0 means fully associative.
14 bit 4-7: Log2 of cacheline size.
15 bit 8-31: Size of the entire cache >> 8.
16 bit 32-63: Reserved.
17*/
18
19#define AT_L1I_CACHESHAPE 34
20#define AT_L1D_CACHESHAPE 35
21#define AT_L2_CACHESHAPE 36
22#define AT_L3_CACHESHAPE 37
23
24#endif /* __ASM_ALPHA_AUXVEC_H */
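The AT_L*_CACHESHAPE comment above fixes a packed layout: associativity in bits 0-3, log2 of the line size in bits 4-7, cache size >> 8 in bits 8-31. A small decoder for that layout, with a made-up sample value corresponding to an 8-way, 64-byte-line, 32 KiB cache:

    #include <stdio.h>

    int main(void)
    {
        unsigned long shape = 0x8068UL;   /* invented: 8-way, 64B lines, 32 KiB */

        if ((long)shape == -1) {
            printf("cache not present\n");
            return 0;
        }

        unsigned int assoc = shape & 0xf;                 /* 0 = fully associative */
        unsigned int line  = 1u << ((shape >> 4) & 0xf);  /* line size in bytes */
        unsigned long size = ((shape >> 8) & 0xffffff) << 8;

        printf("assoc=%u line=%uB size=%luB\n", assoc, line, size);
        return 0;
    }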
diff --git a/include/asm-alpha/elf.h b/include/asm-alpha/elf.h
index e94a945a2314..6c2d78fba264 100644
--- a/include/asm-alpha/elf.h
+++ b/include/asm-alpha/elf.h
@@ -1,6 +1,8 @@
1#ifndef __ASM_ALPHA_ELF_H 1#ifndef __ASM_ALPHA_ELF_H
2#define __ASM_ALPHA_ELF_H 2#define __ASM_ALPHA_ELF_H
3 3
4#include <asm/auxvec.h>
5
4/* Special values for the st_other field in the symbol table. */ 6/* Special values for the st_other field in the symbol table. */
5 7
6#define STO_ALPHA_NOPV 0x80 8#define STO_ALPHA_NOPV 0x80
@@ -142,26 +144,6 @@ extern int dump_elf_task_fp(elf_fpreg_t *dest, struct task_struct *task);
142 : amask (AMASK_CIX) ? "ev6" : "ev67"); \ 144 : amask (AMASK_CIX) ? "ev6" : "ev67"); \
143}) 145})
144 146
145/* Reserve these numbers for any future use of a VDSO. */
146#if 0
147#define AT_SYSINFO 32
148#define AT_SYSINFO_EHDR 33
149#endif
150
151/* More complete cache descriptions than AT_[DIU]CACHEBSIZE. If the
152 value is -1, then the cache doesn't exist. Otherwise:
153
154 bit 0-3: Cache set-associativity; 0 means fully associative.
155 bit 4-7: Log2 of cacheline size.
156 bit 8-31: Size of the entire cache >> 8.
157 bit 32-63: Reserved.
158*/
159
160#define AT_L1I_CACHESHAPE 34
161#define AT_L1D_CACHESHAPE 35
162#define AT_L2_CACHESHAPE 36
163#define AT_L3_CACHESHAPE 37
164
165#ifdef __KERNEL__ 147#ifdef __KERNEL__
166 148
167#define SET_PERSONALITY(EX, IBCS2) \ 149#define SET_PERSONALITY(EX, IBCS2) \
diff --git a/include/asm-alpha/fcntl.h b/include/asm-alpha/fcntl.h
index 6b7d6c1649ce..87f2cf459e26 100644
--- a/include/asm-alpha/fcntl.h
+++ b/include/asm-alpha/fcntl.h
@@ -3,10 +3,6 @@
3 3
4/* open/fcntl - O_SYNC is only implemented on blocks devices and on files 4/* open/fcntl - O_SYNC is only implemented on blocks devices and on files
5 located on an ext2 file system */ 5 located on an ext2 file system */
6#define O_ACCMODE 0003
7#define O_RDONLY 00
8#define O_WRONLY 01
9#define O_RDWR 02
10#define O_CREAT 01000 /* not fcntl */ 6#define O_CREAT 01000 /* not fcntl */
11#define O_TRUNC 02000 /* not fcntl */ 7#define O_TRUNC 02000 /* not fcntl */
12#define O_EXCL 04000 /* not fcntl */ 8#define O_EXCL 04000 /* not fcntl */
@@ -14,20 +10,13 @@
14 10
15#define O_NONBLOCK 00004 11#define O_NONBLOCK 00004
16#define O_APPEND 00010 12#define O_APPEND 00010
17#define O_NDELAY O_NONBLOCK
18#define O_SYNC 040000 13#define O_SYNC 040000
19#define FASYNC 020000 /* fcntl, for BSD compatibility */
20#define O_DIRECTORY 0100000 /* must be a directory */ 14#define O_DIRECTORY 0100000 /* must be a directory */
21#define O_NOFOLLOW 0200000 /* don't follow links */ 15#define O_NOFOLLOW 0200000 /* don't follow links */
22#define O_LARGEFILE 0400000 /* will be set by the kernel on every open */ 16#define O_LARGEFILE 0400000 /* will be set by the kernel on every open */
23#define O_DIRECT 02000000 /* direct disk access - should check with OSF/1 */ 17#define O_DIRECT 02000000 /* direct disk access - should check with OSF/1 */
24#define O_NOATIME 04000000 18#define O_NOATIME 04000000
25 19
26#define F_DUPFD 0 /* dup */
27#define F_GETFD 1 /* get close_on_exec */
28#define F_SETFD 2 /* set/clear close_on_exec */
29#define F_GETFL 3 /* get file->f_flags */
30#define F_SETFL 4 /* set file->f_flags */
31#define F_GETLK 7 20#define F_GETLK 7
32#define F_SETLK 8 21#define F_SETLK 8
33#define F_SETLKW 9 22#define F_SETLKW 9
@@ -37,9 +26,6 @@
37#define F_SETSIG 10 /* for sockets. */ 26#define F_SETSIG 10 /* for sockets. */
38#define F_GETSIG 11 /* for sockets. */ 27#define F_GETSIG 11 /* for sockets. */
39 28
40/* for F_[GET|SET]FL */
41#define FD_CLOEXEC 1 /* actually anything with low bit set goes */
42
43/* for posix fcntl() and lockf() */ 29/* for posix fcntl() and lockf() */
44#define F_RDLCK 1 30#define F_RDLCK 1
45#define F_WRLCK 2 31#define F_WRLCK 2
@@ -51,25 +37,6 @@
51 37
52#define F_INPROGRESS 64 38#define F_INPROGRESS 64
53 39
54/* operations for bsd flock(), also used by the kernel implementation */ 40#include <asm-generic/fcntl.h>
55#define LOCK_SH 1 /* shared lock */
56#define LOCK_EX 2 /* exclusive lock */
57#define LOCK_NB 4 /* or'd with one of the above to prevent
58 blocking */
59#define LOCK_UN 8 /* remove lock */
60#define LOCK_MAND 32 /* This is a mandatory flock */
61#define LOCK_READ 64 /* ... Which allows concurrent read operations */
62#define LOCK_WRITE 128 /* ... Which allows concurrent write operations */
63#define LOCK_RW 192 /* ... Which allows concurrent read & write ops */
64
65struct flock {
66 short l_type;
67 short l_whence;
68 __kernel_off_t l_start;
69 __kernel_off_t l_len;
70 __kernel_pid_t l_pid;
71};
72
73#define F_LINUX_SPECIFIC_BASE 1024
74 41
75#endif 42#endif
diff --git a/include/asm-alpha/futex.h b/include/asm-alpha/futex.h
new file mode 100644
index 000000000000..2cac5ecd9d00
--- /dev/null
+++ b/include/asm-alpha/futex.h
@@ -0,0 +1,53 @@
1#ifndef _ASM_FUTEX_H
2#define _ASM_FUTEX_H
3
4#ifdef __KERNEL__
5
6#include <linux/futex.h>
7#include <asm/errno.h>
8#include <asm/uaccess.h>
9
10static inline int
11futex_atomic_op_inuser (int encoded_op, int __user *uaddr)
12{
13 int op = (encoded_op >> 28) & 7;
14 int cmp = (encoded_op >> 24) & 15;
15 int oparg = (encoded_op << 8) >> 20;
16 int cmparg = (encoded_op << 20) >> 20;
17 int oldval = 0, ret, tem;
18 if (encoded_op & (FUTEX_OP_OPARG_SHIFT << 28))
19 oparg = 1 << oparg;
20
21 if (! access_ok (VERIFY_WRITE, uaddr, sizeof(int)))
22 return -EFAULT;
23
24 inc_preempt_count();
25
26 switch (op) {
27 case FUTEX_OP_SET:
28 case FUTEX_OP_ADD:
29 case FUTEX_OP_OR:
30 case FUTEX_OP_ANDN:
31 case FUTEX_OP_XOR:
32 default:
33 ret = -ENOSYS;
34 }
35
36 dec_preempt_count();
37
38 if (!ret) {
39 switch (cmp) {
40 case FUTEX_OP_CMP_EQ: ret = (oldval == cmparg); break;
41 case FUTEX_OP_CMP_NE: ret = (oldval != cmparg); break;
42 case FUTEX_OP_CMP_LT: ret = (oldval < cmparg); break;
43 case FUTEX_OP_CMP_GE: ret = (oldval >= cmparg); break;
44 case FUTEX_OP_CMP_LE: ret = (oldval <= cmparg); break;
45 case FUTEX_OP_CMP_GT: ret = (oldval > cmparg); break;
46 default: ret = -ENOSYS;
47 }
48 }
49 return ret;
50}
51
52#endif
53#endif
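The new Alpha futex_atomic_op_inuser() above only decodes encoded_op and returns -ENOSYS for every operation, but the decode itself is worth spelling out: 4 bits of op, 4 bits of comparison, then two 12-bit signed arguments. A userspace illustration of the same unpacking; the packed value is invented, and the casts make explicit the shift trick the kernel expression relies on:

    #include <stdio.h>

    int main(void)
    {
        /* invented encoding: op=1 (ADD), cmp=0 (EQ), oparg=5, cmparg=3 */
        unsigned int encoded_op = (1u << 28) | (0u << 24) | (5u << 12) | 3u;

        int op     = (encoded_op >> 28) & 7;
        int cmp    = (encoded_op >> 24) & 15;
        int oparg  = (int)(encoded_op << 8) >> 20;    /* bits 12..23, sign-extended */
        int cmparg = (int)(encoded_op << 20) >> 20;   /* bits 0..11, sign-extended */

        printf("op=%d cmp=%d oparg=%d cmparg=%d\n", op, cmp, oparg, cmparg);
        return 0;
    }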
diff --git a/include/asm-alpha/hdreg.h b/include/asm-alpha/hdreg.h
deleted file mode 100644
index 7f7fd1af0af3..000000000000
--- a/include/asm-alpha/hdreg.h
+++ /dev/null
@@ -1 +0,0 @@
1#include <asm-generic/hdreg.h>
diff --git a/include/asm-alpha/uaccess.h b/include/asm-alpha/uaccess.h
index 4c39ee750f38..22de3b434a22 100644
--- a/include/asm-alpha/uaccess.h
+++ b/include/asm-alpha/uaccess.h
@@ -48,12 +48,6 @@
48 __access_ok(((unsigned long)(addr)),(size),get_fs()); \ 48 __access_ok(((unsigned long)(addr)),(size),get_fs()); \
49}) 49})
50 50
51/* this function will go away soon - use access_ok() instead */
52extern inline int __deprecated verify_area(int type, const void __user * addr, unsigned long size)
53{
54 return access_ok(type,addr,size) ? 0 : -EFAULT;
55}
56
57/* 51/*
58 * These are the main single-value transfer routines. They automatically 52 * These are the main single-value transfer routines. They automatically
59 * use the right size if we just have the right pointer type. 53 * use the right size if we just have the right pointer type.
diff --git a/include/asm-arm/arch-omap/board-h4.h b/include/asm-arm/arch-omap/board-h4.h
index 79138dcfb4ac..d64ee9211eed 100644
--- a/include/asm-arm/arch-omap/board-h4.h
+++ b/include/asm-arm/arch-omap/board-h4.h
@@ -30,6 +30,9 @@
30#define __ASM_ARCH_OMAP_H4_H 30#define __ASM_ARCH_OMAP_H4_H
31 31
32/* Placeholder for H4 specific defines */ 32/* Placeholder for H4 specific defines */
33/* GPMC CS1 */
34#define OMAP24XX_ETHR_START 0x08000300
35#define OMAP24XX_ETHR_GPIO_IRQ 92
33 36
34#endif /* __ASM_ARCH_OMAP_H4_H */ 37#endif /* __ASM_ARCH_OMAP_H4_H */
35 38
diff --git a/include/asm-arm/arch-omap/board-innovator.h b/include/asm-arm/arch-omap/board-innovator.h
index 0f1abaefe4de..79574e0ed13d 100644
--- a/include/asm-arm/arch-omap/board-innovator.h
+++ b/include/asm-arm/arch-omap/board-innovator.h
@@ -36,31 +36,6 @@
36#define OMAP1510P1_EMIFS_PRI_VALUE 0x00 36#define OMAP1510P1_EMIFS_PRI_VALUE 0x00
37#define OMAP1510P1_EMIFF_PRI_VALUE 0x00 37#define OMAP1510P1_EMIFF_PRI_VALUE 0x00
38 38
39/*
40 * These definitions define an area of FLASH set aside
41 * for the use of MTD/JFFS2. This is the area of flash
42 * that a JFFS2 filesystem will reside which is mounted
43 * at boot with the "root=/dev/mtdblock/0 rw"
44 * command line option. The flash address used here must
45 * fall within the legal range defined by rrload for storing
46 * the filesystem component. This address will be sufficiently
47 * deep into the overall flash range to avoid the other
48 * components also stored in flash such as the bootloader,
49 * the bootloader params, and the kernel.
50 * The SW2 settings for the map below are:
51 * 1 off, 2 off, 3 on, 4 off.
52 */
53
54/* Intel flash_0, partitioned as expected by rrload */
55#define OMAP_FLASH_0_BASE 0xD8000000
56#define OMAP_FLASH_0_START 0x00000000
57#define OMAP_FLASH_0_SIZE SZ_16M
58
59/* Intel flash_1, used for cramfs or other flash file systems */
60#define OMAP_FLASH_1_BASE 0xD9000000
61#define OMAP_FLASH_1_START 0x01000000
62#define OMAP_FLASH_1_SIZE SZ_16M
63
64#define NR_FPGA_IRQS 24 39#define NR_FPGA_IRQS 24
65#define NR_IRQS IH_BOARD_BASE + NR_FPGA_IRQS 40#define NR_IRQS IH_BOARD_BASE + NR_FPGA_IRQS
66 41
diff --git a/include/asm-arm/arch-omap/board-perseus2.h b/include/asm-arm/arch-omap/board-perseus2.h
index 0c224cc74fe4..691e52a52b43 100644
--- a/include/asm-arm/arch-omap/board-perseus2.h
+++ b/include/asm-arm/arch-omap/board-perseus2.h
@@ -36,23 +36,14 @@
36#define OMAP_SDRAM_DEVICE D256M_1X16_4B 36#define OMAP_SDRAM_DEVICE D256M_1X16_4B
37#endif 37#endif
38 38
39/*
40 * These definitions define an area of FLASH set aside
41 * for the use of MTD/JFFS2. This is the area of flash
42 * that a JFFS2 filesystem will reside which is mounted
43 * at boot with the "root=/dev/mtdblock/0 rw"
44 * command line option.
45 */
46
47/* Intel flash_0, partitioned as expected by rrload */
48#define OMAP_FLASH_0_BASE 0xD8000000 /* VA */
49#define OMAP_FLASH_0_START 0x00000000 /* PA */
50#define OMAP_FLASH_0_SIZE SZ_32M
51
52#define MAXIRQNUM IH_BOARD_BASE 39#define MAXIRQNUM IH_BOARD_BASE
53#define MAXFIQNUM MAXIRQNUM 40#define MAXFIQNUM MAXIRQNUM
54#define MAXSWINUM MAXIRQNUM 41#define MAXSWINUM MAXIRQNUM
55 42
56#define NR_IRQS (MAXIRQNUM + 1) 43#define NR_IRQS (MAXIRQNUM + 1)
57 44
45/* Samsung NAND flash at CS2B or CS3(NAND Boot) */
46#define OMAP_NAND_FLASH_START1 0x0A000000 /* CS2B */
47#define OMAP_NAND_FLASH_START2 0x0C000000 /* CS3 */
48
58#endif 49#endif
diff --git a/include/asm-arm/arch-omap/board-voiceblue.h b/include/asm-arm/arch-omap/board-voiceblue.h
index 33977b8956fb..ed6d346ee123 100644
--- a/include/asm-arm/arch-omap/board-voiceblue.h
+++ b/include/asm-arm/arch-omap/board-voiceblue.h
@@ -11,11 +11,6 @@
11#ifndef __ASM_ARCH_VOICEBLUE_H 11#ifndef __ASM_ARCH_VOICEBLUE_H
12#define __ASM_ARCH_VOICEBLUE_H 12#define __ASM_ARCH_VOICEBLUE_H
13 13
14#if (EXTERNAL_MAX_NR_PORTS < 4)
15#undef EXTERNAL_MAX_NR_PORTS
16#define EXTERNAL_MAX_NR_PORTS 4
17#endif
18
19extern void voiceblue_wdt_enable(void); 14extern void voiceblue_wdt_enable(void);
20extern void voiceblue_wdt_disable(void); 15extern void voiceblue_wdt_disable(void);
21extern void voiceblue_wdt_ping(void); 16extern void voiceblue_wdt_ping(void);
diff --git a/include/asm-arm/arch-omap/board.h b/include/asm-arm/arch-omap/board.h
index 95bd625480c1..a0040cd86639 100644
--- a/include/asm-arm/arch-omap/board.h
+++ b/include/asm-arm/arch-omap/board.h
@@ -30,10 +30,23 @@ struct omap_clock_config {
30 u8 system_clock_type; 30 u8 system_clock_type;
31}; 31};
32 32
33struct omap_mmc_conf {
34 unsigned enabled:1;
35 /* nomux means "standard" muxing is wrong on this board, and that
36 * board-specific code handled it before common init logic.
37 */
38 unsigned nomux:1;
39 /* switch pin can be for card detect (default) or card cover */
40 unsigned cover:1;
41 /* 4 wire signaling is optional, and is only used for SD/SDIO */
42 unsigned wire4:1;
43 s16 power_pin;
44 s16 switch_pin;
45 s16 wp_pin;
46};
47
33struct omap_mmc_config { 48struct omap_mmc_config {
34 u8 mmc_blocks; 49 struct omap_mmc_conf mmc[2];
35 s16 mmc1_power_pin, mmc2_power_pin;
36 s16 mmc1_switch_pin, mmc2_switch_pin;
37}; 50};
38 51
39struct omap_serial_console_config { 52struct omap_serial_console_config {
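The MMC board-config change above moves from a handful of flat pin fields to a per-slot omap_mmc_conf with single-bit flags. A standalone sketch of how such a two-slot configuration might be filled in with designated initializers; the struct is re-declared locally and the pin numbers are made up:

    #include <stdio.h>

    struct mmc_conf {
        unsigned enabled:1;
        unsigned nomux:1;
        unsigned cover:1;
        unsigned wire4:1;
        short    power_pin;
        short    switch_pin;
        short    wp_pin;
    };

    struct mmc_config {
        struct mmc_conf mmc[2];
    };

    int main(void)
    {
        /* invented board wiring: slot 0 wired for 4-bit SD, slot 1 unused */
        struct mmc_config cfg = {
            .mmc[0] = { .enabled = 1, .wire4 = 1,
                        .power_pin = -1, .switch_pin = 23, .wp_pin = -1 },
            .mmc[1] = { .enabled = 0,
                        .power_pin = -1, .switch_pin = -1, .wp_pin = -1 },
        };

        for (int i = 0; i < 2; i++)
            printf("slot %d: enabled=%u wire4=%u switch_pin=%d\n",
                   i, cfg.mmc[i].enabled, cfg.mmc[i].wire4, cfg.mmc[i].switch_pin);
        return 0;
    }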
diff --git a/include/asm-arm/arch-omap/cpu.h b/include/asm-arm/arch-omap/cpu.h
index e8786713ee5c..1119e2b53e72 100644
--- a/include/asm-arm/arch-omap/cpu.h
+++ b/include/asm-arm/arch-omap/cpu.h
@@ -38,146 +38,179 @@ extern unsigned int system_rev;
38/* 38/*
39 * Test if multicore OMAP support is needed 39 * Test if multicore OMAP support is needed
40 */ 40 */
41#undef MULTI_OMAP 41#undef MULTI_OMAP1
42#undef MULTI_OMAP2
42#undef OMAP_NAME 43#undef OMAP_NAME
43 44
44#ifdef CONFIG_ARCH_OMAP730 45#ifdef CONFIG_ARCH_OMAP730
45# ifdef OMAP_NAME 46# ifdef OMAP_NAME
46# undef MULTI_OMAP 47# undef MULTI_OMAP1
47# define MULTI_OMAP 48# define MULTI_OMAP1
48# else 49# else
49# define OMAP_NAME omap730 50# define OMAP_NAME omap730
50# endif 51# endif
51#endif 52#endif
52#ifdef CONFIG_ARCH_OMAP1510 53#ifdef CONFIG_ARCH_OMAP1510
53# ifdef OMAP_NAME 54# ifdef OMAP_NAME
54# undef MULTI_OMAP 55# undef MULTI_OMAP1
55# define MULTI_OMAP 56# define MULTI_OMAP1
56# else 57# else
57# define OMAP_NAME omap1510 58# define OMAP_NAME omap1510
58# endif 59# endif
59#endif 60#endif
60#ifdef CONFIG_ARCH_OMAP16XX 61#ifdef CONFIG_ARCH_OMAP16XX
61# ifdef OMAP_NAME 62# ifdef OMAP_NAME
62# undef MULTI_OMAP 63# undef MULTI_OMAP1
63# define MULTI_OMAP 64# define MULTI_OMAP1
64# else 65# else
65# define OMAP_NAME omap1610 66# define OMAP_NAME omap16xx
66# endif 67# endif
67#endif 68#endif
68#ifdef CONFIG_ARCH_OMAP16XX 69#ifdef CONFIG_ARCH_OMAP24XX
69# ifdef OMAP_NAME 70# if (defined(OMAP_NAME) || defined(MULTI_OMAP1))
70# undef MULTI_OMAP 71# error "OMAP1 and OMAP2 can't be selected at the same time"
71# define MULTI_OMAP
72# else 72# else
73# define OMAP_NAME omap1710 73# undef MULTI_OMAP2
74# define OMAP_NAME omap24xx
74# endif 75# endif
75#endif 76#endif
76 77
77/* 78/*
78 * Generate various OMAP cpu specific macros, and cpu class 79 * Macros to group OMAP into cpu classes.
79 * specific macros 80 * These can be used in most places.
81 * cpu_is_omap7xx(): True for OMAP730
82 * cpu_is_omap15xx(): True for OMAP1510 and OMAP5910
83 * cpu_is_omap16xx(): True for OMAP1610, OMAP5912 and OMAP1710
84 * cpu_is_omap24xx(): True for OMAP2420
80 */ 85 */
81#define GET_OMAP_TYPE ((system_rev >> 24) & 0xff)
82#define GET_OMAP_CLASS (system_rev & 0xff) 86#define GET_OMAP_CLASS (system_rev & 0xff)
83 87
84#define IS_OMAP_TYPE(type, id) \
85static inline int is_omap ##type (void) \
86{ \
87 return (GET_OMAP_TYPE == (id)) ? 1 : 0; \
88}
89
90#define IS_OMAP_CLASS(class, id) \ 88#define IS_OMAP_CLASS(class, id) \
91static inline int is_omap ##class (void) \ 89static inline int is_omap ##class (void) \
92{ \ 90{ \
93 return (GET_OMAP_CLASS == (id)) ? 1 : 0; \ 91 return (GET_OMAP_CLASS == (id)) ? 1 : 0; \
94} 92}
95 93
96IS_OMAP_TYPE(730, 0x07)
97IS_OMAP_TYPE(1510, 0x15)
98IS_OMAP_TYPE(1610, 0x16)
99IS_OMAP_TYPE(5912, 0x16)
100IS_OMAP_TYPE(1710, 0x17)
101IS_OMAP_TYPE(2420, 0x24)
102
103IS_OMAP_CLASS(7xx, 0x07) 94IS_OMAP_CLASS(7xx, 0x07)
104IS_OMAP_CLASS(15xx, 0x15) 95IS_OMAP_CLASS(15xx, 0x15)
105IS_OMAP_CLASS(16xx, 0x16) 96IS_OMAP_CLASS(16xx, 0x16)
106IS_OMAP_CLASS(24xx, 0x24) 97IS_OMAP_CLASS(24xx, 0x24)
107 98
108/* 99#define cpu_is_omap7xx() 0
109 * Macros to group OMAP types into cpu classes. 100#define cpu_is_omap15xx() 0
110 * These can be used in most places. 101#define cpu_is_omap16xx() 0
111 * cpu_is_omap15xx(): True for 1510 and 5910 102#define cpu_is_omap24xx() 0
112 * cpu_is_omap16xx(): True for 1610, 5912 and 1710 103
113 */ 104#if defined(MULTI_OMAP1)
114#if defined(MULTI_OMAP) 105# if defined(CONFIG_ARCH_OMAP730)
115# define cpu_is_omap7xx() is_omap7xx() 106# undef cpu_is_omap7xx
116# define cpu_is_omap15xx() is_omap15xx() 107# define cpu_is_omap7xx() is_omap7xx()
117# if !(defined(CONFIG_ARCH_OMAP1510) || defined(CONFIG_ARCH_OMAP730)) 108# endif
118# define cpu_is_omap16xx() 1 109# if defined(CONFIG_ARCH_OMAP1510)
119# else 110# undef cpu_is_omap15xx
111# define cpu_is_omap15xx() is_omap15xx()
112# endif
113# if defined(CONFIG_ARCH_OMAP16XX)
114# undef cpu_is_omap16xx
120# define cpu_is_omap16xx() is_omap16xx() 115# define cpu_is_omap16xx() is_omap16xx()
121# endif 116# endif
122#else 117#else
123# if defined(CONFIG_ARCH_OMAP730) 118# if defined(CONFIG_ARCH_OMAP730)
119# undef cpu_is_omap7xx
124# define cpu_is_omap7xx() 1 120# define cpu_is_omap7xx() 1
125# else
126# define cpu_is_omap7xx() 0
127# endif 121# endif
128# if defined(CONFIG_ARCH_OMAP1510) 122# if defined(CONFIG_ARCH_OMAP1510)
123# undef cpu_is_omap15xx
129# define cpu_is_omap15xx() 1 124# define cpu_is_omap15xx() 1
130# else
131# define cpu_is_omap15xx() 0
132# endif 125# endif
133# if defined(CONFIG_ARCH_OMAP16XX) 126# if defined(CONFIG_ARCH_OMAP16XX)
127# undef cpu_is_omap16xx
134# define cpu_is_omap16xx() 1 128# define cpu_is_omap16xx() 1
135# else 129# endif
136# define cpu_is_omap16xx() 0 130# if defined(CONFIG_ARCH_OMAP24XX)
131# undef cpu_is_omap24xx
132# define cpu_is_omap24xx() 1
137# endif 133# endif
138#endif 134#endif
139 135
140#if defined(MULTI_OMAP) 136/*
141# define cpu_is_omap730() is_omap730() 137 * Macros to detect individual cpu types.
142# define cpu_is_omap1510() is_omap1510() 138 * These are only rarely needed.
143# define cpu_is_omap1610() is_omap1610() 139 * cpu_is_omap730(): True for OMAP730
144# define cpu_is_omap5912() is_omap5912() 140 * cpu_is_omap1510(): True for OMAP1510
145# define cpu_is_omap1710() is_omap1710() 141 * cpu_is_omap1610(): True for OMAP1610
142 * cpu_is_omap1611(): True for OMAP1611
143 * cpu_is_omap5912(): True for OMAP5912
144 * cpu_is_omap1621(): True for OMAP1621
145 * cpu_is_omap1710(): True for OMAP1710
146 * cpu_is_omap2420(): True for OMAP2420
147 */
148#define GET_OMAP_TYPE ((system_rev >> 16) & 0xffff)
149
150#define IS_OMAP_TYPE(type, id) \
151static inline int is_omap ##type (void) \
152{ \
153 return (GET_OMAP_TYPE == (id)) ? 1 : 0; \
154}
155
156IS_OMAP_TYPE(730, 0x0730)
157IS_OMAP_TYPE(1510, 0x1510)
158IS_OMAP_TYPE(1610, 0x1610)
159IS_OMAP_TYPE(1611, 0x1611)
160IS_OMAP_TYPE(5912, 0x1611)
161IS_OMAP_TYPE(1621, 0x1621)
162IS_OMAP_TYPE(1710, 0x1710)
163IS_OMAP_TYPE(2420, 0x2420)
164
165#define cpu_is_omap730() 0
166#define cpu_is_omap1510() 0
167#define cpu_is_omap1610() 0
168#define cpu_is_omap5912() 0
169#define cpu_is_omap1611() 0
170#define cpu_is_omap1621() 0
171#define cpu_is_omap1710() 0
172#define cpu_is_omap2420() 0
173
174#if defined(MULTI_OMAP1)
175# if defined(CONFIG_ARCH_OMAP730)
176# undef cpu_is_omap730
177# define cpu_is_omap730() is_omap730()
178# endif
179# if defined(CONFIG_ARCH_OMAP1510)
180# undef cpu_is_omap1510
181# define cpu_is_omap1510() is_omap1510()
182# endif
146#else 183#else
147# if defined(CONFIG_ARCH_OMAP730) 184# if defined(CONFIG_ARCH_OMAP730)
185# undef cpu_is_omap730
148# define cpu_is_omap730() 1 186# define cpu_is_omap730() 1
149# else
150# define cpu_is_omap730() 0
151# endif 187# endif
152# if defined(CONFIG_ARCH_OMAP1510) 188# if defined(CONFIG_ARCH_OMAP1510)
189# undef cpu_is_omap1510
153# define cpu_is_omap1510() 1 190# define cpu_is_omap1510() 1
154# else
155# define cpu_is_omap1510() 0
156# endif 191# endif
157# if defined(CONFIG_ARCH_OMAP16XX) 192#endif
158# define cpu_is_omap1610() 1 193
159# else 194/*
160# define cpu_is_omap1610() 0 195 * Whether we have MULTI_OMAP1 or not, we still need to distinguish
161# endif 196 * between 1611B/5912 and 1710.
162# if defined(CONFIG_ARCH_OMAP16XX) 197 */
163# define cpu_is_omap5912() 1 198#if defined(CONFIG_ARCH_OMAP16XX)
164# else 199# undef cpu_is_omap1610
165# define cpu_is_omap5912() 0 200# undef cpu_is_omap1611
166# endif 201# undef cpu_is_omap5912
167# if defined(CONFIG_ARCH_OMAP16XX) 202# undef cpu_is_omap1621
203# undef cpu_is_omap1710
168# define cpu_is_omap1610() is_omap1610() 204# define cpu_is_omap1610() is_omap1610()
205# define cpu_is_omap1611() is_omap1611()
169# define cpu_is_omap5912() is_omap5912() 206# define cpu_is_omap5912() is_omap5912()
207# define cpu_is_omap1621() is_omap1621()
170# define cpu_is_omap1710() is_omap1710() 208# define cpu_is_omap1710() is_omap1710()
171# else 209#endif
172# define cpu_is_omap1610() 0 210
173# define cpu_is_omap5912() 0 211#if defined(CONFIG_ARCH_OMAP2420)
174# define cpu_is_omap1710() 0 212# undef cpu_is_omap2420
175# endif
176# if defined(CONFIG_ARCH_OMAP2420)
177# define cpu_is_omap2420() 1 213# define cpu_is_omap2420() 1
178# else
179# define cpu_is_omap2420() 0
180# endif
181#endif 214#endif
182 215
183#endif 216#endif
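The reworked cpu.h above derives both the cpu class and the exact cpu type from system_rev: the class sits in the low byte, and the type (now 16 bits wide, so the 1611/1621 variants can be told apart) in bits 16-31. A userspace sketch of that decode with an invented system_rev value:

    #include <stdio.h>

    static unsigned int system_rev = 0x16100016;   /* invented: an OMAP1610 */

    #define GET_OMAP_CLASS (system_rev & 0xff)
    #define GET_OMAP_TYPE  ((system_rev >> 16) & 0xffff)

    int main(void)
    {
        printf("class=0x%02x type=0x%04x\n", GET_OMAP_CLASS, GET_OMAP_TYPE);
        printf("cpu_is_omap16xx=%d cpu_is_omap1610=%d\n",
               GET_OMAP_CLASS == 0x16, GET_OMAP_TYPE == 0x1610);
        return 0;
    }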
diff --git a/include/asm-arm/arch-omap/debug-macro.S b/include/asm-arm/arch-omap/debug-macro.S
index 83bb458afd0b..ca4f577f9675 100644
--- a/include/asm-arm/arch-omap/debug-macro.S
+++ b/include/asm-arm/arch-omap/debug-macro.S
@@ -14,6 +14,7 @@
14 .macro addruart,rx 14 .macro addruart,rx
15 mrc p15, 0, \rx, c1, c0 15 mrc p15, 0, \rx, c1, c0
16 tst \rx, #1 @ MMU enabled? 16 tst \rx, #1 @ MMU enabled?
17#ifdef CONFIG_ARCH_OMAP1
17 moveq \rx, #0xff000000 @ physical base address 18 moveq \rx, #0xff000000 @ physical base address
18 movne \rx, #0xfe000000 @ virtual base 19 movne \rx, #0xfe000000 @ virtual base
19 orr \rx, \rx, #0x00fb0000 20 orr \rx, \rx, #0x00fb0000
@@ -23,6 +24,18 @@
23#if defined(CONFIG_OMAP_LL_DEBUG_UART2) || defined(CONFIG_OMAP_LL_DEBUG_UART3) 24#if defined(CONFIG_OMAP_LL_DEBUG_UART2) || defined(CONFIG_OMAP_LL_DEBUG_UART3)
24 orr \rx, \rx, #0x00000800 @ UART 2 & 3 25 orr \rx, \rx, #0x00000800 @ UART 2 & 3
25#endif 26#endif
27
28#elif CONFIG_ARCH_OMAP2
29 moveq \rx, #0x48000000 @ physical base address
30 movne \rx, #0xd8000000 @ virtual base
31 orr \rx, \rx, #0x0006a000
32#ifdef CONFIG_OMAP_LL_DEBUG_UART2
33 add \rx, \rx, #0x00002000 @ UART 2
34#endif
35#ifdef CONFIG_OMAP_LL_DEBUG_UART3
36 add \rx, \rx, #0x00004000 @ UART 3
37#endif
38#endif
26 .endm 39 .endm
27 40
28 .macro senduart,rd,rx 41 .macro senduart,rd,rx
diff --git a/include/asm-arm/arch-omap/dma.h b/include/asm-arm/arch-omap/dma.h
index ce114ce5af5d..04ebef5c6e95 100644
--- a/include/asm-arm/arch-omap/dma.h
+++ b/include/asm-arm/arch-omap/dma.h
@@ -240,6 +240,7 @@ extern void omap_dma_unlink_lch (int lch_head, int lch_queue);
240 240
241extern dma_addr_t omap_get_dma_src_pos(int lch); 241extern dma_addr_t omap_get_dma_src_pos(int lch);
242extern dma_addr_t omap_get_dma_dst_pos(int lch); 242extern dma_addr_t omap_get_dma_dst_pos(int lch);
243extern int omap_get_dma_src_addr_counter(int lch);
243extern void omap_clear_dma(int lch); 244extern void omap_clear_dma(int lch);
244extern int omap_dma_running(void); 245extern int omap_dma_running(void);
245 246
diff --git a/include/asm-arm/arch-omap/dmtimer.h b/include/asm-arm/arch-omap/dmtimer.h
new file mode 100644
index 000000000000..11772c792f3e
--- /dev/null
+++ b/include/asm-arm/arch-omap/dmtimer.h
@@ -0,0 +1,92 @@
1/*
2 * linux/include/asm-arm/arm/arch-omap/dmtimer.h
3 *
4 * OMAP Dual-Mode Timers
5 *
6 * Copyright (C) 2005 Nokia Corporation
7 * Author: Lauri Leukkunen <lauri.leukkunen@nokia.com>
8 *
9 * This program is free software; you can redistribute it and/or modify it
10 * under the terms of the GNU General Public License as published by the
11 * Free Software Foundation; either version 2 of the License, or (at your
12 * option) any later version.
13 *
14 * THIS SOFTWARE IS PROVIDED ``AS IS'' AND ANY EXPRESS OR IMPLIED
15 * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
16 * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN
17 * NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
18 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
19 * ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
20 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
21 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
22 *
23 * You should have received a copy of the GNU General Public License along
24 * with this program; if not, write to the Free Software Foundation, Inc.,
25 * 675 Mass Ave, Cambridge, MA 02139, USA.
26 */
27
28#ifndef __ASM_ARCH_TIMER_H
29#define __ASM_ARCH_TIMER_H
30
31#include <linux/list.h>
32
33#define OMAP_TIMER_SRC_ARMXOR 0x00
34#define OMAP_TIMER_SRC_32_KHZ 0x01
35#define OMAP_TIMER_SRC_EXT_CLK 0x02
36
37/* timer control reg bits */
38#define OMAP_TIMER_CTRL_CAPTMODE (1 << 13)
39#define OMAP_TIMER_CTRL_PT (1 << 12)
40#define OMAP_TIMER_CTRL_TRG_OVERFLOW (0x1 << 10)
41#define OMAP_TIMER_CTRL_TRG_OFANDMATCH (0x2 << 10)
42#define OMAP_TIMER_CTRL_TCM_LOWTOHIGH (0x1 << 8)
43#define OMAP_TIMER_CTRL_TCM_HIGHTOLOW (0x2 << 8)
44#define OMAP_TIMER_CTRL_TCM_BOTHEDGES (0x3 << 8)
45#define OMAP_TIMER_CTRL_SCPWM (1 << 7)
46#define OMAP_TIMER_CTRL_CE (1 << 6) /* compare enable */
47#define OMAP_TIMER_CTRL_PRE (1 << 5) /* prescaler enable */
48#define OMAP_TIMER_CTRL_PTV_SHIFT 2 /* how much to shift the prescaler value */
49#define OMAP_TIMER_CTRL_AR (1 << 1) /* auto-reload enable */
50#define OMAP_TIMER_CTRL_ST (1 << 0) /* start timer */
51
52/* timer interrupt enable bits */
53#define OMAP_TIMER_INT_CAPTURE (1 << 2)
54#define OMAP_TIMER_INT_OVERFLOW (1 << 1)
55#define OMAP_TIMER_INT_MATCH (1 << 0)
56
57
58struct omap_dm_timer {
59 struct list_head timer_list;
60
61 u32 base;
62 unsigned int irq;
63};
64
65u32 omap_dm_timer_read_reg(struct omap_dm_timer *timer, int reg);
66void omap_dm_timer_write_reg(struct omap_dm_timer *timer, int reg, u32 value);
67
68struct omap_dm_timer * omap_dm_timer_request(void);
69void omap_dm_timer_free(struct omap_dm_timer *timer);
70void omap_dm_timer_set_source(struct omap_dm_timer *timer, int source);
71
72void omap_dm_timer_set_int_enable(struct omap_dm_timer *timer, unsigned int value);
73void omap_dm_timer_set_trigger(struct omap_dm_timer *timer, unsigned int value);
74void omap_dm_timer_enable_compare(struct omap_dm_timer *timer);
75void omap_dm_timer_enable_autoreload(struct omap_dm_timer *timer);
76
77void omap_dm_timer_trigger(struct omap_dm_timer *timer);
78void omap_dm_timer_start(struct omap_dm_timer *timer);
79void omap_dm_timer_stop(struct omap_dm_timer *timer);
80
81void omap_dm_timer_set_load(struct omap_dm_timer *timer, unsigned int load);
82void omap_dm_timer_set_match(struct omap_dm_timer *timer, unsigned int match);
83
84unsigned int omap_dm_timer_read_status(struct omap_dm_timer *timer);
85void omap_dm_timer_write_status(struct omap_dm_timer *timer, unsigned int value);
86
87unsigned int omap_dm_timer_read_counter(struct omap_dm_timer *timer);
88void omap_dm_timer_reset_counter(struct omap_dm_timer *timer);
89
90int omap_dm_timers_active(void);
91
92#endif /* __ASM_ARCH_TIMER_H */
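
The header above only declares the dual-mode timer API; the implementation lands in arch/arm/plat-omap/dmtimer.c elsewhere in this patch. The following is a minimal sketch of how a driver might drive one GPT with these calls, assuming the functions behave as their names suggest; only the prototypes and constants come from the header, everything else is illustrative.

#include <asm/arch/dmtimer.h>

static struct omap_dm_timer *gpt;

/* Sketch only: claim a free timer, clock it from the 32 kHz source and
 * let it free-run with auto-reload, raising an overflow interrupt. */
static int example_start_periodic_timer(unsigned int load)
{
	gpt = omap_dm_timer_request();
	if (gpt == NULL)
		return -1;

	omap_dm_timer_set_source(gpt, OMAP_TIMER_SRC_32_KHZ);
	omap_dm_timer_set_load(gpt, load);		/* counter reload value */
	omap_dm_timer_enable_autoreload(gpt);
	omap_dm_timer_set_int_enable(gpt, OMAP_TIMER_INT_OVERFLOW);
	omap_dm_timer_start(gpt);
	return 0;
}

static void example_stop_periodic_timer(void)
{
	omap_dm_timer_stop(gpt);
	omap_dm_timer_free(gpt);
}
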
diff --git a/include/asm-arm/arch-omap/dsp.h b/include/asm-arm/arch-omap/dsp.h
new file mode 100644
index 000000000000..57bf4f39ca58
--- /dev/null
+++ b/include/asm-arm/arch-omap/dsp.h
@@ -0,0 +1,244 @@
1/*
2 * linux/include/asm-arm/arch-omap/dsp.h
3 *
4 * Header for OMAP DSP driver
5 *
6 * Copyright (C) 2002-2005 Nokia Corporation
7 *
8 * Written by Toshihiro Kobayashi <toshihiro.kobayashi@nokia.com>
9 *
10 * This program is free software; you can redistribute it and/or modify
11 * it under the terms of the GNU General Public License as published by
12 * the Free Software Foundation; either version 2 of the License, or
13 * (at your option) any later version.
14 *
15 * This program is distributed in the hope that it will be useful,
16 * but WITHOUT ANY WARRANTY; without even the implied warranty of
17 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
18 * GNU General Public License for more details.
19 *
20 * You should have received a copy of the GNU General Public License
21 * along with this program; if not, write to the Free Software
22 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
23 *
24 * 2005/06/01: DSP Gateway version 3.3
25 */
26
27#ifndef ASM_ARCH_DSP_H
28#define ASM_ARCH_DSP_H
29
30
31/*
32 * for /dev/dspctl/ctl
33 */
34#define OMAP_DSP_IOCTL_RESET 1
35#define OMAP_DSP_IOCTL_RUN 2
36#define OMAP_DSP_IOCTL_SETRSTVECT 3
37#define OMAP_DSP_IOCTL_CPU_IDLE 4
38#define OMAP_DSP_IOCTL_MPUI_WORDSWAP_ON 5
39#define OMAP_DSP_IOCTL_MPUI_WORDSWAP_OFF 6
40#define OMAP_DSP_IOCTL_MPUI_BYTESWAP_ON 7
41#define OMAP_DSP_IOCTL_MPUI_BYTESWAP_OFF 8
42#define OMAP_DSP_IOCTL_GBL_IDLE 9
43#define OMAP_DSP_IOCTL_DSPCFG 10
44#define OMAP_DSP_IOCTL_DSPUNCFG 11
45#define OMAP_DSP_IOCTL_TASKCNT 12
46#define OMAP_DSP_IOCTL_POLL 13
47#define OMAP_DSP_IOCTL_REGMEMR 40
48#define OMAP_DSP_IOCTL_REGMEMW 41
49#define OMAP_DSP_IOCTL_REGIOR 42
50#define OMAP_DSP_IOCTL_REGIOW 43
51#define OMAP_DSP_IOCTL_GETVAR 44
52#define OMAP_DSP_IOCTL_SETVAR 45
53#define OMAP_DSP_IOCTL_RUNLEVEL 50
54#define OMAP_DSP_IOCTL_SUSPEND 51
55#define OMAP_DSP_IOCTL_RESUME 52
56#define OMAP_DSP_IOCTL_FBEN 53
57#define OMAP_DSP_IOCTL_FBDIS 54
58#define OMAP_DSP_IOCTL_MBSEND 99
59
60/*
61 * for taskdev
62 * (ioctls below should be >= 0x10000)
63 */
64#define OMAP_DSP_TASK_IOCTL_BFLSH 0x10000
65#define OMAP_DSP_TASK_IOCTL_SETBSZ 0x10001
66#define OMAP_DSP_TASK_IOCTL_LOCK 0x10002
67#define OMAP_DSP_TASK_IOCTL_UNLOCK 0x10003
68#define OMAP_DSP_TASK_IOCTL_GETNAME 0x10004
69
70/*
71 * for /dev/dspctl/mem
72 */
73#define OMAP_DSP_MEM_IOCTL_EXMAP 1
74#define OMAP_DSP_MEM_IOCTL_EXUNMAP 2
75#define OMAP_DSP_MEM_IOCTL_EXMAP_FLUSH 3
76#define OMAP_DSP_MEM_IOCTL_FBEXPORT 5
77#define OMAP_DSP_MEM_IOCTL_MMUITACK 7
78#define OMAP_DSP_MEM_IOCTL_MMUINIT 9
79#define OMAP_DSP_MEM_IOCTL_KMEM_RESERVE 11
80#define OMAP_DSP_MEM_IOCTL_KMEM_RELEASE 12
81
82struct omap_dsp_mapinfo {
83 unsigned long dspadr;
84 unsigned long size;
85};
86
87/*
88 * for /dev/dspctl/twch
89 */
90#define OMAP_DSP_TWCH_IOCTL_MKDEV 1
91#define OMAP_DSP_TWCH_IOCTL_RMDEV 2
92#define OMAP_DSP_TWCH_IOCTL_TADD 11
93#define OMAP_DSP_TWCH_IOCTL_TDEL 12
94#define OMAP_DSP_TWCH_IOCTL_TKILL 13
95
96#define OMAP_DSP_DEVSTATE_NOTASK 0x00000001
97#define OMAP_DSP_DEVSTATE_ATTACHED 0x00000002
98#define OMAP_DSP_DEVSTATE_GARBAGE 0x00000004
99#define OMAP_DSP_DEVSTATE_INVALID 0x00000008
100#define OMAP_DSP_DEVSTATE_ADDREQ 0x00000100
101#define OMAP_DSP_DEVSTATE_DELREQ 0x00000200
102#define OMAP_DSP_DEVSTATE_ADDFAIL 0x00001000
103#define OMAP_DSP_DEVSTATE_ADDING 0x00010000
104#define OMAP_DSP_DEVSTATE_DELING 0x00020000
105#define OMAP_DSP_DEVSTATE_KILLING 0x00040000
106#define OMAP_DSP_DEVSTATE_STATE_MASK 0x7fffffff
107#define OMAP_DSP_DEVSTATE_STALE 0x80000000
108
109struct omap_dsp_taddinfo {
110 unsigned char minor;
111 unsigned long taskadr;
112};
113#define OMAP_DSP_TADD_ABORTADR 0xffffffff
114
115
116/*
117 * error cause definition (for error detection device)
118 */
119#define OMAP_DSP_ERRDT_WDT 0x00000001
120#define OMAP_DSP_ERRDT_MMU 0x00000002
121
122
123/*
124 * mailbox protocol definitions
125 */
126
127struct omap_dsp_mailbox_cmd {
128 unsigned short cmd;
129 unsigned short data;
130};
131
132struct omap_dsp_reginfo {
133 unsigned short adr;
134 unsigned short val;
135};
136
137struct omap_dsp_varinfo {
138 unsigned char varid;
139 unsigned short val[0];
140};
141
142#define OMAP_DSP_MBPROT_REVISION 0x0019
143
144#define OMAP_DSP_MBCMD_WDSND 0x10
145#define OMAP_DSP_MBCMD_WDREQ 0x11
146#define OMAP_DSP_MBCMD_BKSND 0x20
147#define OMAP_DSP_MBCMD_BKREQ 0x21
148#define OMAP_DSP_MBCMD_BKYLD 0x23
149#define OMAP_DSP_MBCMD_BKSNDP 0x24
150#define OMAP_DSP_MBCMD_BKREQP 0x25
151#define OMAP_DSP_MBCMD_TCTL 0x30
152#define OMAP_DSP_MBCMD_TCTLDATA 0x31
153#define OMAP_DSP_MBCMD_POLL 0x32
154#define OMAP_DSP_MBCMD_WDT 0x50 /* v3.3: obsolete */
155#define OMAP_DSP_MBCMD_RUNLEVEL 0x51
156#define OMAP_DSP_MBCMD_PM 0x52
157#define OMAP_DSP_MBCMD_SUSPEND 0x53
158#define OMAP_DSP_MBCMD_KFUNC 0x54
159#define OMAP_DSP_MBCMD_TCFG 0x60
160#define OMAP_DSP_MBCMD_TADD 0x62
161#define OMAP_DSP_MBCMD_TDEL 0x63
162#define OMAP_DSP_MBCMD_TSTOP 0x65
163#define OMAP_DSP_MBCMD_DSPCFG 0x70
164#define OMAP_DSP_MBCMD_REGRW 0x72
165#define OMAP_DSP_MBCMD_GETVAR 0x74
166#define OMAP_DSP_MBCMD_SETVAR 0x75
167#define OMAP_DSP_MBCMD_ERR 0x78
168#define OMAP_DSP_MBCMD_DBG 0x79
169
170#define OMAP_DSP_MBCMD_TCTL_TINIT 0x0000
171#define OMAP_DSP_MBCMD_TCTL_TEN 0x0001
172#define OMAP_DSP_MBCMD_TCTL_TDIS 0x0002
173#define OMAP_DSP_MBCMD_TCTL_TCLR 0x0003
174#define OMAP_DSP_MBCMD_TCTL_TCLR_FORCE 0x0004
175
176#define OMAP_DSP_MBCMD_RUNLEVEL_USER 0x01
177#define OMAP_DSP_MBCMD_RUNLEVEL_SUPER 0x0e
178#define OMAP_DSP_MBCMD_RUNLEVEL_RECOVERY 0x10
179
180#define OMAP_DSP_MBCMD_PM_DISABLE 0x00
181#define OMAP_DSP_MBCMD_PM_ENABLE 0x01
182
183#define OMAP_DSP_MBCMD_KFUNC_FBCTL 0x00
184
185#define OMAP_DSP_MBCMD_FBCTL_ENABLE 0x0002
186#define OMAP_DSP_MBCMD_FBCTL_DISABLE 0x0003
187
188#define OMAP_DSP_MBCMD_TDEL_SAFE 0x0000
189#define OMAP_DSP_MBCMD_TDEL_KILL 0x0001
190
191#define OMAP_DSP_MBCMD_DSPCFG_REQ 0x00
192#define OMAP_DSP_MBCMD_DSPCFG_SYSADRH 0x28
193#define OMAP_DSP_MBCMD_DSPCFG_SYSADRL 0x29
194#define OMAP_DSP_MBCMD_DSPCFG_PROTREV 0x70
195#define OMAP_DSP_MBCMD_DSPCFG_ABORT 0x78
196#define OMAP_DSP_MBCMD_DSPCFG_LAST 0x80
197
198#define OMAP_DSP_MBCMD_REGRW_MEMR 0x00
199#define OMAP_DSP_MBCMD_REGRW_MEMW 0x01
200#define OMAP_DSP_MBCMD_REGRW_IOR 0x02
201#define OMAP_DSP_MBCMD_REGRW_IOW 0x03
202#define OMAP_DSP_MBCMD_REGRW_DATA 0x04
203
204#define OMAP_DSP_MBCMD_VARID_ICRMASK 0x00
205#define OMAP_DSP_MBCMD_VARID_LOADINFO 0x01
206
207#define OMAP_DSP_TTYP_ARCV 0x0001
208#define OMAP_DSP_TTYP_ASND 0x0002
209#define OMAP_DSP_TTYP_BKMD 0x0004
210#define OMAP_DSP_TTYP_BKDM 0x0008
211#define OMAP_DSP_TTYP_PVMD 0x0010
212#define OMAP_DSP_TTYP_PVDM 0x0020
213
214#define OMAP_DSP_EID_BADTID 0x10
215#define OMAP_DSP_EID_BADTCN 0x11
216#define OMAP_DSP_EID_BADBID 0x20
217#define OMAP_DSP_EID_BADCNT 0x21
218#define OMAP_DSP_EID_NOTLOCKED 0x22
219#define OMAP_DSP_EID_STVBUF 0x23
220#define OMAP_DSP_EID_BADADR 0x24
221#define OMAP_DSP_EID_BADTCTL 0x30
222#define OMAP_DSP_EID_BADPARAM 0x50
223#define OMAP_DSP_EID_FATAL 0x58
224#define OMAP_DSP_EID_NOMEM 0xc0
225#define OMAP_DSP_EID_NORES 0xc1
226#define OMAP_DSP_EID_IPBFULL 0xc2
227#define OMAP_DSP_EID_WDT 0xd0
228#define OMAP_DSP_EID_TASKNOTRDY 0xe0
229#define OMAP_DSP_EID_TASKBSY 0xe1
230#define OMAP_DSP_EID_TASKERR 0xef
231#define OMAP_DSP_EID_BADCFGTYP 0xf0
232#define OMAP_DSP_EID_DEBUG 0xf8
233#define OMAP_DSP_EID_BADSEQ 0xfe
234#define OMAP_DSP_EID_BADCMD 0xff
235
236#define OMAP_DSP_TNM_LEN 16
237
238#define OMAP_DSP_TID_FREE 0xff
239#define OMAP_DSP_TID_ANON 0xfe
240
241#define OMAP_DSP_BID_NULL 0xffff
242#define OMAP_DSP_BID_PVT 0xfffe
243
244#endif /* ASM_ARCH_DSP_H */
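
The block of OMAP_DSP_IOCTL_* numbers above is the user-visible command set of the DSP Gateway control device; the header's own comment points at /dev/dspctl/ctl. A hypothetical user-space sketch follows, with the two command values copied from the header and the device path taken from that comment rather than from this patch.

#include <fcntl.h>
#include <stdio.h>
#include <sys/ioctl.h>
#include <unistd.h>

#define OMAP_DSP_IOCTL_RESET	1	/* values as defined in dsp.h above */
#define OMAP_DSP_IOCTL_RUN	2

int main(void)
{
	int fd = open("/dev/dspctl/ctl", O_RDWR);	/* path from the header comment */

	if (fd < 0) {
		perror("open");
		return 1;
	}
	if (ioctl(fd, OMAP_DSP_IOCTL_RESET) < 0)
		perror("DSP reset");
	if (ioctl(fd, OMAP_DSP_IOCTL_RUN) < 0)
		perror("DSP run");
	close(fd);
	return 0;
}
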
diff --git a/include/asm-arm/arch-omap/dsp_common.h b/include/asm-arm/arch-omap/dsp_common.h
new file mode 100644
index 000000000000..4fcce6944056
--- /dev/null
+++ b/include/asm-arm/arch-omap/dsp_common.h
@@ -0,0 +1,37 @@
1/*
2 * linux/include/asm-arm/arch-omap/dsp_common.h
3 *
4 * Header for OMAP DSP subsystem control
5 *
6 * Copyright (C) 2004,2005 Nokia Corporation
7 *
8 * Written by Toshihiro Kobayashi <toshihiro.kobayashi@nokia.com>
9 *
10 * This program is free software; you can redistribute it and/or modify
11 * it under the terms of the GNU General Public License as published by
12 * the Free Software Foundation; either version 2 of the License, or
13 * (at your option) any later version.
14 *
15 * This program is distributed in the hope that it will be useful,
16 * but WITHOUT ANY WARRANTY; without even the implied warranty of
17 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
18 * GNU General Public License for more details.
19 *
20 * You should have received a copy of the GNU General Public License
21 * along with this program; if not, write to the Free Software
22 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
23 *
24 * 2005/06/03: DSP Gateway version 3.3
25 */
26
27#ifndef ASM_ARCH_DSP_COMMON_H
28#define ASM_ARCH_DSP_COMMON_H
29
30void omap_dsp_pm_suspend(void);
31void omap_dsp_pm_resume(void);
32void omap_dsp_request_mpui(void);
33void omap_dsp_release_mpui(void);
34int omap_dsp_request_mem(void);
35int omap_dsp_release_mem(void);
36
37#endif /* ASM_ARCH_DSP_COMMON_H */
diff --git a/include/asm-arm/arch-omap/entry-macro.S b/include/asm-arm/arch-omap/entry-macro.S
index 57b126889b98..0d29b9c56a95 100644
--- a/include/asm-arm/arch-omap/entry-macro.S
+++ b/include/asm-arm/arch-omap/entry-macro.S
@@ -8,6 +8,8 @@
8 * warranty of any kind, whether express or implied. 8 * warranty of any kind, whether express or implied.
9 */ 9 */
10 10
11#if defined(CONFIG_ARCH_OMAP1)
12
11 .macro disable_fiq 13 .macro disable_fiq
12 .endm 14 .endm
13 15
@@ -30,3 +32,29 @@
301510: 321510:
31 .endm 33 .endm
32 34
35#elif defined(CONFIG_ARCH_OMAP24XX)
36
37#include <asm/arch/omap24xx.h>
38
39 .macro disable_fiq
40 .endm
41
42 .macro get_irqnr_and_base, irqnr, irqstat, base, tmp
43 ldr \base, =VA_IC_BASE
44 ldr \irqnr, [\base, #0x98] /* IRQ pending reg 1 */
45 cmp \irqnr, #0x0
46 bne 2222f
47 ldr \irqnr, [\base, #0xb8] /* IRQ pending reg 2 */
48 cmp \irqnr, #0x0
49 bne 2222f
50 ldr \irqnr, [\base, #0xd8] /* IRQ pending reg 3 */
51 cmp \irqnr, #0x0
522222:
53 ldrne \irqnr, [\base, #IRQ_SIR_IRQ]
54
55 .endm
56
57 .macro irq_prio_table
58 .endm
59
60#endif
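
For readers not fluent in the assembler above, the OMAP24xx get_irqnr_and_base macro amounts to the following C, written here purely as an illustration; the register offsets 0x98/0xb8/0xd8 and IRQ_SIR_IRQ are the ones used in the macro, the helper name is invented.

/* Illustration of the macro's logic: poll the three INTC pending
 * registers and, if any source is asserted, fetch the active IRQ
 * number from the SIR_IRQ register.  Returns 0 when nothing pends. */
static inline unsigned int omap24xx_pending_irq(void __iomem *ic_base)
{
	unsigned int pending;

	pending = __raw_readl(ic_base + 0x98);		/* IRQ pending reg 1 */
	if (!pending)
		pending = __raw_readl(ic_base + 0xb8);	/* IRQ pending reg 2 */
	if (!pending)
		pending = __raw_readl(ic_base + 0xd8);	/* IRQ pending reg 3 */
	if (!pending)
		return 0;

	return __raw_readl(ic_base + IRQ_SIR_IRQ);	/* active IRQ number */
}
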
diff --git a/include/asm-arm/arch-omap/gpio.h b/include/asm-arm/arch-omap/gpio.h
index fad2fc93ee70..74cb2b93b700 100644
--- a/include/asm-arm/arch-omap/gpio.h
+++ b/include/asm-arm/arch-omap/gpio.h
@@ -3,7 +3,7 @@
3 * 3 *
4 * OMAP GPIO handling defines and functions 4 * OMAP GPIO handling defines and functions
5 * 5 *
 6 * Copyright (C) 2003 Nokia Corporation
 6 * Copyright (C) 2003-2005 Nokia Corporation
7 * 7 *
8 * Written by Juha Yrjölä <juha.yrjola@nokia.com> 8 * Written by Juha Yrjölä <juha.yrjola@nokia.com>
9 * 9 *
@@ -30,7 +30,23 @@
30#include <asm/arch/irqs.h> 30#include <asm/arch/irqs.h>
31#include <asm/io.h> 31#include <asm/io.h>
32 32
33#define OMAP_MPUIO_BASE 0xfffb5000
33#define OMAP_MPUIO_BASE (void __iomem *)0xfffb5000
34
35#ifdef CONFIG_ARCH_OMAP730
36#define OMAP_MPUIO_INPUT_LATCH 0x00
37#define OMAP_MPUIO_OUTPUT 0x02
38#define OMAP_MPUIO_IO_CNTL 0x04
39#define OMAP_MPUIO_KBR_LATCH 0x08
40#define OMAP_MPUIO_KBC 0x0a
41#define OMAP_MPUIO_GPIO_EVENT_MODE 0x0c
42#define OMAP_MPUIO_GPIO_INT_EDGE 0x0e
43#define OMAP_MPUIO_KBD_INT 0x10
44#define OMAP_MPUIO_GPIO_INT 0x12
45#define OMAP_MPUIO_KBD_MASKIT 0x14
46#define OMAP_MPUIO_GPIO_MASKIT 0x16
47#define OMAP_MPUIO_GPIO_DEBOUNCING 0x18
48#define OMAP_MPUIO_LATCH 0x1a
49#else
34#define OMAP_MPUIO_INPUT_LATCH 0x00 50#define OMAP_MPUIO_INPUT_LATCH 0x00
35#define OMAP_MPUIO_OUTPUT 0x04 51#define OMAP_MPUIO_OUTPUT 0x04
36#define OMAP_MPUIO_IO_CNTL 0x08 52#define OMAP_MPUIO_IO_CNTL 0x08
@@ -44,6 +60,7 @@
44#define OMAP_MPUIO_GPIO_MASKIT 0x2c 60#define OMAP_MPUIO_GPIO_MASKIT 0x2c
45#define OMAP_MPUIO_GPIO_DEBOUNCING 0x30 61#define OMAP_MPUIO_GPIO_DEBOUNCING 0x30
46#define OMAP_MPUIO_LATCH 0x34 62#define OMAP_MPUIO_LATCH 0x34
63#endif
47 64
48#define OMAP_MPUIO(nr) (OMAP_MAX_GPIO_LINES + (nr)) 65#define OMAP_MPUIO(nr) (OMAP_MAX_GPIO_LINES + (nr))
49#define OMAP_GPIO_IS_MPUIO(nr) ((nr) >= OMAP_MAX_GPIO_LINES) 66#define OMAP_GPIO_IS_MPUIO(nr) ((nr) >= OMAP_MAX_GPIO_LINES)
@@ -52,18 +69,11 @@
52 IH_MPUIO_BASE + ((nr) & 0x0f) : \ 69 IH_MPUIO_BASE + ((nr) & 0x0f) : \
53 IH_GPIO_BASE + ((nr) & 0x3f)) 70 IH_GPIO_BASE + ((nr) & 0x3f))
54 71
55/* For EDGECTRL */
56#define OMAP_GPIO_NO_EDGE 0x00
57#define OMAP_GPIO_FALLING_EDGE 0x01
58#define OMAP_GPIO_RISING_EDGE 0x02
59#define OMAP_GPIO_BOTH_EDGES 0x03
60
61extern int omap_gpio_init(void); /* Call from board init only */ 72extern int omap_gpio_init(void); /* Call from board init only */
62extern int omap_request_gpio(int gpio); 73extern int omap_request_gpio(int gpio);
63extern void omap_free_gpio(int gpio); 74extern void omap_free_gpio(int gpio);
64extern void omap_set_gpio_direction(int gpio, int is_input); 75extern void omap_set_gpio_direction(int gpio, int is_input);
65extern void omap_set_gpio_dataout(int gpio, int enable); 76extern void omap_set_gpio_dataout(int gpio, int enable);
66extern int omap_get_gpio_datain(int gpio); 77extern int omap_get_gpio_datain(int gpio);
67extern void omap_set_gpio_edge_ctrl(int gpio, int edge);
68 78
69#endif 79#endif
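
With omap_set_gpio_edge_ctrl() and the OMAP_GPIO_*_EDGE values gone, the header above leaves a request/direction/data API; edge sensitivity is presumably configured through the generic IRQ interface (set_irq_type()) by the reworked gpio code elsewhere in this patch. A small, purely illustrative board-code sketch of the remaining calls:

#include <asm/arch/gpio.h>

/* Illustrative only: reserve a GPIO, make it an input and read it.
 * The line number (4) is arbitrary. */
static int example_read_gpio4(void)
{
	int val;

	if (omap_request_gpio(4) < 0)
		return -1;

	omap_set_gpio_direction(4, 1);	/* 1 = input */
	val = omap_get_gpio_datain(4);
	omap_free_gpio(4);
	return val;
}
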
diff --git a/include/asm-arm/arch-omap/hardware.h b/include/asm-arm/arch-omap/hardware.h
index 48258c7f6541..60201e1dd6ad 100644
--- a/include/asm-arm/arch-omap/hardware.h
+++ b/include/asm-arm/arch-omap/hardware.h
@@ -43,6 +43,7 @@
43#include <asm/arch/cpu.h> 43#include <asm/arch/cpu.h>
44#endif 44#endif
45#include <asm/arch/io.h> 45#include <asm/arch/io.h>
46#include <asm/arch/serial.h>
46 47
47/* 48/*
48 * --------------------------------------------------------------------------- 49 * ---------------------------------------------------------------------------
@@ -89,11 +90,12 @@
89/* DPLL control registers */ 90/* DPLL control registers */
90#define DPLL_CTL (0xfffecf00) 91#define DPLL_CTL (0xfffecf00)
91 92
92/* DSP clock control */
93/* DSP clock control. Must use __raw_readw() and __raw_writew() with these */
93#define DSP_CONFIG_REG_BASE (0xe1008000) 94#define DSP_CONFIG_REG_BASE (0xe1008000)
94#define DSP_CKCTL (DSP_CONFIG_REG_BASE + 0x0) 95#define DSP_CKCTL (DSP_CONFIG_REG_BASE + 0x0)
95#define DSP_IDLECT1 (DSP_CONFIG_REG_BASE + 0x4) 96#define DSP_IDLECT1 (DSP_CONFIG_REG_BASE + 0x4)
96#define DSP_IDLECT2 (DSP_CONFIG_REG_BASE + 0x8) 97#define DSP_IDLECT2 (DSP_CONFIG_REG_BASE + 0x8)
98#define DSP_RSTCT2 (DSP_CONFIG_REG_BASE + 0x14)
97 99
98/* 100/*
99 * --------------------------------------------------------------------------- 101 * ---------------------------------------------------------------------------
@@ -142,6 +144,13 @@
142 * Interrupts 144 * Interrupts
143 * --------------------------------------------------------------------------- 145 * ---------------------------------------------------------------------------
144 */ 146 */
147#ifdef CONFIG_ARCH_OMAP1
148
149/*
150 * XXX: These probably want to be moved to arch/arm/mach-omap/omap1/irq.c
151 * or something similar.. -- PFM.
152 */
153
145#define OMAP_IH1_BASE 0xfffecb00 154#define OMAP_IH1_BASE 0xfffecb00
146#define OMAP_IH2_BASE 0xfffe0000 155#define OMAP_IH2_BASE 0xfffe0000
147 156
@@ -170,6 +179,8 @@
170#define IRQ_ILR0_REG_OFFSET 0x1c 179#define IRQ_ILR0_REG_OFFSET 0x1c
171#define IRQ_GMR_REG_OFFSET 0xa0 180#define IRQ_GMR_REG_OFFSET 0xa0
172 181
182#endif
183
173/* 184/*
174 * ---------------------------------------------------------------------------- 185 * ----------------------------------------------------------------------------
175 * System control registers 186 * System control registers
@@ -260,32 +271,17 @@
260 271
261/* 272/*
262 * --------------------------------------------------------------------------- 273 * ---------------------------------------------------------------------------
263 * Serial ports
264 * ---------------------------------------------------------------------------
265 */
266#define OMAP_UART1_BASE (unsigned char *)0xfffb0000
267#define OMAP_UART2_BASE (unsigned char *)0xfffb0800
268#define OMAP_UART3_BASE (unsigned char *)0xfffb9800
269#define OMAP_MAX_NR_PORTS 3
270#define OMAP1510_BASE_BAUD (12000000/16)
271#define OMAP16XX_BASE_BAUD (48000000/16)
272
273#define is_omap_port(p) ({int __ret = 0; \
274 if (p == IO_ADDRESS(OMAP_UART1_BASE) || \
275 p == IO_ADDRESS(OMAP_UART2_BASE) || \
276 p == IO_ADDRESS(OMAP_UART3_BASE)) \
277 __ret = 1; \
278 __ret; \
279 })
280
281/*
282 * ---------------------------------------------------------------------------
283 * Processor specific defines 274 * Processor specific defines
284 * --------------------------------------------------------------------------- 275 * ---------------------------------------------------------------------------
285 */ 276 */
286 277
287#include "omap730.h" 278#include "omap730.h"
288#include "omap1510.h" 279#include "omap1510.h"
280
281#ifdef CONFIG_ARCH_OMAP24XX
282#include "omap24xx.h"
283#endif
284
289#include "omap16xx.h" 285#include "omap16xx.h"
290 286
291/* 287/*
@@ -312,7 +308,6 @@
312 308
313#ifdef CONFIG_MACH_OMAP_H4 309#ifdef CONFIG_MACH_OMAP_H4
314#include "board-h4.h" 310#include "board-h4.h"
315#error "Support for H4 board not yet implemented."
316#endif 311#endif
317 312
318#ifdef CONFIG_MACH_OMAP_OSK 313#ifdef CONFIG_MACH_OMAP_OSK
diff --git a/include/asm-arm/arch-omap/io.h b/include/asm-arm/arch-omap/io.h
index 1c8c9fcc766e..11fbf629bf75 100644
--- a/include/asm-arm/arch-omap/io.h
+++ b/include/asm-arm/arch-omap/io.h
@@ -49,16 +49,24 @@
49 * I/O mapping 49 * I/O mapping
50 * ---------------------------------------------------------------------------- 50 * ----------------------------------------------------------------------------
51 */ 51 */
52#define IO_PHYS 0xFFFB0000
53#define IO_OFFSET 0x01000000 /* Virtual IO = 0xfefb0000 */
54#define IO_VIRT (IO_PHYS - IO_OFFSET)
55#define IO_SIZE 0x40000
56#define IO_ADDRESS(x) ((x) - IO_OFFSET)
57 52
58#define PCIO_BASE 0
53#if defined(CONFIG_ARCH_OMAP1)
54#define IO_PHYS 0xFFFB0000
55#define IO_OFFSET -0x01000000 /* Virtual IO = 0xfefb0000 */
56#define IO_SIZE 0x40000
59 57
60#define io_p2v(x) ((x) - IO_OFFSET)
61#define io_v2p(x) ((x) + IO_OFFSET)
58#elif defined(CONFIG_ARCH_OMAP2)
59#define IO_PHYS 0x48000000 /* L4 peripherals; other stuff has to be mapped *
60 * manually. */
61#define IO_OFFSET 0x90000000 /* Virtual IO = 0xd8000000 */
62#define IO_SIZE 0x08000000
63#endif
64
65#define IO_VIRT (IO_PHYS + IO_OFFSET)
66#define IO_ADDRESS(x) ((x) + IO_OFFSET)
67#define PCIO_BASE 0
68#define io_p2v(x) ((x) + IO_OFFSET)
69#define io_v2p(x) ((x) - IO_OFFSET)
62 70
63#ifndef __ASSEMBLER__ 71#ifndef __ASSEMBLER__
64 72
@@ -96,6 +104,8 @@ typedef struct { volatile u32 offset[4096]; } __regbase32;
96 ->offset[((vaddr)&4095)>>2] 104 ->offset[((vaddr)&4095)>>2]
97#define __REG32(paddr) __REGV32(io_p2v(paddr)) 105#define __REG32(paddr) __REGV32(io_p2v(paddr))
98 106
107extern void omap_map_common_io(void);
108
99#else 109#else
100 110
101#define __REG8(paddr) io_p2v(paddr) 111#define __REG8(paddr) io_p2v(paddr)
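
The signed IO_OFFSET lets one pair of io_p2v()/io_v2p() macros serve both families. Working the numbers from the definitions above:

/*
 * OMAP1: IO_VIRT = 0xFFFB0000 + (-0x01000000) = 0xFEFB0000
 *        IO_ADDRESS(0xFFFB0800)               = 0xFEFB0800  (UART2)
 * OMAP2: IO_VIRT = 0x48000000 +  0x90000000   = 0xD8000000
 *        IO_ADDRESS(0x4806A000)               = 0xD806A000  (UART1 on L4)
 *
 * The 0xd8000000 virtual base used by the OMAP2 branch of debug-macro.S
 * earlier in this patch falls out of the same arithmetic.
 */
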
diff --git a/include/asm-arm/arch-omap/irqs.h b/include/asm-arm/arch-omap/irqs.h
index 0d05a7c957d1..74e108ccac16 100644
--- a/include/asm-arm/arch-omap/irqs.h
+++ b/include/asm-arm/arch-omap/irqs.h
@@ -135,7 +135,6 @@
135/* 135/*
136 * OMAP-1510 specific IRQ numbers for interrupt handler 2 136 * OMAP-1510 specific IRQ numbers for interrupt handler 2
137 */ 137 */
138#define INT_1510_OS_32kHz_TIMER (22 + IH2_BASE)
139#define INT_1510_COM_SPI_RO (31 + IH2_BASE) 138#define INT_1510_COM_SPI_RO (31 + IH2_BASE)
140 139
141/* 140/*
@@ -232,6 +231,11 @@
232#define INT_730_DMA_CH15 (62 + IH2_BASE) 231#define INT_730_DMA_CH15 (62 + IH2_BASE)
233#define INT_730_NAND (63 + IH2_BASE) 232#define INT_730_NAND (63 + IH2_BASE)
234 233
234#define INT_24XX_GPIO_BANK1 29
235#define INT_24XX_GPIO_BANK2 30
236#define INT_24XX_GPIO_BANK3 31
237#define INT_24XX_GPIO_BANK4 32
238
235/* Max. 128 level 2 IRQs (OMAP1610), 192 GPIOs (OMAP730) and 239/* Max. 128 level 2 IRQs (OMAP1610), 192 GPIOs (OMAP730) and
236 * 16 MPUIO lines */ 240 * 16 MPUIO lines */
237#define OMAP_MAX_GPIO_LINES 192 241#define OMAP_MAX_GPIO_LINES 192
diff --git a/include/asm-arm/arch-omap/memory.h b/include/asm-arm/arch-omap/memory.h
index f6b57dd846a3..84f81e315a25 100644
--- a/include/asm-arm/arch-omap/memory.h
+++ b/include/asm-arm/arch-omap/memory.h
@@ -36,12 +36,11 @@
36/* 36/*
37 * Physical DRAM offset. 37 * Physical DRAM offset.
38 */ 38 */
39#if defined(CONFIG_ARCH_OMAP1)
39#define PHYS_OFFSET (0x10000000UL) 40#define PHYS_OFFSET (0x10000000UL)
40
41/*
42 * OMAP-1510 Local Bus address offset
43 */
44#define OMAP1510_LB_OFFSET (0x30000000UL)
41#elif defined(CONFIG_ARCH_OMAP2)
42#define PHYS_OFFSET (0x80000000UL)
43#endif
45 44
46/* 45/*
47 * Conversion between SDRAM and fake PCI bus, used by USB 46 * Conversion between SDRAM and fake PCI bus, used by USB
@@ -64,6 +63,11 @@
64 */ 63 */
65#ifdef CONFIG_ARCH_OMAP1510 64#ifdef CONFIG_ARCH_OMAP1510
66 65
66/*
67 * OMAP-1510 Local Bus address offset
68 */
69#define OMAP1510_LB_OFFSET (0x30000000UL)
70
67#define virt_to_lbus(x) ((x) - PAGE_OFFSET + OMAP1510_LB_OFFSET) 71#define virt_to_lbus(x) ((x) - PAGE_OFFSET + OMAP1510_LB_OFFSET)
68#define lbus_to_virt(x) ((x) - OMAP1510_LB_OFFSET + PAGE_OFFSET) 72#define lbus_to_virt(x) ((x) - OMAP1510_LB_OFFSET + PAGE_OFFSET)
69#define is_lbus_device(dev) (cpu_is_omap1510() && dev && (strncmp(dev->bus_id, "ohci", 4) == 0)) 73#define is_lbus_device(dev) (cpu_is_omap1510() && dev && (strncmp(dev->bus_id, "ohci", 4) == 0))
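
The local-bus helpers are only meaningful for the OMAP-1510 OHCI controller, as is_lbus_device() above makes explicit. A worked example of the translation, assuming the usual ARM PAGE_OFFSET of 0xC0000000:

/*
 * virt_to_lbus(0xC0100000) = 0xC0100000 - 0xC0000000 + 0x30000000
 *                          = 0x30100000
 * lbus_to_virt(0x30100000) = 0x30100000 - 0x30000000 + 0xC0000000
 *                          = 0xC0100000
 */
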
diff --git a/include/asm-arm/arch-omap/mtd-xip.h b/include/asm-arm/arch-omap/mtd-xip.h
new file mode 100644
index 000000000000..a73a28571fee
--- /dev/null
+++ b/include/asm-arm/arch-omap/mtd-xip.h
@@ -0,0 +1,61 @@
1/*
2 * MTD primitives for XIP support. Architecture specific functions.
3 *
4 * Do not include this file directly. It's included from linux/mtd/xip.h
5 *
6 * Author: Vladimir Barinov <vbarinov@ru.mvista.com>
7 *
8 * (c) 2005 MontaVista Software, Inc. This file is licensed under the
9 * terms of the GNU General Public License version 2. This program is
10 * licensed "as is" without any warranty of any kind, whether express or
11 * implied.
12 */
13
14#ifndef __ARCH_OMAP_MTD_XIP_H__
15#define __ARCH_OMAP_MTD_XIP_H__
16
17#include <asm/hardware.h>
18#define OMAP_MPU_TIMER_BASE (0xfffec500)
19#define OMAP_MPU_TIMER_OFFSET 0x100
20
21typedef struct {
22 u32 cntl; /* CNTL_TIMER, R/W */
23 u32 load_tim; /* LOAD_TIM, W */
24 u32 read_tim; /* READ_TIM, R */
25} xip_omap_mpu_timer_regs_t;
26
27#define xip_omap_mpu_timer_base(n) \
28((volatile xip_omap_mpu_timer_regs_t*)IO_ADDRESS(OMAP_MPU_TIMER_BASE + \
29 (n)*OMAP_MPU_TIMER_OFFSET))
30
31static inline unsigned long xip_omap_mpu_timer_read(int nr)
32{
33 volatile xip_omap_mpu_timer_regs_t* timer = xip_omap_mpu_timer_base(nr);
34 return timer->read_tim;
35}
36
37#define xip_irqpending() \
38 (omap_readl(OMAP_IH1_ITR) & ~omap_readl(OMAP_IH1_MIR))
39#define xip_currtime() (~xip_omap_mpu_timer_read(0))
40
41/*
42 * It's permitted to do approximation for xip_elapsed_since macro
43 * (see linux/mtd/xip.h)
44 */
45
46#ifdef CONFIG_MACH_OMAP_PERSEUS2
47#define xip_elapsed_since(x) (signed)((~xip_omap_mpu_timer_read(0) - (x)) / 7)
48#else
49#define xip_elapsed_since(x) (signed)((~xip_omap_mpu_timer_read(0) - (x)) / 6)
50#endif
51
52/*
53 * xip_cpu_idle() is used when waiting for a delay equal or larger than
54 * the system timer tick period. This should put the CPU into idle mode
55 * to save power and to be woken up only when some interrupts are pending.
56 * As above, this should not rely upon standard kernel code.
57 */
58
59#define xip_cpu_idle() asm volatile ("mcr p15, 0, %0, c7, c0, 4" :: "r" (1))
60
61#endif /* __ARCH_OMAP_MTD_XIP_H__ */
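
The divisors 6 and 7 in xip_elapsed_since() read like a ticks-to-microseconds conversion: the MPU timer polled here runs at roughly 6 MHz on most OMAP1 boards and nearer 7 MHz on Perseus2/OMAP730, so dividing the tick delta by the clock in MHz approximates the microseconds that linux/mtd/xip.h expects. That reading is an inference from the constants, not something stated in the patch; in C it would look like:

static inline long example_ticks_to_us(unsigned long then, unsigned long now)
{
	return (long)(now - then) / 6;	/* ~6 timer ticks per microsecond */
}
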
diff --git a/include/asm-arm/arch-omap/mux.h b/include/asm-arm/arch-omap/mux.h
index 5bd3f0097fc6..1b1ad4105349 100644
--- a/include/asm-arm/arch-omap/mux.h
+++ b/include/asm-arm/arch-omap/mux.h
@@ -185,6 +185,7 @@ typedef enum {
185 185
186 /* MPUIO */ 186 /* MPUIO */
187 MPUIO2, 187 MPUIO2,
188 N15_1610_MPUIO2,
188 MPUIO4, 189 MPUIO4,
189 MPUIO5, 190 MPUIO5,
190 T20_1610_MPUIO5, 191 T20_1610_MPUIO5,
@@ -210,6 +211,7 @@ typedef enum {
210 211
211 /* Misc ballouts */ 212 /* Misc ballouts */
212 BALLOUT_V8_ARMIO3, 213 BALLOUT_V8_ARMIO3,
214 N20_HDQ,
213 215
214 /* OMAP-1610 MMC2 */ 216 /* OMAP-1610 MMC2 */
215 W8_1610_MMC2_DAT0, 217 W8_1610_MMC2_DAT0,
@@ -235,6 +237,7 @@ typedef enum {
235 P20_1610_GPIO4, 237 P20_1610_GPIO4,
236 V9_1610_GPIO7, 238 V9_1610_GPIO7,
237 W8_1610_GPIO9, 239 W8_1610_GPIO9,
240 N20_1610_GPIO11,
238 N19_1610_GPIO13, 241 N19_1610_GPIO13,
239 P10_1610_GPIO22, 242 P10_1610_GPIO22,
240 V5_1610_GPIO24, 243 V5_1610_GPIO24,
@@ -250,7 +253,7 @@ typedef enum {
250 U18_1610_UWIRE_SDI, 253 U18_1610_UWIRE_SDI,
251 W21_1610_UWIRE_SDO, 254 W21_1610_UWIRE_SDO,
252 N14_1610_UWIRE_CS0, 255 N14_1610_UWIRE_CS0,
253 P15_1610_UWIRE_CS0,
256 P15_1610_UWIRE_CS3,
254 N15_1610_UWIRE_CS1, 257 N15_1610_UWIRE_CS1,
255 258
256 /* OMAP-1610 Flash */ 259 /* OMAP-1610 Flash */
@@ -411,7 +414,8 @@ MUX_CFG("N21_1710_GPIO14", 6, 9, 0, 1, 1, 1, 1, 1, 1)
411MUX_CFG("W15_1710_GPIO40", 9, 27, 7, 2, 5, 1, 2, 1, 1) 414MUX_CFG("W15_1710_GPIO40", 9, 27, 7, 2, 5, 1, 2, 1, 1)
412 415
413/* MPUIO */ 416/* MPUIO */
414MUX_CFG("MPUIO2", 7, 18, 0, 1, 1, 1, NA, 0, 1)
417MUX_CFG("MPUIO2", 7, 18, 0, 1, 14, 1, NA, 0, 1)
418MUX_CFG("N15_1610_MPUIO2", 7, 18, 0, 1, 14, 1, 1, 0, 1)
415MUX_CFG("MPUIO4", 7, 15, 0, 1, 13, 1, NA, 0, 1) 419MUX_CFG("MPUIO4", 7, 15, 0, 1, 13, 1, NA, 0, 1)
416MUX_CFG("MPUIO5", 7, 12, 0, 1, 12, 1, NA, 0, 1) 420MUX_CFG("MPUIO5", 7, 12, 0, 1, 12, 1, NA, 0, 1)
417 421
@@ -438,6 +442,7 @@ MUX_CFG("MCBSP3_CLKX", 9, 3, 1, 1, 29, 0, NA, 0, 1)
438 442
439/* Misc ballouts */ 443/* Misc ballouts */
440MUX_CFG("BALLOUT_V8_ARMIO3", B, 18, 0, 2, 25, 1, NA, 0, 1) 444MUX_CFG("BALLOUT_V8_ARMIO3", B, 18, 0, 2, 25, 1, NA, 0, 1)
445MUX_CFG("N20_HDQ", 6, 18, 1, 1, 4, 0, 1, 4, 0)
441 446
442/* OMAP-1610 MMC2 */ 447/* OMAP-1610 MMC2 */
443MUX_CFG("W8_1610_MMC2_DAT0", B, 21, 6, 2, 23, 1, 2, 1, 1) 448MUX_CFG("W8_1610_MMC2_DAT0", B, 21, 6, 2, 23, 1, 2, 1, 1)
@@ -463,6 +468,7 @@ MUX_CFG("J18_1610_ETM_D7", 5, 27, 1, 0, 19, 0, 0, 0, 1)
463MUX_CFG("P20_1610_GPIO4", 6, 27, 0, 1, 7, 0, 1, 1, 1) 468MUX_CFG("P20_1610_GPIO4", 6, 27, 0, 1, 7, 0, 1, 1, 1)
464MUX_CFG("V9_1610_GPIO7", B, 12, 1, 2, 20, 0, 2, 1, 1) 469MUX_CFG("V9_1610_GPIO7", B, 12, 1, 2, 20, 0, 2, 1, 1)
465MUX_CFG("W8_1610_GPIO9", B, 21, 0, 2, 23, 0, 2, 1, 1) 470MUX_CFG("W8_1610_GPIO9", B, 21, 0, 2, 23, 0, 2, 1, 1)
471MUX_CFG("N20_1610_GPIO11", 6, 18, 0, 1, 4, 0, 1, 1, 1)
466MUX_CFG("N19_1610_GPIO13", 6, 12, 0, 1, 2, 0, 1, 1, 1) 472MUX_CFG("N19_1610_GPIO13", 6, 12, 0, 1, 2, 0, 1, 1, 1)
467MUX_CFG("P10_1610_GPIO22", C, 0, 7, 2, 26, 0, 2, 1, 1) 473MUX_CFG("P10_1610_GPIO22", C, 0, 7, 2, 26, 0, 2, 1, 1)
468MUX_CFG("V5_1610_GPIO24", B, 15, 7, 2, 21, 0, 2, 1, 1) 474MUX_CFG("V5_1610_GPIO24", B, 15, 7, 2, 21, 0, 2, 1, 1)
diff --git a/include/asm-arm/arch-omap/omap1510.h b/include/asm-arm/arch-omap/omap1510.h
index f491a48ef2e1..f086a3933906 100644
--- a/include/asm-arm/arch-omap/omap1510.h
+++ b/include/asm-arm/arch-omap/omap1510.h
@@ -36,10 +36,6 @@
36 36
37/* Syntax: XX_BASE = Virtual base address, XX_START = Physical base address */ 37/* Syntax: XX_BASE = Virtual base address, XX_START = Physical base address */
38 38
39#define OMAP1510_SRAM_BASE 0xD0000000
40#define OMAP1510_SRAM_SIZE (SZ_128K + SZ_64K)
41#define OMAP1510_SRAM_START 0x20000000
42
43#define OMAP1510_DSP_BASE 0xE0000000 39#define OMAP1510_DSP_BASE 0xE0000000
44#define OMAP1510_DSP_SIZE 0x28000 40#define OMAP1510_DSP_SIZE 0x28000
45#define OMAP1510_DSP_START 0xE0000000 41#define OMAP1510_DSP_START 0xE0000000
@@ -48,14 +44,5 @@
48#define OMAP1510_DSPREG_SIZE SZ_128K 44#define OMAP1510_DSPREG_SIZE SZ_128K
49#define OMAP1510_DSPREG_START 0xE1000000 45#define OMAP1510_DSPREG_START 0xE1000000
50 46
51/*
52 * ----------------------------------------------------------------------------
53 * Memory used by power management
54 * ----------------------------------------------------------------------------
55 */
56
57#define OMAP1510_SRAM_IDLE_SUSPEND (OMAP1510_SRAM_BASE + OMAP1510_SRAM_SIZE - 0x200)
58#define OMAP1510_SRAM_API_SUSPEND (OMAP1510_SRAM_IDLE_SUSPEND + 0x100)
59
60#endif /* __ASM_ARCH_OMAP1510_H */ 47#endif /* __ASM_ARCH_OMAP1510_H */
61 48
diff --git a/include/asm-arm/arch-omap/omap16xx.h b/include/asm-arm/arch-omap/omap16xx.h
index 38a9b95e6a33..f0c7f0fb4dc0 100644
--- a/include/asm-arm/arch-omap/omap16xx.h
+++ b/include/asm-arm/arch-omap/omap16xx.h
@@ -36,11 +36,6 @@
36 36
37/* Syntax: XX_BASE = Virtual base address, XX_START = Physical base address */ 37/* Syntax: XX_BASE = Virtual base address, XX_START = Physical base address */
38 38
39#define OMAP16XX_SRAM_BASE 0xD0000000
40#define OMAP1610_SRAM_SIZE (SZ_16K)
41#define OMAP5912_SRAM_SIZE 0x3E800
42#define OMAP16XX_SRAM_START 0x20000000
43
44#define OMAP16XX_DSP_BASE 0xE0000000 39#define OMAP16XX_DSP_BASE 0xE0000000
45#define OMAP16XX_DSP_SIZE 0x28000 40#define OMAP16XX_DSP_SIZE 0x28000
46#define OMAP16XX_DSP_START 0xE0000000 41#define OMAP16XX_DSP_START 0xE0000000
@@ -50,17 +45,6 @@
50#define OMAP16XX_DSPREG_START 0xE1000000 45#define OMAP16XX_DSPREG_START 0xE1000000
51 46
52/* 47/*
53 * ----------------------------------------------------------------------------
54 * Memory used by power management
55 * ----------------------------------------------------------------------------
56 */
57
58#define OMAP1610_SRAM_IDLE_SUSPEND (OMAP16XX_SRAM_BASE + OMAP1610_SRAM_SIZE - 0x200)
59#define OMAP1610_SRAM_API_SUSPEND (OMAP1610_SRAM_IDLE_SUSPEND + 0x100)
60#define OMAP5912_SRAM_IDLE_SUSPEND (OMAP16XX_SRAM_BASE + OMAP5912_SRAM_SIZE - 0x200)
61#define OMAP5912_SRAM_API_SUSPEND (OMAP5912_SRAM_IDLE_SUSPEND + 0x100)
62
63/*
64 * --------------------------------------------------------------------------- 48 * ---------------------------------------------------------------------------
65 * Interrupts 49 * Interrupts
66 * --------------------------------------------------------------------------- 50 * ---------------------------------------------------------------------------
diff --git a/include/asm-arm/arch-omap/omap24xx.h b/include/asm-arm/arch-omap/omap24xx.h
new file mode 100644
index 000000000000..a9105466a417
--- /dev/null
+++ b/include/asm-arm/arch-omap/omap24xx.h
@@ -0,0 +1,15 @@
1#ifndef __ASM_ARCH_OMAP24XX_H
2#define __ASM_ARCH_OMAP24XX_H
3
4#define OMAP24XX_L4_IO_BASE 0x48000000
5
6/* interrupt controller */
7#define OMAP24XX_IC_BASE (OMAP24XX_L4_IO_BASE + 0xfe000)
8#define VA_IC_BASE IO_ADDRESS(OMAP24XX_IC_BASE)
9
10#define OMAP24XX_IVA_INTC_BASE 0x40000000
11
12#define IRQ_SIR_IRQ 0x0040
13
14#endif /* __ASM_ARCH_OMAP24XX_H */
15
diff --git a/include/asm-arm/arch-omap/omap730.h b/include/asm-arm/arch-omap/omap730.h
index 599ab00f5488..755b64c5e9f0 100644
--- a/include/asm-arm/arch-omap/omap730.h
+++ b/include/asm-arm/arch-omap/omap730.h
@@ -36,10 +36,6 @@
36 36
37/* Syntax: XX_BASE = Virtual base address, XX_START = Physical base address */ 37/* Syntax: XX_BASE = Virtual base address, XX_START = Physical base address */
38 38
39#define OMAP730_SRAM_BASE 0xD0000000
40#define OMAP730_SRAM_SIZE (SZ_128K + SZ_64K + SZ_8K)
41#define OMAP730_SRAM_START 0x20000000
42
43#define OMAP730_DSP_BASE 0xE0000000 39#define OMAP730_DSP_BASE 0xE0000000
44#define OMAP730_DSP_SIZE 0x50000 40#define OMAP730_DSP_SIZE 0x50000
45#define OMAP730_DSP_START 0xE0000000 41#define OMAP730_DSP_START 0xE0000000
diff --git a/include/asm-arm/arch-omap/pm.h b/include/asm-arm/arch-omap/pm.h
index f209fc0953fb..fbd742d0c499 100644
--- a/include/asm-arm/arch-omap/pm.h
+++ b/include/asm-arm/arch-omap/pm.h
@@ -61,7 +61,10 @@
61#define PER_EN 0x1 61#define PER_EN 0x1
62 62
63#define CPU_SUSPEND_SIZE 200 63#define CPU_SUSPEND_SIZE 200
64#define ULPD_LOW_POWER_EN 0x0001
64#define ULPD_LOW_PWR_EN 0x0001
65#define ULPD_DEEP_SLEEP_TRANSITION_EN 0x0010
66#define ULPD_SETUP_ANALOG_CELL_3_VAL 0
67#define ULPD_POWER_CTRL_REG_VAL 0x0219
65 68
66#define DSP_IDLE_DELAY 10 69#define DSP_IDLE_DELAY 10
67#define DSP_IDLE 0x0040 70#define DSP_IDLE 0x0040
@@ -86,46 +89,35 @@
86#define OMAP1510_BIG_SLEEP_REQUEST 0x0cc5 89#define OMAP1510_BIG_SLEEP_REQUEST 0x0cc5
87#define OMAP1510_IDLE_LOOP_REQUEST 0x0c00 90#define OMAP1510_IDLE_LOOP_REQUEST 0x0c00
88#define OMAP1510_IDLE_CLOCK_DOMAINS 0x2 91#define OMAP1510_IDLE_CLOCK_DOMAINS 0x2
89#define OMAP1510_ULPD_LOW_POWER_REQ 0x0001
90 92
91#define OMAP1610_DEEP_SLEEP_REQUEST 0x17c7
92#define OMAP1610_BIG_SLEEP_REQUEST TBD
93/* Both big sleep and deep sleep use same values. Difference is in ULPD. */
94#define OMAP1610_IDLECT1_SLEEP_VAL 0x13c7
95#define OMAP1610_IDLECT2_SLEEP_VAL 0x09c7
96#define OMAP1610_IDLECT3_VAL 0x3f
97#define OMAP1610_IDLECT3_SLEEP_ORMASK 0x2c
98#define OMAP1610_IDLECT3 0xfffece24
93#define OMAP1610_IDLE_LOOP_REQUEST 0x0400 99#define OMAP1610_IDLE_LOOP_REQUEST 0x0400
94#define OMAP1610_IDLE_CLOCK_DOMAINS 0x09c7
95#define OMAP1610_ULPD_LOW_POWER_REQ 0x3
96
97#ifndef OMAP1510_SRAM_IDLE_SUSPEND
98#define OMAP1510_SRAM_IDLE_SUSPEND 0
99#endif
100#ifndef OMAP1610_SRAM_IDLE_SUSPEND
101#define OMAP1610_SRAM_IDLE_SUSPEND 0
102#endif
103#ifndef OMAP5912_SRAM_IDLE_SUSPEND
104#define OMAP5912_SRAM_IDLE_SUSPEND 0
105#endif
106
107#ifndef OMAP1510_SRAM_API_SUSPEND
108#define OMAP1510_SRAM_API_SUSPEND 0
109#endif
110#ifndef OMAP1610_SRAM_API_SUSPEND
111#define OMAP1610_SRAM_API_SUSPEND 0
112#endif
113#ifndef OMAP5912_SRAM_API_SUSPEND
114#define OMAP5912_SRAM_API_SUSPEND 0
115#endif
116 100
117#if !defined(CONFIG_ARCH_OMAP1510) && \ 101#if !defined(CONFIG_ARCH_OMAP1510) && \
118 !defined(CONFIG_ARCH_OMAP16XX)
102 !defined(CONFIG_ARCH_OMAP16XX) && \
103 !defined(CONFIG_ARCH_OMAP24XX)
119#error "Power management for this processor not implemented yet" 104#error "Power management for this processor not implemented yet"
120#endif 105#endif
121 106
122#ifndef __ASSEMBLER__ 107#ifndef __ASSEMBLER__
123extern void omap_pm_idle(void); 108extern void omap_pm_idle(void);
124extern void omap_pm_suspend(void); 109extern void omap_pm_suspend(void);
125extern int omap1510_cpu_suspend(unsigned short, unsigned short);
126extern int omap1610_cpu_suspend(unsigned short, unsigned short);
127extern int omap1510_idle_loop_suspend(void);
128extern int omap1610_idle_loop_suspend(void);
110extern void omap1510_cpu_suspend(unsigned short, unsigned short);
111extern void omap1610_cpu_suspend(unsigned short, unsigned short);
112extern void omap1510_idle_loop_suspend(void);
113extern void omap1610_idle_loop_suspend(void);
114
115#ifdef CONFIG_OMAP_SERIAL_WAKE
116extern void omap_serial_wake_trigger(int enable);
117#else
118#define omap_serial_wake_trigger(x) {}
119#endif /* CONFIG_OMAP_SERIAL_WAKE */
120
129extern unsigned int omap1510_cpu_suspend_sz; 121extern unsigned int omap1510_cpu_suspend_sz;
130extern unsigned int omap1510_idle_loop_suspend_sz; 122extern unsigned int omap1510_idle_loop_suspend_sz;
131extern unsigned int omap1610_cpu_suspend_sz; 123extern unsigned int omap1610_cpu_suspend_sz;
@@ -161,6 +153,7 @@ enum arm_save_state {
161 ARM_SLEEP_SAVE_ARM_CKCTL, 153 ARM_SLEEP_SAVE_ARM_CKCTL,
162 ARM_SLEEP_SAVE_ARM_IDLECT1, 154 ARM_SLEEP_SAVE_ARM_IDLECT1,
163 ARM_SLEEP_SAVE_ARM_IDLECT2, 155 ARM_SLEEP_SAVE_ARM_IDLECT2,
156 ARM_SLEEP_SAVE_ARM_IDLECT3,
164 ARM_SLEEP_SAVE_ARM_EWUPCT, 157 ARM_SLEEP_SAVE_ARM_EWUPCT,
165 ARM_SLEEP_SAVE_ARM_RSTCT1, 158 ARM_SLEEP_SAVE_ARM_RSTCT1,
166 ARM_SLEEP_SAVE_ARM_RSTCT2, 159 ARM_SLEEP_SAVE_ARM_RSTCT2,
diff --git a/include/asm-arm/arch-omap/serial.h b/include/asm-arm/arch-omap/serial.h
new file mode 100644
index 000000000000..79a5297af9fc
--- /dev/null
+++ b/include/asm-arm/arch-omap/serial.h
@@ -0,0 +1,37 @@
1/*
2 * linux/include/asm-arm/arch-omap/serial.h
3 *
4 * This program is distributed in the hope that it will be useful,
5 * but WITHOUT ANY WARRANTY; without even the implied warranty of
6 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
7 * GNU General Public License for more details.
8 */
9
10#ifndef __ASM_ARCH_SERIAL_H
11#define __ASM_ARCH_SERIAL_H
12
13#if defined(CONFIG_ARCH_OMAP1)
14/* OMAP1 serial ports */
15#define OMAP_UART1_BASE 0xfffb0000
16#define OMAP_UART2_BASE 0xfffb0800
17#define OMAP_UART3_BASE 0xfffb9800
18#elif defined(CONFIG_ARCH_OMAP2)
19/* OMAP2 serial ports */
20#define OMAP_UART1_BASE 0x4806a000
21#define OMAP_UART2_BASE 0x4806c000
22#define OMAP_UART3_BASE 0x4806e000
23#endif
24
25#define OMAP_MAX_NR_PORTS 3
26#define OMAP1510_BASE_BAUD (12000000/16)
27#define OMAP16XX_BASE_BAUD (48000000/16)
28
29#define is_omap_port(p) ({int __ret = 0; \
30 if (p == IO_ADDRESS(OMAP_UART1_BASE) || \
31 p == IO_ADDRESS(OMAP_UART2_BASE) || \
32 p == IO_ADDRESS(OMAP_UART3_BASE)) \
33 __ret = 1; \
34 __ret; \
35 })
36
37#endif
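
The BASE_BAUD constants above are simply the UART input clock divided by the 8250 oversampling factor of 16: 12 MHz/16 = 750000 on OMAP1510 and 48 MHz/16 = 3000000 on OMAP16xx. is_omap_port() expects an already-remapped address; a purely illustrative use:

/* Non-zero when the address is one of the three on-chip UARTs. */
int on_chip = is_omap_port(IO_ADDRESS(OMAP_UART1_BASE));
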
diff --git a/include/asm-arm/arch-omap/uncompress.h b/include/asm-arm/arch-omap/uncompress.h
index 3e640aba8c20..3545c86859cc 100644
--- a/include/asm-arm/arch-omap/uncompress.h
+++ b/include/asm-arm/arch-omap/uncompress.h
@@ -20,7 +20,7 @@
20#include <linux/config.h> 20#include <linux/config.h>
21#include <linux/types.h> 21#include <linux/types.h>
22#include <linux/serial_reg.h> 22#include <linux/serial_reg.h>
23#include <asm/arch/hardware.h>
23#include <asm/arch/serial.h>
24 24
25unsigned int system_rev; 25unsigned int system_rev;
26 26
@@ -34,8 +34,9 @@ static void
34putstr(const char *s) 34putstr(const char *s)
35{ 35{
36 volatile u8 * uart = 0; 36 volatile u8 * uart = 0;
37	int shift;
37	int shift = 2;
38 38
39#ifdef CONFIG_ARCH_OMAP
39#ifdef CONFIG_OMAP_LL_DEBUG_UART3 40#ifdef CONFIG_OMAP_LL_DEBUG_UART3
40 uart = (volatile u8 *)(OMAP_UART3_BASE); 41 uart = (volatile u8 *)(OMAP_UART3_BASE);
41#elif CONFIG_OMAP_LL_DEBUG_UART2 42#elif CONFIG_OMAP_LL_DEBUG_UART2
@@ -44,6 +45,7 @@ putstr(const char *s)
44 uart = (volatile u8 *)(OMAP_UART1_BASE); 45 uart = (volatile u8 *)(OMAP_UART1_BASE);
45#endif 46#endif
46 47
48#ifdef CONFIG_ARCH_OMAP1
47 /* Determine which serial port to use */ 49 /* Determine which serial port to use */
48 do { 50 do {
49 /* MMU is not on, so cpu_is_omapXXXX() won't work here */ 51 /* MMU is not on, so cpu_is_omapXXXX() won't work here */
@@ -51,14 +53,14 @@ putstr(const char *s)
51 53
52 if (omap_id == OMAP_ID_730) 54 if (omap_id == OMAP_ID_730)
53 shift = 0; 55 shift = 0;
54 else
55 shift = 2;
56 56
57 if (check_port(uart, shift)) 57 if (check_port(uart, shift))
58 break; 58 break;
59 /* Silent boot if no serial ports are enabled. */ 59 /* Silent boot if no serial ports are enabled. */
60 return; 60 return;
61 } while (0); 61 } while (0);
62#endif /* CONFIG_ARCH_OMAP1 */
63#endif
62 64
63 /* 65 /*
64 * Now, xmit each character 66 * Now, xmit each character
diff --git a/include/asm-arm/arch-pxa/corgi.h b/include/asm-arm/arch-pxa/corgi.h
index 324db06b5dd4..4b7aa0b8391e 100644
--- a/include/asm-arm/arch-pxa/corgi.h
+++ b/include/asm-arm/arch-pxa/corgi.h
@@ -103,18 +103,20 @@
103 * Shared data structures 103 * Shared data structures
104 */ 104 */
105extern struct platform_device corgiscoop_device; 105extern struct platform_device corgiscoop_device;
106extern struct platform_device corgissp_device;
107extern struct platform_device corgifb_device;
106 108
107/* 109/*
108 * External Functions 110 * External Functions
109 */ 111 */
110extern unsigned long corgi_ssp_ads7846_putget(unsigned long); 112extern unsigned long corgi_ssp_ads7846_putget(unsigned long);
111extern unsigned long corgi_ssp_ads7846_get(void); 113extern unsigned long corgi_ssp_ads7846_get(void);
112extern void corgi_ssp_ads7846_put(ulong data);
114extern void corgi_ssp_ads7846_put(unsigned long data);
113extern void corgi_ssp_ads7846_lock(void); 115extern void corgi_ssp_ads7846_lock(void);
114extern void corgi_ssp_ads7846_unlock(void); 116extern void corgi_ssp_ads7846_unlock(void);
115extern void corgi_ssp_lcdtg_send (u8 adrs, u8 data);
117extern void corgi_ssp_lcdtg_send (unsigned char adrs, unsigned char data);
116extern void corgi_ssp_blduty_set(int duty); 118extern void corgi_ssp_blduty_set(int duty);
117extern int corgi_ssp_max1111_get(ulong data);
119extern int corgi_ssp_max1111_get(unsigned long data);
118 120
119#endif /* __ASM_ARCH_CORGI_H */ 121#endif /* __ASM_ARCH_CORGI_H */
120 122
diff --git a/include/asm-arm/arch-pxa/mmc.h b/include/asm-arm/arch-pxa/mmc.h
index 7492ea7ea614..9718063a2119 100644
--- a/include/asm-arm/arch-pxa/mmc.h
+++ b/include/asm-arm/arch-pxa/mmc.h
@@ -10,6 +10,7 @@ struct mmc_host;
10struct pxamci_platform_data { 10struct pxamci_platform_data {
11 unsigned int ocr_mask; /* available voltages */ 11 unsigned int ocr_mask; /* available voltages */
12 int (*init)(struct device *, irqreturn_t (*)(int, void *, struct pt_regs *), void *); 12 int (*init)(struct device *, irqreturn_t (*)(int, void *, struct pt_regs *), void *);
13 int (*get_ro)(struct device *);
13 void (*setpower)(struct device *, unsigned int); 14 void (*setpower)(struct device *, unsigned int);
14 void (*exit)(struct device *, void *); 15 void (*exit)(struct device *, void *);
15}; 16};
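
The new get_ro() hook lets board code report the state of the card's write-protect switch to the pxamci driver. A hypothetical board file might wire it up as below; the GPIO name is invented for the example and only the structure field comes from this patch.

static int example_mci_get_ro(struct device *dev)
{
	/* non-zero means the inserted card is write-protected;
	 * EXAMPLE_GPIO_nSD_WP is a made-up board GPIO */
	return GPLR(EXAMPLE_GPIO_nSD_WP) & GPIO_bit(EXAMPLE_GPIO_nSD_WP);
}

static struct pxamci_platform_data example_mci_platform_data = {
	.ocr_mask = MMC_VDD_32_33 | MMC_VDD_33_34,
	.get_ro   = example_mci_get_ro,
};
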
diff --git a/include/asm-arm/arch-s3c2410/anubis-cpld.h b/include/asm-arm/arch-s3c2410/anubis-cpld.h
new file mode 100644
index 000000000000..5675b1796b55
--- /dev/null
+++ b/include/asm-arm/arch-s3c2410/anubis-cpld.h
@@ -0,0 +1,24 @@
1/* linux/include/asm-arm/arch-s3c2410/anubis-cpld.h
2 *
3 * (c) 2005 Simtec Electronics
4 * http://www.simtec.co.uk/products/
5 * Ben Dooks <ben@simtec.co.uk>
6 *
7 * ANUBIS - CPLD control constants
8 *
9 * This program is free software; you can redistribute it and/or modify
10 * it under the terms of the GNU General Public License version 2 as
11 * published by the Free Software Foundation.
12 *
13 * Changelog:
14 *
15*/
16
17#ifndef __ASM_ARCH_ANUBISCPLD_H
18#define __ASM_ARCH_ANUBISCPLD_H
19
20/* CTRL2 - NAND WP control, IDE Reset assert/check */
21
22#define ANUBIS_CTRL1_NANDSEL (0x3)
23
24#endif /* __ASM_ARCH_ANUBISCPLD_H */
diff --git a/include/asm-arm/arch-s3c2410/anubis-irq.h b/include/asm-arm/arch-s3c2410/anubis-irq.h
new file mode 100644
index 000000000000..82f15dbd97e8
--- /dev/null
+++ b/include/asm-arm/arch-s3c2410/anubis-irq.h
@@ -0,0 +1,23 @@
1/* linux/include/asm-arm/arch-s3c2410/anubis-irq.h
2 *
3 * (c) 2005 Simtec Electronics
4 * http://www.simtec.co.uk/products/
5 * Ben Dooks <ben@simtec.co.uk>
6 *
7 * ANUBIS - IRQ Number definitions
8 *
9 * This program is free software; you can redistribute it and/or modify
10 * it under the terms of the GNU General Public License version 2 as
11 * published by the Free Software Foundation.
12 *
13 * Changelog:
14 */
15
16#ifndef __ASM_ARCH_ANUBISIRQ_H
17#define __ASM_ARCH_ANUBISIRQ_H
18
19#define IRQ_IDE0 IRQ_EINT2
20#define IRQ_IDE1 IRQ_EINT3
21#define IRQ_ASIX IRQ_EINT1
22
23#endif /* __ASM_ARCH_ANUBISIRQ_H */
diff --git a/include/asm-arm/arch-s3c2410/anubis-map.h b/include/asm-arm/arch-s3c2410/anubis-map.h
new file mode 100644
index 000000000000..97741d6e506a
--- /dev/null
+++ b/include/asm-arm/arch-s3c2410/anubis-map.h
@@ -0,0 +1,46 @@
1/* linux/include/asm-arm/arch-s3c2410/anubis-map.h
2 *
3 * (c) 2005 Simtec Electronics
4 * http://www.simtec.co.uk/products/
5 * Ben Dooks <ben@simtec.co.uk>
6 *
7 * ANUBIS - Memory map definitions
8 *
9 * This program is free software; you can redistribute it and/or modify
10 * it under the terms of the GNU General Public License version 2 as
11 * published by the Free Software Foundation.
12 *
13 * Changelog:
14*/
15
16/* needs arch/map.h including with this */
17
18#ifndef __ASM_ARCH_ANUBISMAP_H
19#define __ASM_ARCH_ANUBISMAP_H
20
21/* start peripherals off after the S3C2410 */
22
23#define ANUBIS_IOADDR(x) (S3C2410_ADDR((x) + 0x02000000))
24
25#define ANUBIS_PA_CPLD (S3C2410_CS1 | (1<<26))
26
27/* we put the CPLD registers next, to get them out of the way */
28
29#define ANUBIS_VA_CTRL1 ANUBIS_IOADDR(0x00000000) /* 0x01300000 */
30#define ANUBIS_PA_CTRL1 (ANUBIS_PA_CPLD)
31
32#define ANUBIS_VA_CTRL2 ANUBIS_IOADDR(0x00100000) /* 0x01400000 */
33#define ANUBIS_PA_CTRL2 (ANUBIS_PA_CPLD)
34
35#define ANUBIS_VA_CTRL3 ANUBIS_IOADDR(0x00200000) /* 0x01500000 */
36#define ANUBIS_PA_CTRL3 (ANUBIS_PA_CPLD)
37
38#define ANUBIS_VA_CTRL4 ANUBIS_IOADDR(0x00300000) /* 0x01600000 */
39#define ANUBIS_PA_CTRL4 (ANUBIS_PA_CPLD)
40
41#define ANUBIS_IDEPRI ANUBIS_IOADDR(0x01000000)
42#define ANUBIS_IDEPRIAUX ANUBIS_IOADDR(0x01100000)
43#define ANUBIS_IDESEC ANUBIS_IOADDR(0x01200000)
44#define ANUBIS_IDESECAUX ANUBIS_IOADDR(0x01300000)
45
46#endif /* __ASM_ARCH_ANUBISMAP_H */
diff --git a/include/asm-arm/auxvec.h b/include/asm-arm/auxvec.h
new file mode 100644
index 000000000000..c0536f6b29a7
--- /dev/null
+++ b/include/asm-arm/auxvec.h
@@ -0,0 +1,4 @@
1#ifndef __ASMARM_AUXVEC_H
2#define __ASMARM_AUXVEC_H
3
4#endif
diff --git a/include/asm-arm/fcntl.h b/include/asm-arm/fcntl.h
index 485b6bdf4d7a..a80b6607b2ef 100644
--- a/include/asm-arm/fcntl.h
+++ b/include/asm-arm/fcntl.h
@@ -1,87 +1,11 @@
1#ifndef _ARM_FCNTL_H 1#ifndef _ARM_FCNTL_H
2#define _ARM_FCNTL_H 2#define _ARM_FCNTL_H
3 3
4/* open/fcntl - O_SYNC is only implemented on blocks devices and on files
5 located on an ext2 file system */
6#define O_ACCMODE 0003
7#define O_RDONLY 00
8#define O_WRONLY 01
9#define O_RDWR 02
10#define O_CREAT 0100 /* not fcntl */
11#define O_EXCL 0200 /* not fcntl */
12#define O_NOCTTY 0400 /* not fcntl */
13#define O_TRUNC 01000 /* not fcntl */
14#define O_APPEND 02000
15#define O_NONBLOCK 04000
16#define O_NDELAY O_NONBLOCK
17#define O_SYNC 010000
18#define FASYNC 020000 /* fcntl, for BSD compatibility */
19#define O_DIRECTORY 040000 /* must be a directory */ 4#define O_DIRECTORY 040000 /* must be a directory */
20#define O_NOFOLLOW 0100000 /* don't follow links */ 5#define O_NOFOLLOW 0100000 /* don't follow links */
21#define O_DIRECT 0200000 /* direct disk access hint - currently ignored */ 6#define O_DIRECT 0200000 /* direct disk access hint - currently ignored */
22#define O_LARGEFILE 0400000 7#define O_LARGEFILE 0400000
23#define O_NOATIME 01000000
24 8
25#define F_DUPFD 0 /* dup */
9#include <asm-generic/fcntl.h>
26#define F_GETFD 1 /* get close_on_exec */
27#define F_SETFD 2 /* set/clear close_on_exec */
28#define F_GETFL 3 /* get file->f_flags */
29#define F_SETFL 4 /* set file->f_flags */
30#define F_GETLK 5
31#define F_SETLK 6
32#define F_SETLKW 7
33 10
34#define F_SETOWN 8 /* for sockets. */
35#define F_GETOWN 9 /* for sockets. */
36#define F_SETSIG 10 /* for sockets. */
37#define F_GETSIG 11 /* for sockets. */
38
39#define F_GETLK64 12 /* using 'struct flock64' */
40#define F_SETLK64 13
41#define F_SETLKW64 14
42
43/* for F_[GET|SET]FL */
44#define FD_CLOEXEC 1 /* actually anything with low bit set goes */
45
46/* for posix fcntl() and lockf() */
47#define F_RDLCK 0
48#define F_WRLCK 1
49#define F_UNLCK 2
50
51/* for old implementation of bsd flock () */
52#define F_EXLCK 4 /* or 3 */
53#define F_SHLCK 8 /* or 4 */
54
55/* for leases */
56#define F_INPROGRESS 16
57
58/* operations for bsd flock(), also used by the kernel implementation */
59#define LOCK_SH 1 /* shared lock */
60#define LOCK_EX 2 /* exclusive lock */
61#define LOCK_NB 4 /* or'd with one of the above to prevent
62 blocking */
63#define LOCK_UN 8 /* remove lock */
64
65#define LOCK_MAND 32 /* This is a mandatory flock */
66#define LOCK_READ 64 /* ... Which allows concurrent read operations */
67#define LOCK_WRITE 128 /* ... Which allows concurrent write operations */
68#define LOCK_RW 192 /* ... Which allows concurrent read & write ops */
69
70struct flock {
71 short l_type;
72 short l_whence;
73 off_t l_start;
74 off_t l_len;
75 pid_t l_pid;
76};
77
78struct flock64 {
79 short l_type;
80 short l_whence;
81 loff_t l_start;
82 loff_t l_len;
83 pid_t l_pid;
84};
85
86#define F_LINUX_SPECIFIC_BASE 1024
87#endif 11#endif
diff --git a/include/asm-arm/futex.h b/include/asm-arm/futex.h
new file mode 100644
index 000000000000..2cac5ecd9d00
--- /dev/null
+++ b/include/asm-arm/futex.h
@@ -0,0 +1,53 @@
1#ifndef _ASM_FUTEX_H
2#define _ASM_FUTEX_H
3
4#ifdef __KERNEL__
5
6#include <linux/futex.h>
7#include <asm/errno.h>
8#include <asm/uaccess.h>
9
10static inline int
11futex_atomic_op_inuser (int encoded_op, int __user *uaddr)
12{
13 int op = (encoded_op >> 28) & 7;
14 int cmp = (encoded_op >> 24) & 15;
15 int oparg = (encoded_op << 8) >> 20;
16 int cmparg = (encoded_op << 20) >> 20;
17 int oldval = 0, ret, tem;
18 if (encoded_op & (FUTEX_OP_OPARG_SHIFT << 28))
19 oparg = 1 << oparg;
20
21 if (! access_ok (VERIFY_WRITE, uaddr, sizeof(int)))
22 return -EFAULT;
23
24 inc_preempt_count();
25
26 switch (op) {
27 case FUTEX_OP_SET:
28 case FUTEX_OP_ADD:
29 case FUTEX_OP_OR:
30 case FUTEX_OP_ANDN:
31 case FUTEX_OP_XOR:
32 default:
33 ret = -ENOSYS;
34 }
35
36 dec_preempt_count();
37
38 if (!ret) {
39 switch (cmp) {
40 case FUTEX_OP_CMP_EQ: ret = (oldval == cmparg); break;
41 case FUTEX_OP_CMP_NE: ret = (oldval != cmparg); break;
42 case FUTEX_OP_CMP_LT: ret = (oldval < cmparg); break;
43 case FUTEX_OP_CMP_GE: ret = (oldval >= cmparg); break;
44 case FUTEX_OP_CMP_LE: ret = (oldval <= cmparg); break;
45 case FUTEX_OP_CMP_GT: ret = (oldval > cmparg); break;
46 default: ret = -ENOSYS;
47 }
48 }
49 return ret;
50}
51
52#endif
53#endif
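
futex_atomic_op_inuser() above is only a stub: every operation falls through to -ENOSYS, so FUTEX_WAKE_OP is effectively unsupported on ARM at this point, but the entry point and the argument decoding are in place. For reference, the shifts unpack encoded_op as follows (worked from the header's own expressions):

/*
 * op     = (encoded_op >> 28) & 7     -> bits 31..28
 * cmp    = (encoded_op >> 24) & 15    -> bits 27..24
 * oparg  = (encoded_op <<  8) >> 20   -> bits 23..12 (sign-extended)
 * cmparg = (encoded_op << 20) >> 20   -> bits 11..0  (sign-extended)
 *
 * e.g. encoded_op = 0x30001005 decodes to op = 3 (FUTEX_OP_ANDN),
 * cmp = 0, oparg = 1, cmparg = 5.
 */
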
diff --git a/include/asm-arm/hdreg.h b/include/asm-arm/hdreg.h
deleted file mode 100644
index 7f7fd1af0af3..000000000000
--- a/include/asm-arm/hdreg.h
+++ /dev/null
@@ -1 +0,0 @@
1#include <asm-generic/hdreg.h>
diff --git a/include/asm-arm/uaccess.h b/include/asm-arm/uaccess.h
index a7c018b8a0d4..a2fdad0138b3 100644
--- a/include/asm-arm/uaccess.h
+++ b/include/asm-arm/uaccess.h
@@ -77,12 +77,6 @@ static inline void set_fs (mm_segment_t fs)
77 77
78#define access_ok(type,addr,size) (__range_ok(addr,size) == 0) 78#define access_ok(type,addr,size) (__range_ok(addr,size) == 0)
79 79
80/* this function will go away soon - use access_ok() instead */
81static inline int __deprecated verify_area(int type, const void __user *addr, unsigned long size)
82{
83 return access_ok(type, addr, size) ? 0 : -EFAULT;
84}
85
86/* 80/*
87 * Single-value transfer routines. They automatically use the right 81 * Single-value transfer routines. They automatically use the right
88 * size if we just have the right pointer type. Note that the functions 82 * size if we just have the right pointer type. Note that the functions
diff --git a/include/asm-arm26/auxvec.h b/include/asm-arm26/auxvec.h
new file mode 100644
index 000000000000..c0536f6b29a7
--- /dev/null
+++ b/include/asm-arm26/auxvec.h
@@ -0,0 +1,4 @@
1#ifndef __ASMARM_AUXVEC_H
2#define __ASMARM_AUXVEC_H
3
4#endif
diff --git a/include/asm-arm26/fcntl.h b/include/asm-arm26/fcntl.h
index 485b6bdf4d7a..d85995e7459e 100644
--- a/include/asm-arm26/fcntl.h
+++ b/include/asm-arm26/fcntl.h
@@ -3,85 +3,11 @@
3 3
4/* open/fcntl - O_SYNC is only implemented on blocks devices and on files 4/* open/fcntl - O_SYNC is only implemented on blocks devices and on files
5 located on an ext2 file system */ 5 located on an ext2 file system */
6#define O_ACCMODE 0003
7#define O_RDONLY 00
8#define O_WRONLY 01
9#define O_RDWR 02
10#define O_CREAT 0100 /* not fcntl */
11#define O_EXCL 0200 /* not fcntl */
12#define O_NOCTTY 0400 /* not fcntl */
13#define O_TRUNC 01000 /* not fcntl */
14#define O_APPEND 02000
15#define O_NONBLOCK 04000
16#define O_NDELAY O_NONBLOCK
17#define O_SYNC 010000
18#define FASYNC 020000 /* fcntl, for BSD compatibility */
19#define O_DIRECTORY 040000 /* must be a directory */ 6#define O_DIRECTORY 040000 /* must be a directory */
20#define O_NOFOLLOW 0100000 /* don't follow links */ 7#define O_NOFOLLOW 0100000 /* don't follow links */
21#define O_DIRECT 0200000 /* direct disk access hint - currently ignored */ 8#define O_DIRECT 0200000 /* direct disk access hint - currently ignored */
22#define O_LARGEFILE 0400000 9#define O_LARGEFILE 0400000
23#define O_NOATIME 01000000
24 10
25#define F_DUPFD 0 /* dup */
11#include <asm-generic/fcntl.h>
26#define F_GETFD 1 /* get close_on_exec */
27#define F_SETFD 2 /* set/clear close_on_exec */
28#define F_GETFL 3 /* get file->f_flags */
29#define F_SETFL 4 /* set file->f_flags */
30#define F_GETLK 5
31#define F_SETLK 6
32#define F_SETLKW 7
33 12
34#define F_SETOWN 8 /* for sockets. */
35#define F_GETOWN 9 /* for sockets. */
36#define F_SETSIG 10 /* for sockets. */
37#define F_GETSIG 11 /* for sockets. */
38
39#define F_GETLK64 12 /* using 'struct flock64' */
40#define F_SETLK64 13
41#define F_SETLKW64 14
42
43/* for F_[GET|SET]FL */
44#define FD_CLOEXEC 1 /* actually anything with low bit set goes */
45
46/* for posix fcntl() and lockf() */
47#define F_RDLCK 0
48#define F_WRLCK 1
49#define F_UNLCK 2
50
51/* for old implementation of bsd flock () */
52#define F_EXLCK 4 /* or 3 */
53#define F_SHLCK 8 /* or 4 */
54
55/* for leases */
56#define F_INPROGRESS 16
57
58/* operations for bsd flock(), also used by the kernel implementation */
59#define LOCK_SH 1 /* shared lock */
60#define LOCK_EX 2 /* exclusive lock */
61#define LOCK_NB 4 /* or'd with one of the above to prevent
62 blocking */
63#define LOCK_UN 8 /* remove lock */
64
65#define LOCK_MAND 32 /* This is a mandatory flock */
66#define LOCK_READ 64 /* ... Which allows concurrent read operations */
67#define LOCK_WRITE 128 /* ... Which allows concurrent write operations */
68#define LOCK_RW 192 /* ... Which allows concurrent read & write ops */
69
70struct flock {
71 short l_type;
72 short l_whence;
73 off_t l_start;
74 off_t l_len;
75 pid_t l_pid;
76};
77
78struct flock64 {
79 short l_type;
80 short l_whence;
81 loff_t l_start;
82 loff_t l_len;
83 pid_t l_pid;
84};
85
86#define F_LINUX_SPECIFIC_BASE 1024
87#endif 13#endif
diff --git a/include/asm-arm26/futex.h b/include/asm-arm26/futex.h
new file mode 100644
index 000000000000..2cac5ecd9d00
--- /dev/null
+++ b/include/asm-arm26/futex.h
@@ -0,0 +1,53 @@
1#ifndef _ASM_FUTEX_H
2#define _ASM_FUTEX_H
3
4#ifdef __KERNEL__
5
6#include <linux/futex.h>
7#include <asm/errno.h>
8#include <asm/uaccess.h>
9
10static inline int
11futex_atomic_op_inuser (int encoded_op, int __user *uaddr)
12{
13 int op = (encoded_op >> 28) & 7;
14 int cmp = (encoded_op >> 24) & 15;
15 int oparg = (encoded_op << 8) >> 20;
16 int cmparg = (encoded_op << 20) >> 20;
17 int oldval = 0, ret, tem;
18 if (encoded_op & (FUTEX_OP_OPARG_SHIFT << 28))
19 oparg = 1 << oparg;
20
21 if (! access_ok (VERIFY_WRITE, uaddr, sizeof(int)))
22 return -EFAULT;
23
24 inc_preempt_count();
25
26 switch (op) {
27 case FUTEX_OP_SET:
28 case FUTEX_OP_ADD:
29 case FUTEX_OP_OR:
30 case FUTEX_OP_ANDN:
31 case FUTEX_OP_XOR:
32 default:
33 ret = -ENOSYS;
34 }
35
36 dec_preempt_count();
37
38 if (!ret) {
39 switch (cmp) {
40 case FUTEX_OP_CMP_EQ: ret = (oldval == cmparg); break;
41 case FUTEX_OP_CMP_NE: ret = (oldval != cmparg); break;
42 case FUTEX_OP_CMP_LT: ret = (oldval < cmparg); break;
43 case FUTEX_OP_CMP_GE: ret = (oldval >= cmparg); break;
44 case FUTEX_OP_CMP_LE: ret = (oldval <= cmparg); break;
45 case FUTEX_OP_CMP_GT: ret = (oldval > cmparg); break;
46 default: ret = -ENOSYS;
47 }
48 }
49 return ret;
50}
51
52#endif
53#endif
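
The generic stub above only decodes encoded_op and answers -ENOSYS for every operation; the decode follows the packing used by sys_futex(FUTEX_WAKE_OP). A minimal user-space sketch of that packing and unpacking, using the same shifts as the stub (the FUTEX_OP macro shown here is an assumption for illustration, not taken from this patch):

	/* Sketch: pack an encoded_op the way FUTEX_WAKE_OP callers do, then
	 * unpack it exactly as futex_atomic_op_inuser() above does.
	 * Assumed layout: bits 31-28 op, 27-24 cmp, 23-12 oparg, 11-0 cmparg. */
	#include <stdio.h>

	#define FUTEX_OP(op, oparg, cmp, cmparg) \
		(((op) << 28) | ((cmp) << 24) | \
		 (((oparg) & 0xfff) << 12) | ((cmparg) & 0xfff))

	int main(void)
	{
		int encoded_op = FUTEX_OP(1 /* ADD */, 5, 3 /* CMP_GE */, 2);

		int op     = (encoded_op >> 28) & 7;
		int cmp    = (encoded_op >> 24) & 15;
		int oparg  = (encoded_op << 8) >> 20;	/* sign-extends the 12-bit field */
		int cmparg = (encoded_op << 20) >> 20;	/* sign-extends the 12-bit field */

		printf("op=%d cmp=%d oparg=%d cmparg=%d\n", op, cmp, oparg, cmparg);
		return 0;
	}

Running it prints op=1 cmp=3 oparg=5 cmparg=2, i.e. the stub recovers exactly the fields a caller packed, even though it then refuses to perform the operation.
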
diff --git a/include/asm-arm26/hdreg.h b/include/asm-arm26/hdreg.h
deleted file mode 100644
index 7f7fd1af0af3..000000000000
--- a/include/asm-arm26/hdreg.h
+++ /dev/null
@@ -1 +0,0 @@
1#include <asm-generic/hdreg.h>
diff --git a/include/asm-arm26/uaccess.h b/include/asm-arm26/uaccess.h
index ab9ce38c6aec..3f2dd1093e58 100644
--- a/include/asm-arm26/uaccess.h
+++ b/include/asm-arm26/uaccess.h
@@ -40,12 +40,6 @@ extern int fixup_exception(struct pt_regs *regs);
40 40
41#define access_ok(type,addr,size) (__range_ok(addr,size) == 0) 41#define access_ok(type,addr,size) (__range_ok(addr,size) == 0)
42 42
43/* this function will go away soon - use access_ok() instead */
44static inline int __deprecated verify_area(int type, const void * addr, unsigned long size)
45{
46 return access_ok(type, addr, size) ? 0 : -EFAULT;
47}
48
49/* 43/*
50 * Single-value transfer routines. They automatically use the right 44 * Single-value transfer routines. They automatically use the right
51 * size if we just have the right pointer type. Note that the functions 45 * size if we just have the right pointer type. Note that the functions
diff --git a/include/asm-cris/auxvec.h b/include/asm-cris/auxvec.h
new file mode 100644
index 000000000000..cb30b01bf19f
--- /dev/null
+++ b/include/asm-cris/auxvec.h
@@ -0,0 +1,4 @@
1#ifndef __ASMCRIS_AUXVEC_H
2#define __ASMCRIS_AUXVEC_H
3
4#endif
diff --git a/include/asm-cris/fcntl.h b/include/asm-cris/fcntl.h
index 61c563242b51..46ab12db5739 100644
--- a/include/asm-cris/fcntl.h
+++ b/include/asm-cris/fcntl.h
@@ -1,90 +1 @@
1#ifndef _CRIS_FCNTL_H #include <asm-generic/fcntl.h>
2#define _CRIS_FCNTL_H
3
4/* verbatim copy of i386 version */
5
6/* open/fcntl - O_SYNC is only implemented on blocks devices and on files
7 located on an ext2 file system */
8#define O_ACCMODE 0003
9#define O_RDONLY 00
10#define O_WRONLY 01
11#define O_RDWR 02
12#define O_CREAT 0100 /* not fcntl */
13#define O_EXCL 0200 /* not fcntl */
14#define O_NOCTTY 0400 /* not fcntl */
15#define O_TRUNC 01000 /* not fcntl */
16#define O_APPEND 02000
17#define O_NONBLOCK 04000
18#define O_NDELAY O_NONBLOCK
19#define O_SYNC 010000
20#define FASYNC 020000 /* fcntl, for BSD compatibility */
21#define O_DIRECT 040000 /* direct disk access hint - currently ignored */
22#define O_LARGEFILE 0100000
23#define O_DIRECTORY 0200000 /* must be a directory */
24#define O_NOFOLLOW 0400000 /* don't follow links */
25#define O_NOATIME 01000000
26
27#define F_DUPFD 0 /* dup */
28#define F_GETFD 1 /* get f_flags */
29#define F_SETFD 2 /* set f_flags */
30#define F_GETFL 3 /* more flags (cloexec) */
31#define F_SETFL 4
32#define F_GETLK 5
33#define F_SETLK 6
34#define F_SETLKW 7
35
36#define F_SETOWN 8 /* for sockets. */
37#define F_GETOWN 9 /* for sockets. */
38#define F_SETSIG 10 /* for sockets. */
39#define F_GETSIG 11 /* for sockets. */
40
41#define F_GETLK64 12 /* using 'struct flock64' */
42#define F_SETLK64 13
43#define F_SETLKW64 14
44
45/* for F_[GET|SET]FL */
46#define FD_CLOEXEC 1 /* actually anything with low bit set goes */
47
48/* for posix fcntl() and lockf() */
49#define F_RDLCK 0
50#define F_WRLCK 1
51#define F_UNLCK 2
52
53/* for old implementation of bsd flock () */
54#define F_EXLCK 4 /* or 3 */
55#define F_SHLCK 8 /* or 4 */
56
57/* for leases */
58#define F_INPROGRESS 16
59
60/* operations for bsd flock(), also used by the kernel implementation */
61#define LOCK_SH 1 /* shared lock */
62#define LOCK_EX 2 /* exclusive lock */
63#define LOCK_NB 4 /* or'd with one of the above to prevent
64 blocking */
65#define LOCK_UN 8 /* remove lock */
66
67#define LOCK_MAND 32 /* This is a mandatory flock */
68#define LOCK_READ 64 /* ... Which allows concurrent read operations */
69#define LOCK_WRITE 128 /* ... Which allows concurrent write operations */
70#define LOCK_RW 192 /* ... Which allows concurrent read & write ops */
71
72struct flock {
73 short l_type;
74 short l_whence;
75 off_t l_start;
76 off_t l_len;
77 pid_t l_pid;
78};
79
80struct flock64 {
81 short l_type;
82 short l_whence;
83 loff_t l_start;
84 loff_t l_len;
85 pid_t l_pid;
86};
87
88#define F_LINUX_SPECIFIC_BASE 1024
89
90#endif
diff --git a/include/asm-cris/futex.h b/include/asm-cris/futex.h
new file mode 100644
index 000000000000..2cac5ecd9d00
--- /dev/null
+++ b/include/asm-cris/futex.h
@@ -0,0 +1,53 @@
1#ifndef _ASM_FUTEX_H
2#define _ASM_FUTEX_H
3
4#ifdef __KERNEL__
5
6#include <linux/futex.h>
7#include <asm/errno.h>
8#include <asm/uaccess.h>
9
10static inline int
11futex_atomic_op_inuser (int encoded_op, int __user *uaddr)
12{
13 int op = (encoded_op >> 28) & 7;
14 int cmp = (encoded_op >> 24) & 15;
15 int oparg = (encoded_op << 8) >> 20;
16 int cmparg = (encoded_op << 20) >> 20;
17 int oldval = 0, ret, tem;
18 if (encoded_op & (FUTEX_OP_OPARG_SHIFT << 28))
19 oparg = 1 << oparg;
20
21 if (! access_ok (VERIFY_WRITE, uaddr, sizeof(int)))
22 return -EFAULT;
23
24 inc_preempt_count();
25
26 switch (op) {
27 case FUTEX_OP_SET:
28 case FUTEX_OP_ADD:
29 case FUTEX_OP_OR:
30 case FUTEX_OP_ANDN:
31 case FUTEX_OP_XOR:
32 default:
33 ret = -ENOSYS;
34 }
35
36 dec_preempt_count();
37
38 if (!ret) {
39 switch (cmp) {
40 case FUTEX_OP_CMP_EQ: ret = (oldval == cmparg); break;
41 case FUTEX_OP_CMP_NE: ret = (oldval != cmparg); break;
42 case FUTEX_OP_CMP_LT: ret = (oldval < cmparg); break;
43 case FUTEX_OP_CMP_GE: ret = (oldval >= cmparg); break;
44 case FUTEX_OP_CMP_LE: ret = (oldval <= cmparg); break;
45 case FUTEX_OP_CMP_GT: ret = (oldval > cmparg); break;
46 default: ret = -ENOSYS;
47 }
48 }
49 return ret;
50}
51
52#endif
53#endif
diff --git a/include/asm-cris/irq.h b/include/asm-cris/irq.h
index 8e787fdaedd4..4fab5c3b2e15 100644
--- a/include/asm-cris/irq.h
+++ b/include/asm-cris/irq.h
@@ -1,6 +1,11 @@
1#ifndef _ASM_IRQ_H 1#ifndef _ASM_IRQ_H
2#define _ASM_IRQ_H 2#define _ASM_IRQ_H
3 3
4/*
5 * IRQ line status macro IRQ_PER_CPU is used
6 */
7#define ARCH_HAS_IRQ_PER_CPU
8
4#include <asm/arch/irq.h> 9#include <asm/arch/irq.h>
5 10
6extern __inline__ int irq_canonicalize(int irq) 11extern __inline__ int irq_canonicalize(int irq)
diff --git a/include/asm-cris/uaccess.h b/include/asm-cris/uaccess.h
index 6db17221fd9e..7d50086eb5ea 100644
--- a/include/asm-cris/uaccess.h
+++ b/include/asm-cris/uaccess.h
@@ -91,13 +91,6 @@
91#define __access_ok(addr,size) (__kernel_ok || __user_ok((addr),(size))) 91#define __access_ok(addr,size) (__kernel_ok || __user_ok((addr),(size)))
92#define access_ok(type,addr,size) __access_ok((unsigned long)(addr),(size)) 92#define access_ok(type,addr,size) __access_ok((unsigned long)(addr),(size))
93 93
94/* this function will go away soon - use access_ok() instead */
95extern inline int __deprecated verify_area(int type, const void __user * addr, unsigned long size)
96{
97 return access_ok(type,addr,size) ? 0 : -EFAULT;
98}
99
100
101#include <asm/arch/uaccess.h> 94#include <asm/arch/uaccess.h>
102 95
103/* 96/*
diff --git a/include/asm-frv/auxvec.h b/include/asm-frv/auxvec.h
new file mode 100644
index 000000000000..07710778fa10
--- /dev/null
+++ b/include/asm-frv/auxvec.h
@@ -0,0 +1,4 @@
1#ifndef __FRV_AUXVEC_H
2#define __FRV_AUXVEC_H
3
4#endif
diff --git a/include/asm-frv/fcntl.h b/include/asm-frv/fcntl.h
index d61b999f9973..46ab12db5739 100644
--- a/include/asm-frv/fcntl.h
+++ b/include/asm-frv/fcntl.h
@@ -1,88 +1 @@
1#ifndef _ASM_FCNTL_H #include <asm-generic/fcntl.h>
2#define _ASM_FCNTL_H
3
4/* open/fcntl - O_SYNC is only implemented on blocks devices and on files
5 located on an ext2 file system */
6#define O_ACCMODE 0003
7#define O_RDONLY 00
8#define O_WRONLY 01
9#define O_RDWR 02
10#define O_CREAT 0100 /* not fcntl */
11#define O_EXCL 0200 /* not fcntl */
12#define O_NOCTTY 0400 /* not fcntl */
13#define O_TRUNC 01000 /* not fcntl */
14#define O_APPEND 02000
15#define O_NONBLOCK 04000
16#define O_NDELAY O_NONBLOCK
17#define O_SYNC 010000
18#define FASYNC 020000 /* fcntl, for BSD compatibility */
19#define O_DIRECT 040000 /* direct disk access hint */
20#define O_LARGEFILE 0100000
21#define O_DIRECTORY 0200000 /* must be a directory */
22#define O_NOFOLLOW 0400000 /* don't follow links */
23#define O_NOATIME 01000000
24
25#define F_DUPFD 0 /* dup */
26#define F_GETFD 1 /* get close_on_exec */
27#define F_SETFD 2 /* set/clear close_on_exec */
28#define F_GETFL 3 /* get file->f_flags */
29#define F_SETFL 4 /* set file->f_flags */
30#define F_GETLK 5
31#define F_SETLK 6
32#define F_SETLKW 7
33
34#define F_SETOWN 8 /* for sockets. */
35#define F_GETOWN 9 /* for sockets. */
36#define F_SETSIG 10 /* for sockets. */
37#define F_GETSIG 11 /* for sockets. */
38
39#define F_GETLK64 12 /* using 'struct flock64' */
40#define F_SETLK64 13
41#define F_SETLKW64 14
42
43/* for F_[GET|SET]FL */
44#define FD_CLOEXEC 1 /* actually anything with low bit set goes */
45
46/* for posix fcntl() and lockf() */
47#define F_RDLCK 0
48#define F_WRLCK 1
49#define F_UNLCK 2
50
51/* for old implementation of bsd flock () */
52#define F_EXLCK 4 /* or 3 */
53#define F_SHLCK 8 /* or 4 */
54
55/* for leases */
56#define F_INPROGRESS 16
57
58/* operations for bsd flock(), also used by the kernel implementation */
59#define LOCK_SH 1 /* shared lock */
60#define LOCK_EX 2 /* exclusive lock */
61#define LOCK_NB 4 /* or'd with one of the above to prevent
62 blocking */
63#define LOCK_UN 8 /* remove lock */
64
65#define LOCK_MAND 32 /* This is a mandatory flock */
66#define LOCK_READ 64 /* ... Which allows concurrent read operations */
67#define LOCK_WRITE 128 /* ... Which allows concurrent write operations */
68#define LOCK_RW 192 /* ... Which allows concurrent read & write ops */
69
70struct flock {
71 short l_type;
72 short l_whence;
73 off_t l_start;
74 off_t l_len;
75 pid_t l_pid;
76};
77
78struct flock64 {
79 short l_type;
80 short l_whence;
81 loff_t l_start;
82 loff_t l_len;
83 pid_t l_pid;
84};
85
86#define F_LINUX_SPECIFIC_BASE 1024
87#endif /* _ASM_FCNTL_H */
88
diff --git a/include/asm-frv/futex.h b/include/asm-frv/futex.h
new file mode 100644
index 000000000000..2cac5ecd9d00
--- /dev/null
+++ b/include/asm-frv/futex.h
@@ -0,0 +1,53 @@
1#ifndef _ASM_FUTEX_H
2#define _ASM_FUTEX_H
3
4#ifdef __KERNEL__
5
6#include <linux/futex.h>
7#include <asm/errno.h>
8#include <asm/uaccess.h>
9
10static inline int
11futex_atomic_op_inuser (int encoded_op, int __user *uaddr)
12{
13 int op = (encoded_op >> 28) & 7;
14 int cmp = (encoded_op >> 24) & 15;
15 int oparg = (encoded_op << 8) >> 20;
16 int cmparg = (encoded_op << 20) >> 20;
17 int oldval = 0, ret, tem;
18 if (encoded_op & (FUTEX_OP_OPARG_SHIFT << 28))
19 oparg = 1 << oparg;
20
21 if (! access_ok (VERIFY_WRITE, uaddr, sizeof(int)))
22 return -EFAULT;
23
24 inc_preempt_count();
25
26 switch (op) {
27 case FUTEX_OP_SET:
28 case FUTEX_OP_ADD:
29 case FUTEX_OP_OR:
30 case FUTEX_OP_ANDN:
31 case FUTEX_OP_XOR:
32 default:
33 ret = -ENOSYS;
34 }
35
36 dec_preempt_count();
37
38 if (!ret) {
39 switch (cmp) {
40 case FUTEX_OP_CMP_EQ: ret = (oldval == cmparg); break;
41 case FUTEX_OP_CMP_NE: ret = (oldval != cmparg); break;
42 case FUTEX_OP_CMP_LT: ret = (oldval < cmparg); break;
43 case FUTEX_OP_CMP_GE: ret = (oldval >= cmparg); break;
44 case FUTEX_OP_CMP_LE: ret = (oldval <= cmparg); break;
45 case FUTEX_OP_CMP_GT: ret = (oldval > cmparg); break;
46 default: ret = -ENOSYS;
47 }
48 }
49 return ret;
50}
51
52#endif
53#endif
diff --git a/include/asm-frv/uaccess.h b/include/asm-frv/uaccess.h
index 32dc52e883e5..991b50fbba24 100644
--- a/include/asm-frv/uaccess.h
+++ b/include/asm-frv/uaccess.h
@@ -67,12 +67,6 @@ static inline int ___range_ok(unsigned long addr, unsigned long size)
67#define access_ok(type,addr,size) (__range_ok((addr), (size)) == 0) 67#define access_ok(type,addr,size) (__range_ok((addr), (size)) == 0)
68#define __access_ok(addr,size) (__range_ok((addr), (size)) == 0) 68#define __access_ok(addr,size) (__range_ok((addr), (size)) == 0)
69 69
70/* this function will go away soon - use access_ok() / __range_ok() instead */
71static inline int __deprecated verify_area(int type, const void * addr, unsigned long size)
72{
73 return __range_ok(addr, size);
74}
75
76/* 70/*
77 * The exception table consists of pairs of addresses: the first is the 71 * The exception table consists of pairs of addresses: the first is the
78 * address of an instruction that is allowed to fault, and the second is 72 * address of an instruction that is allowed to fault, and the second is
diff --git a/include/asm-generic/fcntl.h b/include/asm-generic/fcntl.h
new file mode 100644
index 000000000000..b663520dcdc4
--- /dev/null
+++ b/include/asm-generic/fcntl.h
@@ -0,0 +1,149 @@
1#ifndef _ASM_GENERIC_FCNTL_H
2#define _ASM_GENERIC_FCNTL_H
3
4#include <linux/config.h>
5#include <linux/types.h>
6
7/* open/fcntl - O_SYNC is only implemented on blocks devices and on files
8 located on an ext2 file system */
9#define O_ACCMODE 00000003
10#define O_RDONLY 00000000
11#define O_WRONLY 00000001
12#define O_RDWR 00000002
13#ifndef O_CREAT
14#define O_CREAT 00000100 /* not fcntl */
15#endif
16#ifndef O_EXCL
17#define O_EXCL 00000200 /* not fcntl */
18#endif
19#ifndef O_NOCTTY
20#define O_NOCTTY 00000400 /* not fcntl */
21#endif
22#ifndef O_TRUNC
23#define O_TRUNC 00001000 /* not fcntl */
24#endif
25#ifndef O_APPEND
26#define O_APPEND 00002000
27#endif
28#ifndef O_NONBLOCK
29#define O_NONBLOCK 00004000
30#endif
31#ifndef O_SYNC
32#define O_SYNC 00010000
33#endif
34#ifndef FASYNC
35#define FASYNC 00020000 /* fcntl, for BSD compatibility */
36#endif
37#ifndef O_DIRECT
38#define O_DIRECT 00040000 /* direct disk access hint */
39#endif
40#ifndef O_LARGEFILE
41#define O_LARGEFILE 00100000
42#endif
43#ifndef O_DIRECTORY
44#define O_DIRECTORY 00200000 /* must be a directory */
45#endif
46#ifndef O_NOFOLLOW
47#define O_NOFOLLOW 00400000 /* don't follow links */
48#endif
49#ifndef O_NOATIME
50#define O_NOATIME 01000000
51#endif
52#ifndef O_NDELAY
53#define O_NDELAY O_NONBLOCK
54#endif
55
56#define F_DUPFD 0 /* dup */
57#define F_GETFD 1 /* get close_on_exec */
58#define F_SETFD 2 /* set/clear close_on_exec */
59#define F_GETFL 3 /* get file->f_flags */
60#define F_SETFL 4 /* set file->f_flags */
61#ifndef F_GETLK
62#define F_GETLK 5
63#define F_SETLK 6
64#define F_SETLKW 7
65#endif
66#ifndef F_SETOWN
67#define F_SETOWN 8 /* for sockets. */
68#define F_GETOWN 9 /* for sockets. */
69#endif
70#ifndef F_SETSIG
71#define F_SETSIG 10 /* for sockets. */
72#define F_GETSIG 11 /* for sockets. */
73#endif
74
75/* for F_[GET|SET]FL */
76#define FD_CLOEXEC 1 /* actually anything with low bit set goes */
77
78/* for posix fcntl() and lockf() */
79#ifndef F_RDLCK
80#define F_RDLCK 0
81#define F_WRLCK 1
82#define F_UNLCK 2
83#endif
84
85/* for old implementation of bsd flock () */
86#ifndef F_EXLCK
87#define F_EXLCK 4 /* or 3 */
88#define F_SHLCK 8 /* or 4 */
89#endif
90
91/* for leases */
92#ifndef F_INPROGRESS
93#define F_INPROGRESS 16
94#endif
95
96/* operations for bsd flock(), also used by the kernel implementation */
97#define LOCK_SH 1 /* shared lock */
98#define LOCK_EX 2 /* exclusive lock */
99#define LOCK_NB 4 /* or'd with one of the above to prevent
100 blocking */
101#define LOCK_UN 8 /* remove lock */
102
103#define LOCK_MAND 32 /* This is a mandatory flock ... */
104#define LOCK_READ 64 /* which allows concurrent read operations */
105#define LOCK_WRITE 128 /* which allows concurrent write operations */
106#define LOCK_RW 192 /* which allows concurrent read & write ops */
107
108#define F_LINUX_SPECIFIC_BASE 1024
109
110#ifndef HAVE_ARCH_STRUCT_FLOCK
111#ifndef __ARCH_FLOCK_PAD
112#define __ARCH_FLOCK_PAD
113#endif
114
115struct flock {
116 short l_type;
117 short l_whence;
118 off_t l_start;
119 off_t l_len;
120 pid_t l_pid;
121 __ARCH_FLOCK_PAD
122};
123#endif
124
125#ifndef CONFIG_64BIT
126
127#ifndef F_GETLK64
128#define F_GETLK64 12 /* using 'struct flock64' */
129#define F_SETLK64 13
130#define F_SETLKW64 14
131#endif
132
133#ifndef HAVE_ARCH_STRUCT_FLOCK64
134#ifndef __ARCH_FLOCK64_PAD
135#define __ARCH_FLOCK64_PAD
136#endif
137
138struct flock64 {
139 short l_type;
140 short l_whence;
141 loff_t l_start;
142 loff_t l_len;
143 pid_t l_pid;
144 __ARCH_FLOCK64_PAD
145};
146#endif
147#endif /* !CONFIG_64BIT */
148
149#endif /* _ASM_GENERIC_FCNTL_H */
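
The #ifndef guards in the new generic header are what let most per-arch fcntl.h files shrink to a few lines in this patch: an architecture defines only the values that differ from the generic defaults and then includes <asm-generic/fcntl.h>, which skips anything already set (the arm26, h8300 and m68k conversions in this series follow exactly this shape). A minimal sketch of the pattern, with a made-up guard name:

	/* Sketch of the per-arch override pattern. _EXAMPLE_ARCH_FCNTL_H is a
	 * hypothetical guard; only the two overrides below are arch-specific. */
	#ifndef _EXAMPLE_ARCH_FCNTL_H
	#define _EXAMPLE_ARCH_FCNTL_H

	#define O_DIRECTORY	040000	/* differs from the generic 00200000 */
	#define O_NOFOLLOW	0100000	/* differs from the generic 00400000 */

	#include <asm-generic/fcntl.h>	/* supplies every flag left undefined */

	#endif /* _EXAMPLE_ARCH_FCNTL_H */
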
diff --git a/include/asm-generic/hdreg.h b/include/asm-generic/hdreg.h
deleted file mode 100644
index 7051fba8bcf9..000000000000
--- a/include/asm-generic/hdreg.h
+++ /dev/null
@@ -1,8 +0,0 @@
1#warning <asm/hdreg.h> is obsolete, please do not use it
2
3#ifndef __ASM_GENERIC_HDREG_H
4#define __ASM_GENERIC_HDREG_H
5
6typedef unsigned long ide_ioreg_t;
7
8#endif /* __ASM_GENERIC_HDREG_H */
diff --git a/include/asm-generic/sections.h b/include/asm-generic/sections.h
index 450eae22c39a..886dbd116899 100644
--- a/include/asm-generic/sections.h
+++ b/include/asm-generic/sections.h
@@ -12,5 +12,6 @@ extern char _sextratext[] __attribute__((weak));
12extern char _eextratext[] __attribute__((weak)); 12extern char _eextratext[] __attribute__((weak));
13extern char _end[]; 13extern char _end[];
14extern char __per_cpu_start[], __per_cpu_end[]; 14extern char __per_cpu_start[], __per_cpu_end[];
15extern char __kprobes_text_start[], __kprobes_text_end[];
15 16
16#endif /* _ASM_GENERIC_SECTIONS_H_ */ 17#endif /* _ASM_GENERIC_SECTIONS_H_ */
diff --git a/include/asm-generic/unaligned.h b/include/asm-generic/unaligned.h
index 6c90f0f36eec..4dc8ddb401c1 100644
--- a/include/asm-generic/unaligned.h
+++ b/include/asm-generic/unaligned.h
@@ -16,9 +16,9 @@
16 * The main single-value unaligned transfer routines. 16 * The main single-value unaligned transfer routines.
17 */ 17 */
18#define get_unaligned(ptr) \ 18#define get_unaligned(ptr) \
19 ((__typeof__(*(ptr)))__get_unaligned((ptr), sizeof(*(ptr)))) 19 __get_unaligned((ptr), sizeof(*(ptr)))
20#define put_unaligned(x,ptr) \ 20#define put_unaligned(x,ptr) \
21 __put_unaligned((unsigned long)(x), (ptr), sizeof(*(ptr))) 21 __put_unaligned((__u64)(x), (ptr), sizeof(*(ptr)))
22 22
23/* 23/*
24 * This function doesn't actually exist. The idea is that when 24 * This function doesn't actually exist. The idea is that when
@@ -36,19 +36,19 @@ struct __una_u16 { __u16 x __attribute__((packed)); };
36 * Elemental unaligned loads 36 * Elemental unaligned loads
37 */ 37 */
38 38
39static inline unsigned long __uldq(const __u64 *addr) 39static inline __u64 __uldq(const __u64 *addr)
40{ 40{
41 const struct __una_u64 *ptr = (const struct __una_u64 *) addr; 41 const struct __una_u64 *ptr = (const struct __una_u64 *) addr;
42 return ptr->x; 42 return ptr->x;
43} 43}
44 44
45static inline unsigned long __uldl(const __u32 *addr) 45static inline __u32 __uldl(const __u32 *addr)
46{ 46{
47 const struct __una_u32 *ptr = (const struct __una_u32 *) addr; 47 const struct __una_u32 *ptr = (const struct __una_u32 *) addr;
48 return ptr->x; 48 return ptr->x;
49} 49}
50 50
51static inline unsigned long __uldw(const __u16 *addr) 51static inline __u16 __uldw(const __u16 *addr)
52{ 52{
53 const struct __una_u16 *ptr = (const struct __una_u16 *) addr; 53 const struct __una_u16 *ptr = (const struct __una_u16 *) addr;
54 return ptr->x; 54 return ptr->x;
@@ -78,7 +78,7 @@ static inline void __ustw(__u16 val, __u16 *addr)
78 78
79#define __get_unaligned(ptr, size) ({ \ 79#define __get_unaligned(ptr, size) ({ \
80 const void *__gu_p = ptr; \ 80 const void *__gu_p = ptr; \
81 unsigned long val; \ 81 __typeof__(*(ptr)) val; \
82 switch (size) { \ 82 switch (size) { \
83 case 1: \ 83 case 1: \
84 val = *(const __u8 *)__gu_p; \ 84 val = *(const __u8 *)__gu_p; \
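
The hunk above only widens the elemental helpers so each returns its natural width (and get_unaligned returns the pointer's own type) instead of unsigned long; the underlying packed-struct trick is unchanged. A standalone sketch of that trick, outside the kernel and purely illustrative:

	/* Read a 32-bit value from a possibly misaligned address by going
	 * through a packed struct, so the compiler emits accesses that are
	 * safe on architectures that trap on unaligned loads. */
	#include <stdio.h>
	#include <stdint.h>

	struct una_u32 { uint32_t x; } __attribute__((packed));

	static inline uint32_t load_unaligned_u32(const void *p)
	{
		const struct una_u32 *ptr = p;
		return ptr->x;
	}

	int main(void)
	{
		unsigned char buf[8] = { 0, 0x78, 0x56, 0x34, 0x12, 0, 0, 0 };

		/* buf + 1 is deliberately misaligned for a 4-byte load;
		 * on a little-endian host this prints 0x12345678. */
		printf("0x%08x\n", (unsigned)load_unaligned_u32(buf + 1));
		return 0;
	}
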
diff --git a/include/asm-generic/vmlinux.lds.h b/include/asm-generic/vmlinux.lds.h
index 3fa94288aa93..6f857be2b644 100644
--- a/include/asm-generic/vmlinux.lds.h
+++ b/include/asm-generic/vmlinux.lds.h
@@ -97,3 +97,9 @@
97 VMLINUX_SYMBOL(__lock_text_start) = .; \ 97 VMLINUX_SYMBOL(__lock_text_start) = .; \
98 *(.spinlock.text) \ 98 *(.spinlock.text) \
99 VMLINUX_SYMBOL(__lock_text_end) = .; 99 VMLINUX_SYMBOL(__lock_text_end) = .;
100
101#define KPROBES_TEXT \
102 ALIGN_FUNCTION(); \
103 VMLINUX_SYMBOL(__kprobes_text_start) = .; \
104 *(.kprobes.text) \
105 VMLINUX_SYMBOL(__kprobes_text_end) = .;
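
The new KPROBES_TEXT output section and the __kprobes_text_start/__kprobes_text_end markers added to asm-generic/sections.h above give the kprobes core a way to refuse probes on its own code paths. A hedged sketch of how the pieces fit together (the __kprobes attribute macro is assumed here, not part of these hunks):

	/* Assumed helper: place a function in the .kprobes.text section,
	 * which the KPROBES_TEXT linker macro collects between the markers. */
	#define __kprobes	__attribute__((__section__(".kprobes.text")))

	extern char __kprobes_text_start[], __kprobes_text_end[];

	/* Code that runs while handling a probe must itself never be probed... */
	static int __kprobes handle_probe_hit(void *addr)
	{
		/* ... probe bookkeeping ... */
		return 0;
	}

	/* ...so registration can reject any address inside the section. */
	static int in_kprobes_text(unsigned long addr)
	{
		return addr >= (unsigned long)__kprobes_text_start &&
		       addr <  (unsigned long)__kprobes_text_end;
	}
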
diff --git a/include/asm-h8300/auxvec.h b/include/asm-h8300/auxvec.h
new file mode 100644
index 000000000000..1d36fe38b088
--- /dev/null
+++ b/include/asm-h8300/auxvec.h
@@ -0,0 +1,4 @@
1#ifndef __ASMH8300_AUXVEC_H
2#define __ASMH8300_AUXVEC_H
3
4#endif
diff --git a/include/asm-h8300/fcntl.h b/include/asm-h8300/fcntl.h
index 355350a57bf9..1952cb2e3b06 100644
--- a/include/asm-h8300/fcntl.h
+++ b/include/asm-h8300/fcntl.h
@@ -1,87 +1,11 @@
1#ifndef _H8300_FCNTL_H 1#ifndef _H8300_FCNTL_H
2#define _H8300_FCNTL_H 2#define _H8300_FCNTL_H
3 3
4/* open/fcntl - O_SYNC is only implemented on blocks devices and on files
5 located on an ext2 file system */
6#define O_ACCMODE 0003
7#define O_RDONLY 00
8#define O_WRONLY 01
9#define O_RDWR 02
10#define O_CREAT 0100 /* not fcntl */
11#define O_EXCL 0200 /* not fcntl */
12#define O_NOCTTY 0400 /* not fcntl */
13#define O_TRUNC 01000 /* not fcntl */
14#define O_APPEND 02000
15#define O_NONBLOCK 04000
16#define O_NDELAY O_NONBLOCK
17#define O_SYNC 010000
18#define FASYNC 020000 /* fcntl, for BSD compatibility */
19#define O_DIRECTORY 040000 /* must be a directory */ 4#define O_DIRECTORY 040000 /* must be a directory */
20#define O_NOFOLLOW 0100000 /* don't follow links */ 5#define O_NOFOLLOW 0100000 /* don't follow links */
21#define O_DIRECT 0200000 /* direct disk access hint - currently ignored */ 6#define O_DIRECT 0200000 /* direct disk access hint - currently ignored */
22#define O_LARGEFILE 0400000 7#define O_LARGEFILE 0400000
23#define O_NOATIME 01000000
24 8
25#define F_DUPFD 0 /* dup */ 9#include <asm-generic/fcntl.h>
26#define F_GETFD 1 /* get close_on_exec */
27#define F_SETFD 2 /* set/clear close_on_exec */
28#define F_GETFL 3 /* get file->f_flags */
29#define F_SETFL 4 /* set file->f_flags */
30#define F_GETLK 5
31#define F_SETLK 6
32#define F_SETLKW 7
33 10
34#define F_SETOWN 8 /* for sockets. */
35#define F_GETOWN 9 /* for sockets. */
36#define F_SETSIG 10 /* for sockets. */
37#define F_GETSIG 11 /* for sockets. */
38
39#define F_GETLK64 12 /* using 'struct flock64' */
40#define F_SETLK64 13
41#define F_SETLKW64 14
42
43/* for F_[GET|SET]FL */
44#define FD_CLOEXEC 1 /* actually anything with low bit set goes */
45
46/* for posix fcntl() and lockf() */
47#define F_RDLCK 0
48#define F_WRLCK 1
49#define F_UNLCK 2
50
51/* for old implementation of bsd flock () */
52#define F_EXLCK 4 /* or 3 */
53#define F_SHLCK 8 /* or 4 */
54
55/* for leases */
56#define F_INPROGRESS 16
57
58/* operations for bsd flock(), also used by the kernel implementation */
59#define LOCK_SH 1 /* shared lock */
60#define LOCK_EX 2 /* exclusive lock */
61#define LOCK_NB 4 /* or'd with one of the above to prevent
62 blocking */
63#define LOCK_UN 8 /* remove lock */
64
65#define LOCK_MAND 32 /* This is a mandatory flock */
66#define LOCK_READ 64 /* ... Which allows concurrent read operations */
67#define LOCK_WRITE 128 /* ... Which allows concurrent write operations */
68#define LOCK_RW 192 /* ... Which allows concurrent read & write ops */
69
70struct flock {
71 short l_type;
72 short l_whence;
73 off_t l_start;
74 off_t l_len;
75 pid_t l_pid;
76};
77
78struct flock64 {
79 short l_type;
80 short l_whence;
81 loff_t l_start;
82 loff_t l_len;
83 pid_t l_pid;
84};
85
86#define F_LINUX_SPECIFIC_BASE 1024
87#endif /* _H8300_FCNTL_H */ 11#endif /* _H8300_FCNTL_H */
diff --git a/include/asm-h8300/futex.h b/include/asm-h8300/futex.h
new file mode 100644
index 000000000000..2cac5ecd9d00
--- /dev/null
+++ b/include/asm-h8300/futex.h
@@ -0,0 +1,53 @@
1#ifndef _ASM_FUTEX_H
2#define _ASM_FUTEX_H
3
4#ifdef __KERNEL__
5
6#include <linux/futex.h>
7#include <asm/errno.h>
8#include <asm/uaccess.h>
9
10static inline int
11futex_atomic_op_inuser (int encoded_op, int __user *uaddr)
12{
13 int op = (encoded_op >> 28) & 7;
14 int cmp = (encoded_op >> 24) & 15;
15 int oparg = (encoded_op << 8) >> 20;
16 int cmparg = (encoded_op << 20) >> 20;
17 int oldval = 0, ret, tem;
18 if (encoded_op & (FUTEX_OP_OPARG_SHIFT << 28))
19 oparg = 1 << oparg;
20
21 if (! access_ok (VERIFY_WRITE, uaddr, sizeof(int)))
22 return -EFAULT;
23
24 inc_preempt_count();
25
26 switch (op) {
27 case FUTEX_OP_SET:
28 case FUTEX_OP_ADD:
29 case FUTEX_OP_OR:
30 case FUTEX_OP_ANDN:
31 case FUTEX_OP_XOR:
32 default:
33 ret = -ENOSYS;
34 }
35
36 dec_preempt_count();
37
38 if (!ret) {
39 switch (cmp) {
40 case FUTEX_OP_CMP_EQ: ret = (oldval == cmparg); break;
41 case FUTEX_OP_CMP_NE: ret = (oldval != cmparg); break;
42 case FUTEX_OP_CMP_LT: ret = (oldval < cmparg); break;
43 case FUTEX_OP_CMP_GE: ret = (oldval >= cmparg); break;
44 case FUTEX_OP_CMP_LE: ret = (oldval <= cmparg); break;
45 case FUTEX_OP_CMP_GT: ret = (oldval > cmparg); break;
46 default: ret = -ENOSYS;
47 }
48 }
49 return ret;
50}
51
52#endif
53#endif
diff --git a/include/asm-h8300/hdreg.h b/include/asm-h8300/hdreg.h
deleted file mode 100644
index 36d0c06687d8..000000000000
--- a/include/asm-h8300/hdreg.h
+++ /dev/null
@@ -1,15 +0,0 @@
1/*
2 * linux/include/asm-h8300/hdreg.h
3 *
4 * Copyright (C) 1994-1996 Linus Torvalds & authors
5 */
6
7#warning this file is obsolete, please do not use it
8
9#ifndef _H8300_HDREG_H
10#define _H8300_HDREG_H
11
12typedef unsigned int q40ide_ioreg_t;
13typedef unsigned char * ide_ioreg_t;
14
15#endif /* _H8300_HDREG_H */
diff --git a/include/asm-h8300/uaccess.h b/include/asm-h8300/uaccess.h
index 1480f307a474..ebe58c6c8387 100644
--- a/include/asm-h8300/uaccess.h
+++ b/include/asm-h8300/uaccess.h
@@ -24,12 +24,6 @@ static inline int __access_ok(unsigned long addr, unsigned long size)
24 return(RANGE_CHECK_OK(addr, size, 0L, (unsigned long)&_ramend)); 24 return(RANGE_CHECK_OK(addr, size, 0L, (unsigned long)&_ramend));
25} 25}
26 26
27/* this function will go away soon - use access_ok() instead */
28static inline int __deprecated verify_area(int type, const void *addr, unsigned long size)
29{
30 return access_ok(type,addr,size)?0:-EFAULT;
31}
32
33/* 27/*
34 * The exception table consists of pairs of addresses: the first is the 28 * The exception table consists of pairs of addresses: the first is the
35 * address of an instruction that is allowed to fault, and the second is 29 * address of an instruction that is allowed to fault, and the second is
diff --git a/include/asm-i386/auxvec.h b/include/asm-i386/auxvec.h
new file mode 100644
index 000000000000..395e13016bfb
--- /dev/null
+++ b/include/asm-i386/auxvec.h
@@ -0,0 +1,11 @@
1#ifndef __ASMi386_AUXVEC_H
2#define __ASMi386_AUXVEC_H
3
4/*
5 * Architecture-neutral AT_ values in 0-17, leave some room
6 * for more of them, start the x86-specific ones at 32.
7 */
8#define AT_SYSINFO 32
9#define AT_SYSINFO_EHDR 33
10
11#endif
diff --git a/include/asm-i386/elf.h b/include/asm-i386/elf.h
index 130bdc8c68cf..fa11117d3cfa 100644
--- a/include/asm-i386/elf.h
+++ b/include/asm-i386/elf.h
@@ -9,6 +9,7 @@
9#include <asm/user.h> 9#include <asm/user.h>
10#include <asm/processor.h> 10#include <asm/processor.h>
11#include <asm/system.h> /* for savesegment */ 11#include <asm/system.h> /* for savesegment */
12#include <asm/auxvec.h>
12 13
13#include <linux/utsname.h> 14#include <linux/utsname.h>
14 15
@@ -109,13 +110,6 @@ typedef struct user_fxsr_struct elf_fpxregset_t;
109 110
110#define ELF_PLATFORM (system_utsname.machine) 111#define ELF_PLATFORM (system_utsname.machine)
111 112
112/*
113 * Architecture-neutral AT_ values in 0-17, leave some room
114 * for more of them, start the x86-specific ones at 32.
115 */
116#define AT_SYSINFO 32
117#define AT_SYSINFO_EHDR 33
118
119#ifdef __KERNEL__ 113#ifdef __KERNEL__
120#define SET_PERSONALITY(ex, ibcs2) do { } while (0) 114#define SET_PERSONALITY(ex, ibcs2) do { } while (0)
121 115
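
The AT_SYSINFO / AT_SYSINFO_EHDR values that this hunk moves out of elf.h into asm-i386/auxvec.h are consumed by user space through the ELF auxiliary vector. A small sketch of how a 32-bit process can read them, assuming glibc's <elf.h> types and the common (non-standard but widely supported) three-argument main:

	/* Walk past envp[] to the Elf32_auxv_t array and look up AT_SYSINFO.
	 * Illustrative only; assumes a 32-bit process image. */
	#include <stdio.h>
	#include <elf.h>

	#ifndef AT_SYSINFO
	#define AT_SYSINFO 32
	#endif

	int main(int argc, char **argv, char **envp)
	{
		Elf32_auxv_t *auxv;

		(void)argc; (void)argv;
		while (*envp++)			/* skip to the end of envp[] */
			;
		for (auxv = (Elf32_auxv_t *)envp; auxv->a_type != AT_NULL; auxv++)
			if (auxv->a_type == AT_SYSINFO)
				printf("vsyscall entry at %#lx\n",
				       (unsigned long)auxv->a_un.a_val);
		return 0;
	}
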
diff --git a/include/asm-i386/fcntl.h b/include/asm-i386/fcntl.h
index 511cde94a3ed..46ab12db5739 100644
--- a/include/asm-i386/fcntl.h
+++ b/include/asm-i386/fcntl.h
@@ -1,88 +1 @@
1#ifndef _I386_FCNTL_H #include <asm-generic/fcntl.h>
2#define _I386_FCNTL_H
3
4/* open/fcntl - O_SYNC is only implemented on blocks devices and on files
5 located on an ext2 file system */
6#define O_ACCMODE 0003
7#define O_RDONLY 00
8#define O_WRONLY 01
9#define O_RDWR 02
10#define O_CREAT 0100 /* not fcntl */
11#define O_EXCL 0200 /* not fcntl */
12#define O_NOCTTY 0400 /* not fcntl */
13#define O_TRUNC 01000 /* not fcntl */
14#define O_APPEND 02000
15#define O_NONBLOCK 04000
16#define O_NDELAY O_NONBLOCK
17#define O_SYNC 010000
18#define FASYNC 020000 /* fcntl, for BSD compatibility */
19#define O_DIRECT 040000 /* direct disk access hint */
20#define O_LARGEFILE 0100000
21#define O_DIRECTORY 0200000 /* must be a directory */
22#define O_NOFOLLOW 0400000 /* don't follow links */
23#define O_NOATIME 01000000
24
25#define F_DUPFD 0 /* dup */
26#define F_GETFD 1 /* get close_on_exec */
27#define F_SETFD 2 /* set/clear close_on_exec */
28#define F_GETFL 3 /* get file->f_flags */
29#define F_SETFL 4 /* set file->f_flags */
30#define F_GETLK 5
31#define F_SETLK 6
32#define F_SETLKW 7
33
34#define F_SETOWN 8 /* for sockets. */
35#define F_GETOWN 9 /* for sockets. */
36#define F_SETSIG 10 /* for sockets. */
37#define F_GETSIG 11 /* for sockets. */
38
39#define F_GETLK64 12 /* using 'struct flock64' */
40#define F_SETLK64 13
41#define F_SETLKW64 14
42
43/* for F_[GET|SET]FL */
44#define FD_CLOEXEC 1 /* actually anything with low bit set goes */
45
46/* for posix fcntl() and lockf() */
47#define F_RDLCK 0
48#define F_WRLCK 1
49#define F_UNLCK 2
50
51/* for old implementation of bsd flock () */
52#define F_EXLCK 4 /* or 3 */
53#define F_SHLCK 8 /* or 4 */
54
55/* for leases */
56#define F_INPROGRESS 16
57
58/* operations for bsd flock(), also used by the kernel implementation */
59#define LOCK_SH 1 /* shared lock */
60#define LOCK_EX 2 /* exclusive lock */
61#define LOCK_NB 4 /* or'd with one of the above to prevent
62 blocking */
63#define LOCK_UN 8 /* remove lock */
64
65#define LOCK_MAND 32 /* This is a mandatory flock */
66#define LOCK_READ 64 /* ... Which allows concurrent read operations */
67#define LOCK_WRITE 128 /* ... Which allows concurrent write operations */
68#define LOCK_RW 192 /* ... Which allows concurrent read & write ops */
69
70struct flock {
71 short l_type;
72 short l_whence;
73 off_t l_start;
74 off_t l_len;
75 pid_t l_pid;
76};
77
78struct flock64 {
79 short l_type;
80 short l_whence;
81 loff_t l_start;
82 loff_t l_len;
83 pid_t l_pid;
84};
85
86#define F_LINUX_SPECIFIC_BASE 1024
87
88#endif
diff --git a/include/asm-i386/futex.h b/include/asm-i386/futex.h
new file mode 100644
index 000000000000..44b9db806474
--- /dev/null
+++ b/include/asm-i386/futex.h
@@ -0,0 +1,108 @@
1#ifndef _ASM_FUTEX_H
2#define _ASM_FUTEX_H
3
4#ifdef __KERNEL__
5
6#include <linux/futex.h>
7#include <asm/errno.h>
8#include <asm/system.h>
9#include <asm/processor.h>
10#include <asm/uaccess.h>
11
12#define __futex_atomic_op1(insn, ret, oldval, uaddr, oparg) \
13 __asm__ __volatile ( \
14"1: " insn "\n" \
15"2: .section .fixup,\"ax\"\n\
163: mov %3, %1\n\
17 jmp 2b\n\
18 .previous\n\
19 .section __ex_table,\"a\"\n\
20 .align 8\n\
21 .long 1b,3b\n\
22 .previous" \
23 : "=r" (oldval), "=r" (ret), "=m" (*uaddr) \
24 : "i" (-EFAULT), "m" (*uaddr), "0" (oparg), "1" (0))
25
26#define __futex_atomic_op2(insn, ret, oldval, uaddr, oparg) \
27 __asm__ __volatile ( \
28"1: movl %2, %0\n\
29 movl %0, %3\n" \
30 insn "\n" \
31"2: " LOCK_PREFIX "cmpxchgl %3, %2\n\
32 jnz 1b\n\
333: .section .fixup,\"ax\"\n\
344: mov %5, %1\n\
35 jmp 3b\n\
36 .previous\n\
37 .section __ex_table,\"a\"\n\
38 .align 8\n\
39 .long 1b,4b,2b,4b\n\
40 .previous" \
41 : "=&a" (oldval), "=&r" (ret), "=m" (*uaddr), \
42 "=&r" (tem) \
43 : "r" (oparg), "i" (-EFAULT), "m" (*uaddr), "1" (0))
44
45static inline int
46futex_atomic_op_inuser (int encoded_op, int __user *uaddr)
47{
48 int op = (encoded_op >> 28) & 7;
49 int cmp = (encoded_op >> 24) & 15;
50 int oparg = (encoded_op << 8) >> 20;
51 int cmparg = (encoded_op << 20) >> 20;
52 int oldval = 0, ret, tem;
53 if (encoded_op & (FUTEX_OP_OPARG_SHIFT << 28))
54 oparg = 1 << oparg;
55
56 if (! access_ok (VERIFY_WRITE, uaddr, sizeof(int)))
57 return -EFAULT;
58
59 inc_preempt_count();
60
61 if (op == FUTEX_OP_SET)
62 __futex_atomic_op1("xchgl %0, %2", ret, oldval, uaddr, oparg);
63 else {
64#ifndef CONFIG_X86_BSWAP
65 if (boot_cpu_data.x86 == 3)
66 ret = -ENOSYS;
67 else
68#endif
69 switch (op) {
70 case FUTEX_OP_ADD:
71 __futex_atomic_op1(LOCK_PREFIX "xaddl %0, %2", ret,
72 oldval, uaddr, oparg);
73 break;
74 case FUTEX_OP_OR:
75 __futex_atomic_op2("orl %4, %3", ret, oldval, uaddr,
76 oparg);
77 break;
78 case FUTEX_OP_ANDN:
79 __futex_atomic_op2("andl %4, %3", ret, oldval, uaddr,
80 ~oparg);
81 break;
82 case FUTEX_OP_XOR:
83 __futex_atomic_op2("xorl %4, %3", ret, oldval, uaddr,
84 oparg);
85 break;
86 default:
87 ret = -ENOSYS;
88 }
89 }
90
91 dec_preempt_count();
92
93 if (!ret) {
94 switch (cmp) {
95 case FUTEX_OP_CMP_EQ: ret = (oldval == cmparg); break;
96 case FUTEX_OP_CMP_NE: ret = (oldval != cmparg); break;
97 case FUTEX_OP_CMP_LT: ret = (oldval < cmparg); break;
98 case FUTEX_OP_CMP_GE: ret = (oldval >= cmparg); break;
99 case FUTEX_OP_CMP_LE: ret = (oldval <= cmparg); break;
100 case FUTEX_OP_CMP_GT: ret = (oldval > cmparg); break;
101 default: ret = -ENOSYS;
102 }
103 }
104 return ret;
105}
106
107#endif
108#endif
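
Unlike the generic stubs elsewhere in this series, the i386 version above really performs the operation: FUTEX_OP_SET uses a plain xchgl, while the other ops go through the __futex_atomic_op2 load / modify / lock cmpxchgl retry loop (guarded against 386s that lack cmpxchg). A portable sketch of that retry loop using a GCC builtin, shown only to make the control flow explicit; it is not the code the kernel uses:

	/* Equivalent of the __futex_atomic_op2 loop for an OR operation:
	 * read the old value, compute the new one, and publish it only if
	 * no other CPU changed the word in the meantime. */
	static int atomic_or_return_old(int *uaddr, int oparg)
	{
		int oldval, newval;

		do {
			oldval = *uaddr;
			newval = oldval | oparg;
		} while (__sync_val_compare_and_swap(uaddr, oldval, newval) != oldval);

		return oldval;	/* the caller's comparison runs on this value */
	}
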
diff --git a/include/asm-i386/hdreg.h b/include/asm-i386/hdreg.h
deleted file mode 100644
index 5989bbc97cbf..000000000000
--- a/include/asm-i386/hdreg.h
+++ /dev/null
@@ -1 +0,0 @@
1#warning this file is obsolete, please do not use it
diff --git a/include/asm-i386/uaccess.h b/include/asm-i386/uaccess.h
index 886867aea947..89ab7e2bc5aa 100644
--- a/include/asm-i386/uaccess.h
+++ b/include/asm-i386/uaccess.h
@@ -83,30 +83,6 @@ extern struct movsl_mask {
83 */ 83 */
84#define access_ok(type,addr,size) (likely(__range_ok(addr,size) == 0)) 84#define access_ok(type,addr,size) (likely(__range_ok(addr,size) == 0))
85 85
86/**
87 * verify_area: - Obsolete/deprecated and will go away soon,
88 * use access_ok() instead.
89 * @type: Type of access: %VERIFY_READ or %VERIFY_WRITE
90 * @addr: User space pointer to start of block to check
91 * @size: Size of block to check
92 *
93 * Context: User context only. This function may sleep.
94 *
95 * This function has been replaced by access_ok().
96 *
97 * Checks if a pointer to a block of memory in user space is valid.
98 *
99 * Returns zero if the memory block may be valid, -EFAULT
100 * if it is definitely invalid.
101 *
102 * See access_ok() for more details.
103 */
104static inline int __deprecated verify_area(int type, const void __user * addr, unsigned long size)
105{
106 return access_ok(type,addr,size) ? 0 : -EFAULT;
107}
108
109
110/* 86/*
111 * The exception table consists of pairs of addresses: the first is the 87 * The exception table consists of pairs of addresses: the first is the
112 * address of an instruction that is allowed to fault, and the second is 88 * address of an instruction that is allowed to fault, and the second is
diff --git a/include/asm-ia64/auxvec.h b/include/asm-ia64/auxvec.h
new file mode 100644
index 000000000000..23cebe5685b9
--- /dev/null
+++ b/include/asm-ia64/auxvec.h
@@ -0,0 +1,11 @@
1#ifndef _ASM_IA64_AUXVEC_H
2#define _ASM_IA64_AUXVEC_H
3
4/*
5 * Architecture-neutral AT_ values are in the range 0-17. Leave some room for more of
6 * them, start the architecture-specific ones at 32.
7 */
8#define AT_SYSINFO 32
9#define AT_SYSINFO_EHDR 33
10
11#endif /* _ASM_IA64_AUXVEC_H */
diff --git a/include/asm-ia64/compat.h b/include/asm-ia64/compat.h
index 0c05e5bad8a0..aaf11f4e9169 100644
--- a/include/asm-ia64/compat.h
+++ b/include/asm-ia64/compat.h
@@ -13,10 +13,10 @@ typedef s32 compat_time_t;
13typedef s32 compat_clock_t; 13typedef s32 compat_clock_t;
14typedef s32 compat_key_t; 14typedef s32 compat_key_t;
15typedef s32 compat_pid_t; 15typedef s32 compat_pid_t;
16typedef u16 compat_uid_t; 16typedef u16 __compat_uid_t;
17typedef u16 compat_gid_t; 17typedef u16 __compat_gid_t;
18typedef u32 compat_uid32_t; 18typedef u32 __compat_uid32_t;
19typedef u32 compat_gid32_t; 19typedef u32 __compat_gid32_t;
20typedef u16 compat_mode_t; 20typedef u16 compat_mode_t;
21typedef u32 compat_ino_t; 21typedef u32 compat_ino_t;
22typedef u16 compat_dev_t; 22typedef u16 compat_dev_t;
@@ -50,8 +50,8 @@ struct compat_stat {
50 compat_ino_t st_ino; 50 compat_ino_t st_ino;
51 compat_mode_t st_mode; 51 compat_mode_t st_mode;
52 compat_nlink_t st_nlink; 52 compat_nlink_t st_nlink;
53 compat_uid_t st_uid; 53 __compat_uid_t st_uid;
54 compat_gid_t st_gid; 54 __compat_gid_t st_gid;
55 compat_dev_t st_rdev; 55 compat_dev_t st_rdev;
56 u16 __pad2; 56 u16 __pad2;
57 u32 st_size; 57 u32 st_size;
@@ -120,10 +120,10 @@ typedef u32 compat_sigset_word;
120 120
121struct compat_ipc64_perm { 121struct compat_ipc64_perm {
122 compat_key_t key; 122 compat_key_t key;
123 compat_uid32_t uid; 123 __compat_uid32_t uid;
124 compat_gid32_t gid; 124 __compat_gid32_t gid;
125 compat_uid32_t cuid; 125 __compat_uid32_t cuid;
126 compat_gid32_t cgid; 126 __compat_gid32_t cgid;
127 unsigned short mode; 127 unsigned short mode;
128 unsigned short __pad1; 128 unsigned short __pad1;
129 unsigned short seq; 129 unsigned short seq;
diff --git a/include/asm-ia64/elf.h b/include/asm-ia64/elf.h
index 7d4ccc4b976e..446fce036fd9 100644
--- a/include/asm-ia64/elf.h
+++ b/include/asm-ia64/elf.h
@@ -12,6 +12,7 @@
12 12
13#include <asm/fpu.h> 13#include <asm/fpu.h>
14#include <asm/page.h> 14#include <asm/page.h>
15#include <asm/auxvec.h>
15 16
16/* 17/*
17 * This is used to ensure we don't load something for the wrong architecture. 18 * This is used to ensure we don't load something for the wrong architecture.
@@ -177,13 +178,6 @@ extern void ia64_elf_core_copy_regs (struct pt_regs *src, elf_gregset_t dst);
177 relevant until we have real hardware to play with... */ 178 relevant until we have real hardware to play with... */
178#define ELF_PLATFORM NULL 179#define ELF_PLATFORM NULL
179 180
180/*
181 * Architecture-neutral AT_ values are in the range 0-17. Leave some room for more of
182 * them, start the architecture-specific ones at 32.
183 */
184#define AT_SYSINFO 32
185#define AT_SYSINFO_EHDR 33
186
187#ifdef __KERNEL__ 181#ifdef __KERNEL__
188#define SET_PERSONALITY(ex, ibcs2) set_personality(PER_LINUX) 182#define SET_PERSONALITY(ex, ibcs2) set_personality(PER_LINUX)
189#define elf_read_implies_exec(ex, executable_stack) \ 183#define elf_read_implies_exec(ex, executable_stack) \
diff --git a/include/asm-ia64/fcntl.h b/include/asm-ia64/fcntl.h
index cee16ea1780a..1dd275dc8f65 100644
--- a/include/asm-ia64/fcntl.h
+++ b/include/asm-ia64/fcntl.h
@@ -1,87 +1,13 @@
1#ifndef _ASM_IA64_FCNTL_H 1#ifndef _ASM_IA64_FCNTL_H
2#define _ASM_IA64_FCNTL_H 2#define _ASM_IA64_FCNTL_H
3/* 3/*
4 * Based on <asm-i386/fcntl.h>.
5 *
6 * Modified 1998-2000 4 * Modified 1998-2000
7 * David Mosberger-Tang <davidm@hpl.hp.com>, Hewlett-Packard Co. 5 * David Mosberger-Tang <davidm@hpl.hp.com>, Hewlett-Packard Co.
8 */ 6 */
9 7
10/*
11 * open/fcntl - O_SYNC is only implemented on blocks devices and on
12 * files located on an ext2 file system
13 */
14#define O_ACCMODE 0003
15#define O_RDONLY 00
16#define O_WRONLY 01
17#define O_RDWR 02
18#define O_CREAT 0100 /* not fcntl */
19#define O_EXCL 0200 /* not fcntl */
20#define O_NOCTTY 0400 /* not fcntl */
21#define O_TRUNC 01000 /* not fcntl */
22#define O_APPEND 02000
23#define O_NONBLOCK 04000
24#define O_NDELAY O_NONBLOCK
25#define O_SYNC 010000
26#define FASYNC 020000 /* fcntl, for BSD compatibility */
27#define O_DIRECT 040000 /* direct disk access hint - currently ignored */
28#define O_LARGEFILE 0100000
29#define O_DIRECTORY 0200000 /* must be a directory */
30#define O_NOFOLLOW 0400000 /* don't follow links */
31#define O_NOATIME 01000000
32
33#define F_DUPFD 0 /* dup */
34#define F_GETFD 1 /* get close_on_exec */
35#define F_SETFD 2 /* set/clear close_on_exec */
36#define F_GETFL 3 /* get file->f_flags */
37#define F_SETFL 4 /* set file->f_flags */
38#define F_GETLK 5
39#define F_SETLK 6
40#define F_SETLKW 7
41
42#define F_SETOWN 8 /* for sockets. */
43#define F_GETOWN 9 /* for sockets. */
44#define F_SETSIG 10 /* for sockets. */
45#define F_GETSIG 11 /* for sockets. */
46
47/* for F_[GET|SET]FL */
48#define FD_CLOEXEC 1 /* actually anything with low bit set goes */
49
50/* for posix fcntl() and lockf() */
51#define F_RDLCK 0
52#define F_WRLCK 1
53#define F_UNLCK 2
54
55/* for old implementation of bsd flock () */
56#define F_EXLCK 4 /* or 3 */
57#define F_SHLCK 8 /* or 4 */
58
59/* for leases */
60#define F_INPROGRESS 16
61
62/* operations for bsd flock(), also used by the kernel implementation */
63#define LOCK_SH 1 /* shared lock */
64#define LOCK_EX 2 /* exclusive lock */
65#define LOCK_NB 4 /* or'd with one of the above to prevent
66 blocking */
67#define LOCK_UN 8 /* remove lock */
68
69#define LOCK_MAND 32 /* This is a mandatory flock */
70#define LOCK_READ 64 /* ... Which allows concurrent read operations */
71#define LOCK_WRITE 128 /* ... Which allows concurrent write operations */
72#define LOCK_RW 192 /* ... Which allows concurrent read & write ops */
73
74struct flock {
75 short l_type;
76 short l_whence;
77 off_t l_start;
78 off_t l_len;
79 pid_t l_pid;
80};
81
82#define F_LINUX_SPECIFIC_BASE 1024
83
84#define force_o_largefile() \ 8#define force_o_largefile() \
85 (personality(current->personality) != PER_LINUX32) 9 (personality(current->personality) != PER_LINUX32)
86 10
11#include <asm-generic/fcntl.h>
12
87#endif /* _ASM_IA64_FCNTL_H */ 13#endif /* _ASM_IA64_FCNTL_H */
diff --git a/include/asm-ia64/futex.h b/include/asm-ia64/futex.h
new file mode 100644
index 000000000000..2cac5ecd9d00
--- /dev/null
+++ b/include/asm-ia64/futex.h
@@ -0,0 +1,53 @@
1#ifndef _ASM_FUTEX_H
2#define _ASM_FUTEX_H
3
4#ifdef __KERNEL__
5
6#include <linux/futex.h>
7#include <asm/errno.h>
8#include <asm/uaccess.h>
9
10static inline int
11futex_atomic_op_inuser (int encoded_op, int __user *uaddr)
12{
13 int op = (encoded_op >> 28) & 7;
14 int cmp = (encoded_op >> 24) & 15;
15 int oparg = (encoded_op << 8) >> 20;
16 int cmparg = (encoded_op << 20) >> 20;
17 int oldval = 0, ret, tem;
18 if (encoded_op & (FUTEX_OP_OPARG_SHIFT << 28))
19 oparg = 1 << oparg;
20
21 if (! access_ok (VERIFY_WRITE, uaddr, sizeof(int)))
22 return -EFAULT;
23
24 inc_preempt_count();
25
26 switch (op) {
27 case FUTEX_OP_SET:
28 case FUTEX_OP_ADD:
29 case FUTEX_OP_OR:
30 case FUTEX_OP_ANDN:
31 case FUTEX_OP_XOR:
32 default:
33 ret = -ENOSYS;
34 }
35
36 dec_preempt_count();
37
38 if (!ret) {
39 switch (cmp) {
40 case FUTEX_OP_CMP_EQ: ret = (oldval == cmparg); break;
41 case FUTEX_OP_CMP_NE: ret = (oldval != cmparg); break;
42 case FUTEX_OP_CMP_LT: ret = (oldval < cmparg); break;
43 case FUTEX_OP_CMP_GE: ret = (oldval >= cmparg); break;
44 case FUTEX_OP_CMP_LE: ret = (oldval <= cmparg); break;
45 case FUTEX_OP_CMP_GT: ret = (oldval > cmparg); break;
46 default: ret = -ENOSYS;
47 }
48 }
49 return ret;
50}
51
52#endif
53#endif
diff --git a/include/asm-ia64/hdreg.h b/include/asm-ia64/hdreg.h
deleted file mode 100644
index 83b5161d2678..000000000000
--- a/include/asm-ia64/hdreg.h
+++ /dev/null
@@ -1,14 +0,0 @@
1/*
2 * linux/include/asm-ia64/hdreg.h
3 *
4 * Copyright (C) 1994-1996 Linus Torvalds & authors
5 */
6
7#warning this file is obsolete, please do not use it
8
9#ifndef __ASM_IA64_HDREG_H
10#define __ASM_IA64_HDREG_H
11
12typedef unsigned short ide_ioreg_t;
13
14#endif /* __ASM_IA64_HDREG_H */
diff --git a/include/asm-ia64/hw_irq.h b/include/asm-ia64/hw_irq.h
index 041ab8c51a64..0cf119b42f7d 100644
--- a/include/asm-ia64/hw_irq.h
+++ b/include/asm-ia64/hw_irq.h
@@ -116,13 +116,6 @@ __ia64_local_vector_to_irq (ia64_vector vec)
116 * and to obtain the irq descriptor for a given irq number. 116 * and to obtain the irq descriptor for a given irq number.
117 */ 117 */
118 118
119/* Return a pointer to the irq descriptor for IRQ. */
120static inline irq_desc_t *
121irq_descp (int irq)
122{
123 return irq_desc + irq;
124}
125
126/* Extract the IA-64 vector that corresponds to IRQ. */ 119/* Extract the IA-64 vector that corresponds to IRQ. */
127static inline ia64_vector 120static inline ia64_vector
128irq_to_vector (int irq) 121irq_to_vector (int irq)
diff --git a/include/asm-ia64/irq.h b/include/asm-ia64/irq.h
index bd07d11d9f37..cd984d08fd15 100644
--- a/include/asm-ia64/irq.h
+++ b/include/asm-ia64/irq.h
@@ -14,6 +14,11 @@
14#define NR_IRQS 256 14#define NR_IRQS 256
15#define NR_IRQ_VECTORS NR_IRQS 15#define NR_IRQ_VECTORS NR_IRQS
16 16
17/*
18 * IRQ line status macro IRQ_PER_CPU is used
19 */
20#define ARCH_HAS_IRQ_PER_CPU
21
17static __inline__ int 22static __inline__ int
18irq_canonicalize (int irq) 23irq_canonicalize (int irq)
19{ 24{
@@ -30,12 +35,6 @@ extern void disable_irq_nosync (unsigned int);
30extern void enable_irq (unsigned int); 35extern void enable_irq (unsigned int);
31extern void set_irq_affinity_info (unsigned int irq, int dest, int redir); 36extern void set_irq_affinity_info (unsigned int irq, int dest, int redir);
32 37
33#ifdef CONFIG_SMP
34extern void move_irq(int irq);
35#else
36#define move_irq(irq)
37#endif
38
39struct irqaction; 38struct irqaction;
40struct pt_regs; 39struct pt_regs;
41int handle_IRQ_event(unsigned int, struct pt_regs *, struct irqaction *); 40int handle_IRQ_event(unsigned int, struct pt_regs *, struct irqaction *);
diff --git a/include/asm-ia64/kprobes.h b/include/asm-ia64/kprobes.h
index bf36a32e37e4..573a3574a24f 100644
--- a/include/asm-ia64/kprobes.h
+++ b/include/asm-ia64/kprobes.h
@@ -92,6 +92,7 @@ struct arch_specific_insn {
92 kprobe_opcode_t insn; 92 kprobe_opcode_t insn;
93 #define INST_FLAG_FIX_RELATIVE_IP_ADDR 1 93 #define INST_FLAG_FIX_RELATIVE_IP_ADDR 1
94 #define INST_FLAG_FIX_BRANCH_REG 2 94 #define INST_FLAG_FIX_BRANCH_REG 2
95 #define INST_FLAG_BREAK_INST 4
95 unsigned long inst_flag; 96 unsigned long inst_flag;
96 unsigned short target_br_reg; 97 unsigned short target_br_reg;
97}; 98};
diff --git a/include/asm-ia64/processor.h b/include/asm-ia64/processor.h
index 91bbd1f22461..94e07e727395 100644
--- a/include/asm-ia64/processor.h
+++ b/include/asm-ia64/processor.h
@@ -20,9 +20,6 @@
20#include <asm/ptrace.h> 20#include <asm/ptrace.h>
21#include <asm/ustack.h> 21#include <asm/ustack.h>
22 22
23/* Our arch specific arch_init_sched_domain is in arch/ia64/kernel/domain.c */
24#define ARCH_HAS_SCHED_DOMAIN
25
26#define IA64_NUM_DBG_REGS 8 23#define IA64_NUM_DBG_REGS 8
27/* 24/*
28 * Limits for PMC and PMD are set to less than maximum architected values 25 * Limits for PMC and PMD are set to less than maximum architected values
diff --git a/include/asm-ia64/topology.h b/include/asm-ia64/topology.h
index 399bc29729fd..a9f738bf18a7 100644
--- a/include/asm-ia64/topology.h
+++ b/include/asm-ia64/topology.h
@@ -98,29 +98,6 @@ void build_cpu_to_node_map(void);
98 .nr_balance_failed = 0, \ 98 .nr_balance_failed = 0, \
99} 99}
100 100
101/* sched_domains SD_ALLNODES_INIT for IA64 NUMA machines */
102#define SD_ALLNODES_INIT (struct sched_domain) { \
103 .span = CPU_MASK_NONE, \
104 .parent = NULL, \
105 .groups = NULL, \
106 .min_interval = 64, \
107 .max_interval = 64*num_online_cpus(), \
108 .busy_factor = 128, \
109 .imbalance_pct = 133, \
110 .cache_hot_time = (10*1000000), \
111 .cache_nice_tries = 1, \
112 .busy_idx = 3, \
113 .idle_idx = 3, \
114 .newidle_idx = 0, /* unused */ \
115 .wake_idx = 0, /* unused */ \
116 .forkexec_idx = 0, /* unused */ \
117 .per_cpu_gain = 100, \
118 .flags = SD_LOAD_BALANCE, \
119 .last_balance = jiffies, \
120 .balance_interval = 64, \
121 .nr_balance_failed = 0, \
122}
123
124#endif /* CONFIG_NUMA */ 101#endif /* CONFIG_NUMA */
125 102
126#include <asm-generic/topology.h> 103#include <asm-generic/topology.h>
diff --git a/include/asm-ia64/uaccess.h b/include/asm-ia64/uaccess.h
index 8edd9a90949c..3a7829bb5954 100644
--- a/include/asm-ia64/uaccess.h
+++ b/include/asm-ia64/uaccess.h
@@ -72,13 +72,6 @@
72}) 72})
73#define access_ok(type, addr, size) __access_ok((addr), (size), get_fs()) 73#define access_ok(type, addr, size) __access_ok((addr), (size), get_fs())
74 74
75/* this function will go away soon - use access_ok() instead */
76static inline int __deprecated
77verify_area (int type, const void __user *addr, unsigned long size)
78{
79 return access_ok(type, addr, size) ? 0 : -EFAULT;
80}
81
82/* 75/*
83 * These are the main single-value transfer routines. They automatically 76 * These are the main single-value transfer routines. They automatically
84 * use the right size if we just have the right pointer type. 77 * use the right size if we just have the right pointer type.
diff --git a/include/asm-m32r/auxvec.h b/include/asm-m32r/auxvec.h
new file mode 100644
index 000000000000..f76dcc860fae
--- /dev/null
+++ b/include/asm-m32r/auxvec.h
@@ -0,0 +1,4 @@
1#ifndef _ASM_M32R__AUXVEC_H
2#define _ASM_M32R__AUXVEC_H
3
4#endif /* _ASM_M32R__AUXVEC_H */
diff --git a/include/asm-m32r/fcntl.h b/include/asm-m32r/fcntl.h
index 3e3089572028..46ab12db5739 100644
--- a/include/asm-m32r/fcntl.h
+++ b/include/asm-m32r/fcntl.h
@@ -1,92 +1 @@
1#ifndef _ASM_M32R_FCNTL_H #include <asm-generic/fcntl.h>
2#define _ASM_M32R_FCNTL_H
3
4/* $Id$ */
5
6/* orig : i386 2.4.18 */
7
8/* open/fcntl - O_SYNC is only implemented on blocks devices and on files
9 located on an ext2 file system */
10#define O_ACCMODE 0003
11#define O_RDONLY 00
12#define O_WRONLY 01
13#define O_RDWR 02
14#define O_CREAT 0100 /* not fcntl */
15#define O_EXCL 0200 /* not fcntl */
16#define O_NOCTTY 0400 /* not fcntl */
17#define O_TRUNC 01000 /* not fcntl */
18#define O_APPEND 02000
19#define O_NONBLOCK 04000
20#define O_NDELAY O_NONBLOCK
21#define O_SYNC 010000
22#define FASYNC 020000 /* fcntl, for BSD compatibility */
23#define O_DIRECT 040000 /* direct disk access hint */
24#define O_LARGEFILE 0100000
25#define O_DIRECTORY 0200000 /* must be a directory */
26#define O_NOFOLLOW 0400000 /* don't follow links */
27#define O_NOATIME 01000000
28
29#define F_DUPFD 0 /* dup */
30#define F_GETFD 1 /* get close_on_exec */
31#define F_SETFD 2 /* set/clear close_on_exec */
32#define F_GETFL 3 /* get file->f_flags */
33#define F_SETFL 4 /* set file->f_flags */
34#define F_GETLK 5
35#define F_SETLK 6
36#define F_SETLKW 7
37
38#define F_SETOWN 8 /* for sockets. */
39#define F_GETOWN 9 /* for sockets. */
40#define F_SETSIG 10 /* for sockets. */
41#define F_GETSIG 11 /* for sockets. */
42
43#define F_GETLK64 12 /* using 'struct flock64' */
44#define F_SETLK64 13
45#define F_SETLKW64 14
46
47/* for F_[GET|SET]FL */
48#define FD_CLOEXEC 1 /* actually anything with low bit set goes */
49
50/* for posix fcntl() and lockf() */
51#define F_RDLCK 0
52#define F_WRLCK 1
53#define F_UNLCK 2
54
55/* for old implementation of bsd flock () */
56#define F_EXLCK 4 /* or 3 */
57#define F_SHLCK 8 /* or 4 */
58
59/* for leases */
60#define F_INPROGRESS 16
61
62/* operations for bsd flock(), also used by the kernel implementation */
63#define LOCK_SH 1 /* shared lock */
64#define LOCK_EX 2 /* exclusive lock */
65#define LOCK_NB 4 /* or'd with one of the above to prevent
66 blocking */
67#define LOCK_UN 8 /* remove lock */
68
69#define LOCK_MAND 32 /* This is a mandatory flock */
70#define LOCK_READ 64 /* ... Which allows concurrent read operations */
71#define LOCK_WRITE 128 /* ... Which allows concurrent write operations */
72#define LOCK_RW 192 /* ... Which allows concurrent read & write ops */
73
74struct flock {
75 short l_type;
76 short l_whence;
77 off_t l_start;
78 off_t l_len;
79 pid_t l_pid;
80};
81
82struct flock64 {
83 short l_type;
84 short l_whence;
85 loff_t l_start;
86 loff_t l_len;
87 pid_t l_pid;
88};
89
90#define F_LINUX_SPECIFIC_BASE 1024
91
92#endif /* _ASM_M32R_FCNTL_H */
diff --git a/include/asm-m32r/futex.h b/include/asm-m32r/futex.h
new file mode 100644
index 000000000000..2cac5ecd9d00
--- /dev/null
+++ b/include/asm-m32r/futex.h
@@ -0,0 +1,53 @@
1#ifndef _ASM_FUTEX_H
2#define _ASM_FUTEX_H
3
4#ifdef __KERNEL__
5
6#include <linux/futex.h>
7#include <asm/errno.h>
8#include <asm/uaccess.h>
9
10static inline int
11futex_atomic_op_inuser (int encoded_op, int __user *uaddr)
12{
13 int op = (encoded_op >> 28) & 7;
14 int cmp = (encoded_op >> 24) & 15;
15 int oparg = (encoded_op << 8) >> 20;
16 int cmparg = (encoded_op << 20) >> 20;
17 int oldval = 0, ret, tem;
18 if (encoded_op & (FUTEX_OP_OPARG_SHIFT << 28))
19 oparg = 1 << oparg;
20
21 if (! access_ok (VERIFY_WRITE, uaddr, sizeof(int)))
22 return -EFAULT;
23
24 inc_preempt_count();
25
26 switch (op) {
27 case FUTEX_OP_SET:
28 case FUTEX_OP_ADD:
29 case FUTEX_OP_OR:
30 case FUTEX_OP_ANDN:
31 case FUTEX_OP_XOR:
32 default:
33 ret = -ENOSYS;
34 }
35
36 dec_preempt_count();
37
38 if (!ret) {
39 switch (cmp) {
40 case FUTEX_OP_CMP_EQ: ret = (oldval == cmparg); break;
41 case FUTEX_OP_CMP_NE: ret = (oldval != cmparg); break;
42 case FUTEX_OP_CMP_LT: ret = (oldval < cmparg); break;
43 case FUTEX_OP_CMP_GE: ret = (oldval >= cmparg); break;
44 case FUTEX_OP_CMP_LE: ret = (oldval <= cmparg); break;
45 case FUTEX_OP_CMP_GT: ret = (oldval > cmparg); break;
46 default: ret = -ENOSYS;
47 }
48 }
49 return ret;
50}
51
52#endif
53#endif
diff --git a/include/asm-m32r/hdreg.h b/include/asm-m32r/hdreg.h
deleted file mode 100644
index 7f7fd1af0af3..000000000000
--- a/include/asm-m32r/hdreg.h
+++ /dev/null
@@ -1 +0,0 @@
1#include <asm-generic/hdreg.h>
diff --git a/include/asm-m32r/uaccess.h b/include/asm-m32r/uaccess.h
index bbb8ac4018a0..93d863c455a1 100644
--- a/include/asm-m32r/uaccess.h
+++ b/include/asm-m32r/uaccess.h
@@ -120,31 +120,6 @@ static inline int access_ok(int type, const void *addr, unsigned long size)
120} 120}
121#endif /* CONFIG_MMU */ 121#endif /* CONFIG_MMU */
122 122
123/**
124 * verify_area: - Obsolete/deprecated and will go away soon,
125 * use access_ok() instead.
126 * @type: Type of access: %VERIFY_READ or %VERIFY_WRITE
127 * @addr: User space pointer to start of block to check
128 * @size: Size of block to check
129 *
130 * Context: User context only. This function may sleep.
131 *
132 * This function has been replaced by access_ok().
133 *
134 * Checks if a pointer to a block of memory in user space is valid.
135 *
136 * Returns zero if the memory block may be valid, -EFAULT
137 * if it is definitely invalid.
138 *
139 * See access_ok() for more details.
140 */
141static inline int __deprecated verify_area(int type, const void __user *addr,
142 unsigned long size)
143{
144 return access_ok(type, addr, size) ? 0 : -EFAULT;
145}
146
147
148/* 123/*
149 * The exception table consists of pairs of addresses: the first is the 124 * The exception table consists of pairs of addresses: the first is the
150 * address of an instruction that is allowed to fault, and the second is 125 * address of an instruction that is allowed to fault, and the second is
diff --git a/include/asm-m68k/auxvec.h b/include/asm-m68k/auxvec.h
new file mode 100644
index 000000000000..844d6d52204b
--- /dev/null
+++ b/include/asm-m68k/auxvec.h
@@ -0,0 +1,4 @@
1#ifndef __ASMm68k_AUXVEC_H
2#define __ASMm68k_AUXVEC_H
3
4#endif
diff --git a/include/asm-m68k/fcntl.h b/include/asm-m68k/fcntl.h
index 0d4212983a33..1c369b20dc45 100644
--- a/include/asm-m68k/fcntl.h
+++ b/include/asm-m68k/fcntl.h
@@ -1,87 +1,11 @@
1#ifndef _M68K_FCNTL_H 1#ifndef _M68K_FCNTL_H
2#define _M68K_FCNTL_H 2#define _M68K_FCNTL_H
3 3
4/* open/fcntl - O_SYNC is only implemented on blocks devices and on files
5 located on an ext2 file system */
6#define O_ACCMODE 0003
7#define O_RDONLY 00
8#define O_WRONLY 01
9#define O_RDWR 02
10#define O_CREAT 0100 /* not fcntl */
11#define O_EXCL 0200 /* not fcntl */
12#define O_NOCTTY 0400 /* not fcntl */
13#define O_TRUNC 01000 /* not fcntl */
14#define O_APPEND 02000
15#define O_NONBLOCK 04000
16#define O_NDELAY O_NONBLOCK
17#define O_SYNC 010000
18#define FASYNC 020000 /* fcntl, for BSD compatibility */
19#define O_DIRECTORY 040000 /* must be a directory */ 4#define O_DIRECTORY 040000 /* must be a directory */
20#define O_NOFOLLOW 0100000 /* don't follow links */ 5#define O_NOFOLLOW 0100000 /* don't follow links */
21#define O_DIRECT 0200000 /* direct disk access hint - currently ignored */ 6#define O_DIRECT 0200000 /* direct disk access hint - currently ignored */
22#define O_LARGEFILE 0400000 7#define O_LARGEFILE 0400000
23#define O_NOATIME 01000000
24 8
25#define F_DUPFD 0 /* dup */ 9#include <asm-generic/fcntl.h>
26#define F_GETFD 1 /* get close_on_exec */
27#define F_SETFD 2 /* set/clear close_on_exec */
28#define F_GETFL 3 /* get file->f_flags */
29#define F_SETFL 4 /* set file->f_flags */
30#define F_GETLK 5
31#define F_SETLK 6
32#define F_SETLKW 7
33 10
34#define F_SETOWN 8 /* for sockets. */
35#define F_GETOWN 9 /* for sockets. */
36#define F_SETSIG 10 /* for sockets. */
37#define F_GETSIG 11 /* for sockets. */
38
39#define F_GETLK64 12 /* using 'struct flock64' */
40#define F_SETLK64 13
41#define F_SETLKW64 14
42
43/* for F_[GET|SET]FL */
44#define FD_CLOEXEC 1 /* actually anything with low bit set goes */
45
46/* for posix fcntl() and lockf() */
47#define F_RDLCK 0
48#define F_WRLCK 1
49#define F_UNLCK 2
50
51/* for old implementation of bsd flock () */
52#define F_EXLCK 4 /* or 3 */
53#define F_SHLCK 8 /* or 4 */
54
55/* for leases */
56#define F_INPROGRESS 16
57
58/* operations for bsd flock(), also used by the kernel implementation */
59#define LOCK_SH 1 /* shared lock */
60#define LOCK_EX 2 /* exclusive lock */
61#define LOCK_NB 4 /* or'd with one of the above to prevent
62 blocking */
63#define LOCK_UN 8 /* remove lock */
64
65#define LOCK_MAND 32 /* This is a mandatory flock */
66#define LOCK_READ 64 /* ... Which allows concurrent read operations */
67#define LOCK_WRITE 128 /* ... Which allows concurrent write operations */
68#define LOCK_RW 192 /* ... Which allows concurrent read & write ops */
69
70struct flock {
71 short l_type;
72 short l_whence;
73 off_t l_start;
74 off_t l_len;
75 pid_t l_pid;
76};
77
78struct flock64 {
79 short l_type;
80 short l_whence;
81 loff_t l_start;
82 loff_t l_len;
83 pid_t l_pid;
84};
85
86#define F_LINUX_SPECIFIC_BASE 1024
87#endif /* _M68K_FCNTL_H */ 11#endif /* _M68K_FCNTL_H */
diff --git a/include/asm-m68k/futex.h b/include/asm-m68k/futex.h
new file mode 100644
index 000000000000..2cac5ecd9d00
--- /dev/null
+++ b/include/asm-m68k/futex.h
@@ -0,0 +1,53 @@
1#ifndef _ASM_FUTEX_H
2#define _ASM_FUTEX_H
3
4#ifdef __KERNEL__
5
6#include <linux/futex.h>
7#include <asm/errno.h>
8#include <asm/uaccess.h>
9
10static inline int
11futex_atomic_op_inuser (int encoded_op, int __user *uaddr)
12{
13 int op = (encoded_op >> 28) & 7;
14 int cmp = (encoded_op >> 24) & 15;
15 int oparg = (encoded_op << 8) >> 20;
16 int cmparg = (encoded_op << 20) >> 20;
17 int oldval = 0, ret, tem;
18 if (encoded_op & (FUTEX_OP_OPARG_SHIFT << 28))
19 oparg = 1 << oparg;
20
21 if (! access_ok (VERIFY_WRITE, uaddr, sizeof(int)))
22 return -EFAULT;
23
24 inc_preempt_count();
25
26 switch (op) {
27 case FUTEX_OP_SET:
28 case FUTEX_OP_ADD:
29 case FUTEX_OP_OR:
30 case FUTEX_OP_ANDN:
31 case FUTEX_OP_XOR:
32 default:
33 ret = -ENOSYS;
34 }
35
36 dec_preempt_count();
37
38 if (!ret) {
39 switch (cmp) {
40 case FUTEX_OP_CMP_EQ: ret = (oldval == cmparg); break;
41 case FUTEX_OP_CMP_NE: ret = (oldval != cmparg); break;
42 case FUTEX_OP_CMP_LT: ret = (oldval < cmparg); break;
43 case FUTEX_OP_CMP_GE: ret = (oldval >= cmparg); break;
44 case FUTEX_OP_CMP_LE: ret = (oldval <= cmparg); break;
45 case FUTEX_OP_CMP_GT: ret = (oldval > cmparg); break;
46 default: ret = -ENOSYS;
47 }
48 }
49 return ret;
50}
51
52#endif
53#endif
diff --git a/include/asm-m68k/hdreg.h b/include/asm-m68k/hdreg.h
deleted file mode 100644
index 5989bbc97cbf..000000000000
--- a/include/asm-m68k/hdreg.h
+++ /dev/null
@@ -1 +0,0 @@
1#warning this file is obsolete, please do not use it
diff --git a/include/asm-m68k/uaccess.h b/include/asm-m68k/uaccess.h
index 605e6cb811f8..f5cedf19cf68 100644
--- a/include/asm-m68k/uaccess.h
+++ b/include/asm-m68k/uaccess.h
@@ -14,12 +14,6 @@
14/* We let the MMU do all checking */ 14/* We let the MMU do all checking */
15#define access_ok(type,addr,size) 1 15#define access_ok(type,addr,size) 1
16 16
17/* this function will go away soon - use access_ok() instead */
18static inline int __deprecated verify_area(int type, const void *addr, unsigned long size)
19{
20 return access_ok(type,addr,size) ? 0 : -EFAULT;
21}
22
23/* 17/*
24 * The exception table consists of pairs of addresses: the first is the 18 * The exception table consists of pairs of addresses: the first is the
25 * address of an instruction that is allowed to fault, and the second is 19 * address of an instruction that is allowed to fault, and the second is
diff --git a/include/asm-m68knommu/auxvec.h b/include/asm-m68knommu/auxvec.h
new file mode 100644
index 000000000000..844d6d52204b
--- /dev/null
+++ b/include/asm-m68knommu/auxvec.h
@@ -0,0 +1,4 @@
1#ifndef __ASMm68k_AUXVEC_H
2#define __ASMm68k_AUXVEC_H
3
4#endif
diff --git a/include/asm-m68knommu/futex.h b/include/asm-m68knommu/futex.h
new file mode 100644
index 000000000000..2cac5ecd9d00
--- /dev/null
+++ b/include/asm-m68knommu/futex.h
@@ -0,0 +1,53 @@
1#ifndef _ASM_FUTEX_H
2#define _ASM_FUTEX_H
3
4#ifdef __KERNEL__
5
6#include <linux/futex.h>
7#include <asm/errno.h>
8#include <asm/uaccess.h>
9
10static inline int
11futex_atomic_op_inuser (int encoded_op, int __user *uaddr)
12{
13 int op = (encoded_op >> 28) & 7;
14 int cmp = (encoded_op >> 24) & 15;
15 int oparg = (encoded_op << 8) >> 20;
16 int cmparg = (encoded_op << 20) >> 20;
17 int oldval = 0, ret, tem;
18 if (encoded_op & (FUTEX_OP_OPARG_SHIFT << 28))
19 oparg = 1 << oparg;
20
21 if (! access_ok (VERIFY_WRITE, uaddr, sizeof(int)))
22 return -EFAULT;
23
24 inc_preempt_count();
25
26 switch (op) {
27 case FUTEX_OP_SET:
28 case FUTEX_OP_ADD:
29 case FUTEX_OP_OR:
30 case FUTEX_OP_ANDN:
31 case FUTEX_OP_XOR:
32 default:
33 ret = -ENOSYS;
34 }
35
36 dec_preempt_count();
37
38 if (!ret) {
39 switch (cmp) {
40 case FUTEX_OP_CMP_EQ: ret = (oldval == cmparg); break;
41 case FUTEX_OP_CMP_NE: ret = (oldval != cmparg); break;
42 case FUTEX_OP_CMP_LT: ret = (oldval < cmparg); break;
43 case FUTEX_OP_CMP_GE: ret = (oldval >= cmparg); break;
44 case FUTEX_OP_CMP_LE: ret = (oldval <= cmparg); break;
45 case FUTEX_OP_CMP_GT: ret = (oldval > cmparg); break;
46 default: ret = -ENOSYS;
47 }
48 }
49 return ret;
50}
51
52#endif
53#endif
diff --git a/include/asm-m68knommu/hdreg.h b/include/asm-m68knommu/hdreg.h
deleted file mode 100644
index 5cdd9b084d37..000000000000
--- a/include/asm-m68knommu/hdreg.h
+++ /dev/null
@@ -1 +0,0 @@
1#include <asm-m68k/hdreg.h>
diff --git a/include/asm-m68knommu/uaccess.h b/include/asm-m68knommu/uaccess.h
index f0be74bb353c..05be9515a2d2 100644
--- a/include/asm-m68knommu/uaccess.h
+++ b/include/asm-m68knommu/uaccess.h
@@ -23,12 +23,6 @@ static inline int _access_ok(unsigned long addr, unsigned long size)
23 (is_in_rom(addr) && is_in_rom(addr+size))); 23 (is_in_rom(addr) && is_in_rom(addr+size)));
24} 24}
25 25
26/* this function will go away soon - use access_ok() instead */
27extern inline int __deprecated verify_area(int type, const void * addr, unsigned long size)
28{
29 return access_ok(type,addr,size)?0:-EFAULT;
30}
31
32/* 26/*
33 * The exception table consists of pairs of addresses: the first is the 27 * The exception table consists of pairs of addresses: the first is the
34 * address of an instruction that is allowed to fault, and the second is 28 * address of an instruction that is allowed to fault, and the second is
diff --git a/include/asm-mips/auxvec.h b/include/asm-mips/auxvec.h
new file mode 100644
index 000000000000..7cf7f2d21943
--- /dev/null
+++ b/include/asm-mips/auxvec.h
@@ -0,0 +1,4 @@
1#ifndef _ASM_AUXVEC_H
2#define _ASM_AUXVEC_H
3
4#endif /* _ASM_AUXVEC_H */
diff --git a/include/asm-mips/compat.h b/include/asm-mips/compat.h
index d78002afb1e1..2c084cd4bc0a 100644
--- a/include/asm-mips/compat.h
+++ b/include/asm-mips/compat.h
@@ -15,8 +15,10 @@ typedef s32 compat_clock_t;
15typedef s32 compat_suseconds_t; 15typedef s32 compat_suseconds_t;
16 16
17typedef s32 compat_pid_t; 17typedef s32 compat_pid_t;
18typedef s32 compat_uid_t; 18typedef u32 __compat_uid_t;
19typedef s32 compat_gid_t; 19typedef u32 __compat_gid_t;
20typedef u32 __compat_uid32_t;
21typedef u32 __compat_gid32_t;
20typedef u32 compat_mode_t; 22typedef u32 compat_mode_t;
21typedef u32 compat_ino_t; 23typedef u32 compat_ino_t;
22typedef u32 compat_dev_t; 24typedef u32 compat_dev_t;
@@ -52,8 +54,8 @@ struct compat_stat {
52 compat_ino_t st_ino; 54 compat_ino_t st_ino;
53 compat_mode_t st_mode; 55 compat_mode_t st_mode;
54 compat_nlink_t st_nlink; 56 compat_nlink_t st_nlink;
55 compat_uid_t st_uid; 57 __compat_uid32_t st_uid;
56 compat_gid_t st_gid; 58 __compat_gid32_t st_gid;
57 compat_dev_t st_rdev; 59 compat_dev_t st_rdev;
58 s32 st_pad2[2]; 60 s32 st_pad2[2];
59 compat_off_t st_size; 61 compat_off_t st_size;
diff --git a/include/asm-mips/fcntl.h b/include/asm-mips/fcntl.h
index 2436392e7990..06c5d13faf66 100644
--- a/include/asm-mips/fcntl.h
+++ b/include/asm-mips/fcntl.h
@@ -8,33 +8,16 @@
8#ifndef _ASM_FCNTL_H 8#ifndef _ASM_FCNTL_H
9#define _ASM_FCNTL_H 9#define _ASM_FCNTL_H
10 10
11/* open/fcntl - O_SYNC is only implemented on blocks devices and on files
12 located on an ext2 file system */
13#define O_ACCMODE 0x0003
14#define O_RDONLY 0x0000
15#define O_WRONLY 0x0001
16#define O_RDWR 0x0002
17#define O_APPEND 0x0008 11#define O_APPEND 0x0008
18#define O_SYNC 0x0010 12#define O_SYNC 0x0010
19#define O_NONBLOCK 0x0080 13#define O_NONBLOCK 0x0080
20#define O_CREAT 0x0100 /* not fcntl */ 14#define O_CREAT 0x0100 /* not fcntl */
21#define O_TRUNC 0x0200 /* not fcntl */
22#define O_EXCL 0x0400 /* not fcntl */ 15#define O_EXCL 0x0400 /* not fcntl */
23#define O_NOCTTY 0x0800 /* not fcntl */ 16#define O_NOCTTY 0x0800 /* not fcntl */
24#define FASYNC 0x1000 /* fcntl, for BSD compatibility */ 17#define FASYNC 0x1000 /* fcntl, for BSD compatibility */
25#define O_LARGEFILE 0x2000 /* allow large file opens */ 18#define O_LARGEFILE 0x2000 /* allow large file opens */
26#define O_DIRECT 0x8000 /* direct disk access hint */ 19#define O_DIRECT 0x8000 /* direct disk access hint */
27#define O_DIRECTORY 0x10000 /* must be a directory */
28#define O_NOFOLLOW 0x20000 /* don't follow links */
29#define O_NOATIME 0x40000
30 20
31#define O_NDELAY O_NONBLOCK
32
33#define F_DUPFD 0 /* dup */
34#define F_GETFD 1 /* get close_on_exec */
35#define F_SETFD 2 /* set/clear close_on_exec */
36#define F_GETFL 3 /* get file->f_flags */
37#define F_SETFL 4 /* set file->f_flags */
38#define F_GETLK 14 21#define F_GETLK 14
39#define F_SETLK 6 22#define F_SETLK 6
40#define F_SETLKW 7 23#define F_SETLKW 7
@@ -50,33 +33,6 @@
50#define F_SETLKW64 35 33#define F_SETLKW64 35
51#endif 34#endif
52 35
53/* for F_[GET|SET]FL */
54#define FD_CLOEXEC 1 /* actually anything with low bit set goes */
55
56/* for posix fcntl() and lockf() */
57#define F_RDLCK 0
58#define F_WRLCK 1
59#define F_UNLCK 2
60
61/* for old implementation of bsd flock () */
62#define F_EXLCK 4 /* or 3 */
63#define F_SHLCK 8 /* or 4 */
64
65/* for leases */
66#define F_INPROGRESS 16
67
68/* operations for bsd flock(), also used by the kernel implementation */
69#define LOCK_SH 1 /* shared lock */
70#define LOCK_EX 2 /* exclusive lock */
71#define LOCK_NB 4 /* or'd with one of the above to prevent
72 blocking */
73#define LOCK_UN 8 /* remove lock */
74
75#define LOCK_MAND 32 /* This is a mandatory flock */
76#define LOCK_READ 64 /* ... Which allows concurrent read operations */
77#define LOCK_WRITE 128 /* ... Which allows concurrent write operations */
78#define LOCK_RW 192 /* ... Which allows concurrent read & write ops */
79
80/* 36/*
81 * The flavours of struct flock. "struct flock" is the ABI compliant 37 * The flavours of struct flock. "struct flock" is the ABI compliant
82 * variant. Finally struct flock64 is the LFS variant of struct flock. As 38 * variant. Finally struct flock64 is the LFS variant of struct flock. As
@@ -86,7 +42,7 @@
86 42
87#ifndef __mips64 43#ifndef __mips64
88 44
89typedef struct flock { 45struct flock {
90 short l_type; 46 short l_type;
91 short l_whence; 47 short l_whence;
92 __kernel_off_t l_start; 48 __kernel_off_t l_start;
@@ -94,32 +50,17 @@ typedef struct flock {
94 long l_sysid; 50 long l_sysid;
95 __kernel_pid_t l_pid; 51 __kernel_pid_t l_pid;
96 long pad[4]; 52 long pad[4];
97} flock_t; 53};
98
99typedef struct flock64 {
100 short l_type;
101 short l_whence;
102 loff_t l_start;
103 loff_t l_len;
104 pid_t l_pid;
105} flock64_t;
106 54
107#else /* 64-bit definitions */ 55#define HAVE_ARCH_STRUCT_FLOCK
108 56
109typedef struct flock {
110 short l_type;
111 short l_whence;
112 __kernel_off_t l_start;
113 __kernel_off_t l_len;
114 __kernel_pid_t l_pid;
115} flock_t;
116
117#ifdef __KERNEL__
118#define flock64 flock
119#endif 57#endif
120 58
121#endif 59#include <asm-generic/fcntl.h>
122 60
123#define F_LINUX_SPECIFIC_BASE 1024 61typedef struct flock flock_t;
62#ifndef __mips64
63typedef struct flock64 flock64_t;
64#endif
124 65
125#endif /* _ASM_FCNTL_H */ 66#endif /* _ASM_FCNTL_H */
diff --git a/include/asm-mips/futex.h b/include/asm-mips/futex.h
new file mode 100644
index 000000000000..9feff4ce1424
--- /dev/null
+++ b/include/asm-mips/futex.h
@@ -0,0 +1,53 @@
1#ifndef _ASM_FUTEX_H
2#define _ASM_FUTEX_H
3
4#ifdef __KERNEL__
5
6#include <linux/futex.h>
7#include <asm/errno.h>
8#include <asm/uaccess.h>
9
10static inline int
11futex_atomic_op_inuser (int encoded_op, int __user *uaddr)
12{
13 int op = (encoded_op >> 28) & 7;
14 int cmp = (encoded_op >> 24) & 15;
15 int oparg = (encoded_op << 8) >> 20;
16 int cmparg = (encoded_op << 20) >> 20;
17 int oldval = 0, ret;
18 if (encoded_op & (FUTEX_OP_OPARG_SHIFT << 28))
19 oparg = 1 << oparg;
20
21 if (! access_ok (VERIFY_WRITE, uaddr, sizeof(int)))
22 return -EFAULT;
23
24 inc_preempt_count();
25
26 switch (op) {
27 case FUTEX_OP_SET:
28 case FUTEX_OP_ADD:
29 case FUTEX_OP_OR:
30 case FUTEX_OP_ANDN:
31 case FUTEX_OP_XOR:
32 default:
33 ret = -ENOSYS;
34 }
35
36 dec_preempt_count();
37
38 if (!ret) {
39 switch (cmp) {
40 case FUTEX_OP_CMP_EQ: ret = (oldval == cmparg); break;
41 case FUTEX_OP_CMP_NE: ret = (oldval != cmparg); break;
42 case FUTEX_OP_CMP_LT: ret = (oldval < cmparg); break;
43 case FUTEX_OP_CMP_GE: ret = (oldval >= cmparg); break;
44 case FUTEX_OP_CMP_LE: ret = (oldval <= cmparg); break;
45 case FUTEX_OP_CMP_GT: ret = (oldval > cmparg); break;
46 default: ret = -ENOSYS;
47 }
48 }
49 return ret;
50}
51
52#endif
53#endif
diff --git a/include/asm-mips/hdreg.h b/include/asm-mips/hdreg.h
deleted file mode 100644
index 5989bbc97cbf..000000000000
--- a/include/asm-mips/hdreg.h
+++ /dev/null
@@ -1 +0,0 @@
1#warning this file is obsolete, please do not use it
diff --git a/include/asm-mips/uaccess.h b/include/asm-mips/uaccess.h
index a543ead72ecf..5c2c98329012 100644
--- a/include/asm-mips/uaccess.h
+++ b/include/asm-mips/uaccess.h
@@ -112,29 +112,6 @@
112 likely(__access_ok((unsigned long)(addr), (size),__access_mask)) 112 likely(__access_ok((unsigned long)(addr), (size),__access_mask))
113 113
114/* 114/*
115 * verify_area: - Obsolete/deprecated and will go away soon,
116 * use access_ok() instead.
117 * @type: Type of access: %VERIFY_READ or %VERIFY_WRITE
118 * @addr: User space pointer to start of block to check
119 * @size: Size of block to check
120 *
121 * Context: User context only. This function may sleep.
122 *
123 * This function has been replaced by access_ok().
124 *
125 * Checks if a pointer to a block of memory in user space is valid.
126 *
127 * Returns zero if the memory block may be valid, -EFAULT
128 * if it is definitely invalid.
129 *
130 * See access_ok() for more details.
131 */
132static inline int __deprecated verify_area(int type, const void * addr, unsigned long size)
133{
134 return access_ok(type, addr, size) ? 0 : -EFAULT;
135}
136
137/*
138 * put_user: - Write a simple value into user space. 115 * put_user: - Write a simple value into user space.
139 * @x: Value to copy to user space. 116 * @x: Value to copy to user space.
140 * @ptr: Destination address, in user space. 117 * @ptr: Destination address, in user space.
diff --git a/include/asm-parisc/auxvec.h b/include/asm-parisc/auxvec.h
new file mode 100644
index 000000000000..9c3ac4b89dc9
--- /dev/null
+++ b/include/asm-parisc/auxvec.h
@@ -0,0 +1,4 @@
1#ifndef __ASMPARISC_AUXVEC_H
2#define __ASMPARISC_AUXVEC_H
3
4#endif
diff --git a/include/asm-parisc/compat.h b/include/asm-parisc/compat.h
index 7630d1ad2391..38b918feead9 100644
--- a/include/asm-parisc/compat.h
+++ b/include/asm-parisc/compat.h
@@ -13,8 +13,10 @@ typedef s32 compat_ssize_t;
13typedef s32 compat_time_t; 13typedef s32 compat_time_t;
14typedef s32 compat_clock_t; 14typedef s32 compat_clock_t;
15typedef s32 compat_pid_t; 15typedef s32 compat_pid_t;
16typedef u32 compat_uid_t; 16typedef u32 __compat_uid_t;
17typedef u32 compat_gid_t; 17typedef u32 __compat_gid_t;
18typedef u32 __compat_uid32_t;
19typedef u32 __compat_gid32_t;
18typedef u16 compat_mode_t; 20typedef u16 compat_mode_t;
19typedef u32 compat_ino_t; 21typedef u32 compat_ino_t;
20typedef u32 compat_dev_t; 22typedef u32 compat_dev_t;
@@ -67,8 +69,8 @@ struct compat_stat {
67 compat_dev_t st_realdev; 69 compat_dev_t st_realdev;
68 u16 st_basemode; 70 u16 st_basemode;
69 u16 st_spareshort; 71 u16 st_spareshort;
70 compat_uid_t st_uid; 72 __compat_uid32_t st_uid;
71 compat_gid_t st_gid; 73 __compat_gid32_t st_gid;
72 u32 st_spare4[3]; 74 u32 st_spare4[3];
73}; 75};
74 76
diff --git a/include/asm-parisc/fcntl.h b/include/asm-parisc/fcntl.h
index def35230716a..317851fa78f3 100644
--- a/include/asm-parisc/fcntl.h
+++ b/include/asm-parisc/fcntl.h
@@ -3,38 +3,22 @@
3 3
4/* open/fcntl - O_SYNC is only implemented on blocks devices and on files 4/* open/fcntl - O_SYNC is only implemented on blocks devices and on files
5 located on an ext2 file system */ 5 located on an ext2 file system */
6#define O_ACCMODE 00000003
7#define O_RDONLY 00000000
8#define O_WRONLY 00000001
9#define O_RDWR 00000002
10#define O_APPEND 00000010 6#define O_APPEND 00000010
11#define O_BLKSEEK 00000100 /* HPUX only */ 7#define O_BLKSEEK 00000100 /* HPUX only */
12#define O_CREAT 00000400 /* not fcntl */ 8#define O_CREAT 00000400 /* not fcntl */
13#define O_TRUNC 00001000 /* not fcntl */
14#define O_EXCL 00002000 /* not fcntl */ 9#define O_EXCL 00002000 /* not fcntl */
15#define O_LARGEFILE 00004000 10#define O_LARGEFILE 00004000
16#define O_SYNC 00100000 11#define O_SYNC 00100000
17#define O_NONBLOCK 00200004 /* HPUX has separate NDELAY & NONBLOCK */ 12#define O_NONBLOCK 00200004 /* HPUX has separate NDELAY & NONBLOCK */
18#define O_NDELAY O_NONBLOCK
19#define O_NOCTTY 00400000 /* not fcntl */ 13#define O_NOCTTY 00400000 /* not fcntl */
20#define O_DSYNC 01000000 /* HPUX only */ 14#define O_DSYNC 01000000 /* HPUX only */
21#define O_RSYNC 02000000 /* HPUX only */ 15#define O_RSYNC 02000000 /* HPUX only */
22#define O_NOATIME 04000000 16#define O_NOATIME 04000000
23 17
24#define FASYNC 00020000 /* fcntl, for BSD compatibility */
25#define O_DIRECT 00040000 /* direct disk access hint - currently ignored */
26#define O_DIRECTORY 00010000 /* must be a directory */ 18#define O_DIRECTORY 00010000 /* must be a directory */
27#define O_NOFOLLOW 00000200 /* don't follow links */ 19#define O_NOFOLLOW 00000200 /* don't follow links */
28#define O_INVISIBLE 04000000 /* invisible I/O, for DMAPI/XDSM */ 20#define O_INVISIBLE 04000000 /* invisible I/O, for DMAPI/XDSM */
29 21
30#define F_DUPFD 0 /* dup */
31#define F_GETFD 1 /* get f_flags */
32#define F_SETFD 2 /* set f_flags */
33#define F_GETFL 3 /* more flags (cloexec) */
34#define F_SETFL 4
35#define F_GETLK 5
36#define F_SETLK 6
37#define F_SETLKW 7
38#define F_GETLK64 8 22#define F_GETLK64 8
39#define F_SETLK64 9 23#define F_SETLK64 9
40#define F_SETLKW64 10 24#define F_SETLKW64 10
@@ -44,49 +28,11 @@
44#define F_SETSIG 13 /* for sockets. */ 28#define F_SETSIG 13 /* for sockets. */
45#define F_GETSIG 14 /* for sockets. */ 29#define F_GETSIG 14 /* for sockets. */
46 30
47/* for F_[GET|SET]FL */
48#define FD_CLOEXEC 1 /* actually anything with low bit set goes */
49
50/* for posix fcntl() and lockf() */ 31/* for posix fcntl() and lockf() */
51#define F_RDLCK 01 32#define F_RDLCK 01
52#define F_WRLCK 02 33#define F_WRLCK 02
53#define F_UNLCK 03 34#define F_UNLCK 03
54 35
55/* for old implementation of bsd flock () */ 36#include <asm-generic/fcntl.h>
56#define F_EXLCK 4 /* or 3 */
57#define F_SHLCK 8 /* or 4 */
58
59/* for leases */
60#define F_INPROGRESS 16
61
62/* operations for bsd flock(), also used by the kernel implementation */
63#define LOCK_SH 1 /* shared lock */
64#define LOCK_EX 2 /* exclusive lock */
65#define LOCK_NB 4 /* or'd with one of the above to prevent
66 blocking */
67#define LOCK_UN 8 /* remove lock */
68
69#define LOCK_MAND 32 /* This is a mandatory flock */
70#define LOCK_READ 64 /* ... Which allows concurrent read operations */
71#define LOCK_WRITE 128 /* ... Which allows concurrent write operations */
72#define LOCK_RW 192 /* ... Which allows concurrent read & write ops */
73
74struct flock {
75 short l_type;
76 short l_whence;
77 off_t l_start;
78 off_t l_len;
79 pid_t l_pid;
80};
81
82struct flock64 {
83 short l_type;
84 short l_whence;
85 loff_t l_start;
86 loff_t l_len;
87 pid_t l_pid;
88};
89
90#define F_LINUX_SPECIFIC_BASE 1024
91 37
92#endif 38#endif
diff --git a/include/asm-parisc/futex.h b/include/asm-parisc/futex.h
new file mode 100644
index 000000000000..2cac5ecd9d00
--- /dev/null
+++ b/include/asm-parisc/futex.h
@@ -0,0 +1,53 @@
1#ifndef _ASM_FUTEX_H
2#define _ASM_FUTEX_H
3
4#ifdef __KERNEL__
5
6#include <linux/futex.h>
7#include <asm/errno.h>
8#include <asm/uaccess.h>
9
10static inline int
11futex_atomic_op_inuser (int encoded_op, int __user *uaddr)
12{
13 int op = (encoded_op >> 28) & 7;
14 int cmp = (encoded_op >> 24) & 15;
15 int oparg = (encoded_op << 8) >> 20;
16 int cmparg = (encoded_op << 20) >> 20;
17 int oldval = 0, ret, tem;
18 if (encoded_op & (FUTEX_OP_OPARG_SHIFT << 28))
19 oparg = 1 << oparg;
20
21 if (! access_ok (VERIFY_WRITE, uaddr, sizeof(int)))
22 return -EFAULT;
23
24 inc_preempt_count();
25
26 switch (op) {
27 case FUTEX_OP_SET:
28 case FUTEX_OP_ADD:
29 case FUTEX_OP_OR:
30 case FUTEX_OP_ANDN:
31 case FUTEX_OP_XOR:
32 default:
33 ret = -ENOSYS;
34 }
35
36 dec_preempt_count();
37
38 if (!ret) {
39 switch (cmp) {
40 case FUTEX_OP_CMP_EQ: ret = (oldval == cmparg); break;
41 case FUTEX_OP_CMP_NE: ret = (oldval != cmparg); break;
42 case FUTEX_OP_CMP_LT: ret = (oldval < cmparg); break;
43 case FUTEX_OP_CMP_GE: ret = (oldval >= cmparg); break;
44 case FUTEX_OP_CMP_LE: ret = (oldval <= cmparg); break;
45 case FUTEX_OP_CMP_GT: ret = (oldval > cmparg); break;
46 default: ret = -ENOSYS;
47 }
48 }
49 return ret;
50}
51
52#endif
53#endif
diff --git a/include/asm-parisc/hdreg.h b/include/asm-parisc/hdreg.h
deleted file mode 100644
index 7f7fd1af0af3..000000000000
--- a/include/asm-parisc/hdreg.h
+++ /dev/null
@@ -1 +0,0 @@
1#include <asm-generic/hdreg.h>
diff --git a/include/asm-parisc/irq.h b/include/asm-parisc/irq.h
index 75654ba93353..f876bdf22056 100644
--- a/include/asm-parisc/irq.h
+++ b/include/asm-parisc/irq.h
@@ -26,6 +26,11 @@
26 26
27#define NR_IRQS (CPU_IRQ_MAX + 1) 27#define NR_IRQS (CPU_IRQ_MAX + 1)
28 28
29/*
30 * IRQ line status macro IRQ_PER_CPU is used
31 */
32#define ARCH_HAS_IRQ_PER_CPU
33
29static __inline__ int irq_canonicalize(int irq) 34static __inline__ int irq_canonicalize(int irq)
30{ 35{
31 return (irq == 2) ? 9 : irq; 36 return (irq == 2) ? 9 : irq;
diff --git a/include/asm-parisc/uaccess.h b/include/asm-parisc/uaccess.h
index c1b5bdea53ee..f6c417c8c484 100644
--- a/include/asm-parisc/uaccess.h
+++ b/include/asm-parisc/uaccess.h
@@ -40,10 +40,6 @@ static inline long access_ok(int type, const void __user * addr,
40 return 1; 40 return 1;
41} 41}
42 42
43#define verify_area(type,addr,size) (0) /* FIXME: all users should go away soon,
44 * and use access_ok instead, then this
45 * should be removed. */
46
47#define put_user __put_user 43#define put_user __put_user
48#define get_user __get_user 44#define get_user __get_user
49 45
diff --git a/include/asm-powerpc/fcntl.h b/include/asm-powerpc/fcntl.h
new file mode 100644
index 000000000000..ce5c4516d404
--- /dev/null
+++ b/include/asm-powerpc/fcntl.h
@@ -0,0 +1,11 @@
1#ifndef _ASM_FCNTL_H
2#define _ASM_FCNTL_H
3
4#define O_DIRECTORY 040000 /* must be a directory */
5#define O_NOFOLLOW 0100000 /* don't follow links */
6#define O_LARGEFILE 0200000
7#define O_DIRECT 0400000 /* direct disk access hint */
8
9#include <asm-generic/fcntl.h>
10
11#endif /* _ASM_FCNTL_H */
diff --git a/include/asm-ppc/auxvec.h b/include/asm-ppc/auxvec.h
new file mode 100644
index 000000000000..172358df29c8
--- /dev/null
+++ b/include/asm-ppc/auxvec.h
@@ -0,0 +1,14 @@
1#ifndef __PPC_AUXVEC_H
2#define __PPC_AUXVEC_H
3
4/*
5 * We need to put in some extra aux table entries to tell glibc what
6 * the cache block size is, so it can use the dcbz instruction safely.
7 */
8#define AT_DCACHEBSIZE 19
9#define AT_ICACHEBSIZE 20
10#define AT_UCACHEBSIZE 21
11/* A special ignored type value for PPC, for glibc compatibility. */
12#define AT_IGNOREPPC 22
13
14#endif
diff --git a/include/asm-ppc/elf.h b/include/asm-ppc/elf.h
index 2c056966efd3..c25cc35e6ab5 100644
--- a/include/asm-ppc/elf.h
+++ b/include/asm-ppc/elf.h
@@ -7,6 +7,7 @@
7#include <asm/types.h> 7#include <asm/types.h>
8#include <asm/ptrace.h> 8#include <asm/ptrace.h>
9#include <asm/cputable.h> 9#include <asm/cputable.h>
10#include <asm/auxvec.h>
10 11
11/* PowerPC relocations defined by the ABIs */ 12/* PowerPC relocations defined by the ABIs */
12#define R_PPC_NONE 0 13#define R_PPC_NONE 0
@@ -122,16 +123,6 @@ extern int dump_task_fpu(struct task_struct *t, elf_fpregset_t *fpu);
122 123
123#define SET_PERSONALITY(ex, ibcs2) set_personality((ibcs2)?PER_SVR4:PER_LINUX) 124#define SET_PERSONALITY(ex, ibcs2) set_personality((ibcs2)?PER_SVR4:PER_LINUX)
124 125
125/*
126 * We need to put in some extra aux table entries to tell glibc what
127 * the cache block size is, so it can use the dcbz instruction safely.
128 */
129#define AT_DCACHEBSIZE 19
130#define AT_ICACHEBSIZE 20
131#define AT_UCACHEBSIZE 21
132/* A special ignored type value for PPC, for glibc compatibility. */
133#define AT_IGNOREPPC 22
134
135extern int dcache_bsize; 126extern int dcache_bsize;
136extern int icache_bsize; 127extern int icache_bsize;
137extern int ucache_bsize; 128extern int ucache_bsize;
diff --git a/include/asm-ppc/fcntl.h b/include/asm-ppc/fcntl.h
deleted file mode 100644
index 5e28e41fb29f..000000000000
--- a/include/asm-ppc/fcntl.h
+++ /dev/null
@@ -1,93 +0,0 @@
1#ifndef _PPC_FCNTL_H
2#define _PPC_FCNTL_H
3
4/* open/fcntl - O_SYNC is only implemented on blocks devices and on files
5 located on an ext2 file system */
6#define O_ACCMODE 0003
7#define O_RDONLY 00
8#define O_WRONLY 01
9#define O_RDWR 02
10#define O_CREAT 0100 /* not fcntl */
11#define O_EXCL 0200 /* not fcntl */
12#define O_NOCTTY 0400 /* not fcntl */
13#define O_TRUNC 01000 /* not fcntl */
14#define O_APPEND 02000
15#define O_NONBLOCK 04000
16#define O_NDELAY O_NONBLOCK
17#define O_SYNC 010000
18#define FASYNC 020000 /* fcntl, for BSD compatibility */
19#define O_DIRECTORY 040000 /* must be a directory */
20#define O_NOFOLLOW 0100000 /* don't follow links */
21#define O_LARGEFILE 0200000
22#define O_DIRECT 0400000 /* direct disk access hint */
23#define O_NOATIME 01000000
24
25#define F_DUPFD 0 /* dup */
26#define F_GETFD 1 /* get close_on_exec */
27#define F_SETFD 2 /* set/clear close_on_exec */
28#define F_GETFL 3 /* get file->f_flags */
29#define F_SETFL 4 /* set file->f_flags */
30#define F_GETLK 5
31#define F_SETLK 6
32#define F_SETLKW 7
33
34#define F_SETOWN 8 /* for sockets. */
35#define F_GETOWN 9 /* for sockets. */
36#define F_SETSIG 10 /* for sockets. */
37#define F_GETSIG 11 /* for sockets. */
38
39#define F_GETLK64 12 /* using 'struct flock64' */
40#define F_SETLK64 13
41#define F_SETLKW64 14
42
43/* for F_[GET|SET]FL */
44#define FD_CLOEXEC 1 /* actually anything with low bit set goes */
45
46/* for posix fcntl() and lockf() */
47#define F_RDLCK 0
48#define F_WRLCK 1
49#define F_UNLCK 2
50
51/* for old implementation of bsd flock () */
52#define F_EXLCK 4 /* or 3 */
53#define F_SHLCK 8 /* or 4 */
54
55/* for leases */
56#define F_INPROGRESS 16
57
58/* operations for bsd flock(), also used by the kernel implementation */
59#define LOCK_SH 1 /* shared lock */
60#define LOCK_EX 2 /* exclusive lock */
61#define LOCK_NB 4 /* or'd with one of the above to prevent
62 blocking */
63#define LOCK_UN 8 /* remove lock */
64
65#define LOCK_MAND 32 /* This is a mandatory flock */
66#define LOCK_READ 64 /* ... Which allows concurrent read operations */
67#define LOCK_WRITE 128 /* ... Which allows concurrent write operations */
68#define LOCK_RW 192 /* ... Which allows concurrent read & write ops */
69
70#ifdef __KERNEL__
71#define F_POSIX 1
72#define F_FLOCK 2
73#define F_BROKEN 4 /* broken flock() emulation */
74#endif /* __KERNEL__ */
75
76struct flock {
77 short l_type;
78 short l_whence;
79 off_t l_start;
80 off_t l_len;
81 pid_t l_pid;
82};
83
84struct flock64 {
85 short l_type;
86 short l_whence;
87 loff_t l_start;
88 loff_t l_len;
89 pid_t l_pid;
90};
91
92#define F_LINUX_SPECIFIC_BASE 1024
93#endif
diff --git a/include/asm-ppc/futex.h b/include/asm-ppc/futex.h
new file mode 100644
index 000000000000..2cac5ecd9d00
--- /dev/null
+++ b/include/asm-ppc/futex.h
@@ -0,0 +1,53 @@
1#ifndef _ASM_FUTEX_H
2#define _ASM_FUTEX_H
3
4#ifdef __KERNEL__
5
6#include <linux/futex.h>
7#include <asm/errno.h>
8#include <asm/uaccess.h>
9
10static inline int
11futex_atomic_op_inuser (int encoded_op, int __user *uaddr)
12{
13 int op = (encoded_op >> 28) & 7;
14 int cmp = (encoded_op >> 24) & 15;
15 int oparg = (encoded_op << 8) >> 20;
16 int cmparg = (encoded_op << 20) >> 20;
17 int oldval = 0, ret, tem;
18 if (encoded_op & (FUTEX_OP_OPARG_SHIFT << 28))
19 oparg = 1 << oparg;
20
21 if (! access_ok (VERIFY_WRITE, uaddr, sizeof(int)))
22 return -EFAULT;
23
24 inc_preempt_count();
25
26 switch (op) {
27 case FUTEX_OP_SET:
28 case FUTEX_OP_ADD:
29 case FUTEX_OP_OR:
30 case FUTEX_OP_ANDN:
31 case FUTEX_OP_XOR:
32 default:
33 ret = -ENOSYS;
34 }
35
36 dec_preempt_count();
37
38 if (!ret) {
39 switch (cmp) {
40 case FUTEX_OP_CMP_EQ: ret = (oldval == cmparg); break;
41 case FUTEX_OP_CMP_NE: ret = (oldval != cmparg); break;
42 case FUTEX_OP_CMP_LT: ret = (oldval < cmparg); break;
43 case FUTEX_OP_CMP_GE: ret = (oldval >= cmparg); break;
44 case FUTEX_OP_CMP_LE: ret = (oldval <= cmparg); break;
45 case FUTEX_OP_CMP_GT: ret = (oldval > cmparg); break;
46 default: ret = -ENOSYS;
47 }
48 }
49 return ret;
50}
51
52#endif
53#endif
diff --git a/include/asm-ppc/ibm_ocp.h b/include/asm-ppc/ibm_ocp.h
index bd7656fa2026..6f10a25bd628 100644
--- a/include/asm-ppc/ibm_ocp.h
+++ b/include/asm-ppc/ibm_ocp.h
@@ -84,6 +84,7 @@ OCP_SYSFS_ADDTL(struct ocp_func_emac_data, "%d\n", emac, mdio_idx) \
84OCP_SYSFS_ADDTL(struct ocp_func_emac_data, "%d\n", emac, tah_idx) \ 84OCP_SYSFS_ADDTL(struct ocp_func_emac_data, "%d\n", emac, tah_idx) \
85OCP_SYSFS_ADDTL(struct ocp_func_emac_data, "%d\n", emac, phy_mode) \ 85OCP_SYSFS_ADDTL(struct ocp_func_emac_data, "%d\n", emac, phy_mode) \
86OCP_SYSFS_ADDTL(struct ocp_func_emac_data, "0x%08x\n", emac, phy_map) \ 86OCP_SYSFS_ADDTL(struct ocp_func_emac_data, "0x%08x\n", emac, phy_map) \
87OCP_SYSFS_ADDTL(struct ocp_func_emac_data, "0x%08x\n", emac, phy_feat_exc)\
87 \ 88 \
88void ocp_show_emac_data(struct device *dev) \ 89void ocp_show_emac_data(struct device *dev) \
89{ \ 90{ \
@@ -99,6 +100,7 @@ void ocp_show_emac_data(struct device *dev) \
99 device_create_file(dev, &dev_attr_emac_tah_idx); \ 100 device_create_file(dev, &dev_attr_emac_tah_idx); \
100 device_create_file(dev, &dev_attr_emac_phy_mode); \ 101 device_create_file(dev, &dev_attr_emac_phy_mode); \
101 device_create_file(dev, &dev_attr_emac_phy_map); \ 102 device_create_file(dev, &dev_attr_emac_phy_map); \
103 device_create_file(dev, &dev_attr_emac_phy_feat_exc); \
102} 104}
103 105
104/* 106/*
diff --git a/include/asm-ppc/irq.h b/include/asm-ppc/irq.h
index a244d93ca953..b4b270457edd 100644
--- a/include/asm-ppc/irq.h
+++ b/include/asm-ppc/irq.h
@@ -19,6 +19,11 @@
19#define IRQ_POLARITY_POSITIVE 0x2 /* high level or low->high edge */ 19#define IRQ_POLARITY_POSITIVE 0x2 /* high level or low->high edge */
20#define IRQ_POLARITY_NEGATIVE 0x0 /* low level or high->low edge */ 20#define IRQ_POLARITY_NEGATIVE 0x0 /* low level or high->low edge */
21 21
22/*
23 * IRQ line status macro IRQ_PER_CPU is used
24 */
25#define ARCH_HAS_IRQ_PER_CPU
26
22#if defined(CONFIG_40x) 27#if defined(CONFIG_40x)
23#include <asm/ibm4xx.h> 28#include <asm/ibm4xx.h>
24 29
diff --git a/include/asm-ppc/uaccess.h b/include/asm-ppc/uaccess.h
index b044ae03ac56..63f56224da8c 100644
--- a/include/asm-ppc/uaccess.h
+++ b/include/asm-ppc/uaccess.h
@@ -37,13 +37,6 @@
37#define access_ok(type, addr, size) \ 37#define access_ok(type, addr, size) \
38 (__chk_user_ptr(addr),__access_ok((unsigned long)(addr),(size))) 38 (__chk_user_ptr(addr),__access_ok((unsigned long)(addr),(size)))
39 39
40/* this function will go away soon - use access_ok() instead */
41extern inline int __deprecated verify_area(int type, const void __user * addr, unsigned long size)
42{
43 return access_ok(type, addr, size) ? 0 : -EFAULT;
44}
45
46
47/* 40/*
48 * The exception table consists of pairs of addresses: the first is the 41 * The exception table consists of pairs of addresses: the first is the
49 * address of an instruction that is allowed to fault, and the second is 42 * address of an instruction that is allowed to fault, and the second is
diff --git a/include/asm-ppc64/auxvec.h b/include/asm-ppc64/auxvec.h
new file mode 100644
index 000000000000..ac6381a106e1
--- /dev/null
+++ b/include/asm-ppc64/auxvec.h
@@ -0,0 +1,19 @@
1#ifndef __PPC64_AUXVEC_H
2#define __PPC64_AUXVEC_H
3
4/*
5 * We need to put in some extra aux table entries to tell glibc what
6 * the cache block size is, so it can use the dcbz instruction safely.
7 */
8#define AT_DCACHEBSIZE 19
9#define AT_ICACHEBSIZE 20
10#define AT_UCACHEBSIZE 21
11/* A special ignored type value for PPC, for glibc compatibility. */
12#define AT_IGNOREPPC 22
13
14/* The vDSO location. We have to use the same value as x86 for glibc's
15 * sake :-)
16 */
17#define AT_SYSINFO_EHDR 33
18
19#endif /* __PPC64_AUXVEC_H */
diff --git a/include/asm-ppc64/compat.h b/include/asm-ppc64/compat.h
index 12414f5fc666..6ec62cd2d1d1 100644
--- a/include/asm-ppc64/compat.h
+++ b/include/asm-ppc64/compat.h
@@ -13,8 +13,10 @@ typedef s32 compat_ssize_t;
13typedef s32 compat_time_t; 13typedef s32 compat_time_t;
14typedef s32 compat_clock_t; 14typedef s32 compat_clock_t;
15typedef s32 compat_pid_t; 15typedef s32 compat_pid_t;
16typedef u32 compat_uid_t; 16typedef u32 __compat_uid_t;
17typedef u32 compat_gid_t; 17typedef u32 __compat_gid_t;
18typedef u32 __compat_uid32_t;
19typedef u32 __compat_gid32_t;
18typedef u32 compat_mode_t; 20typedef u32 compat_mode_t;
19typedef u32 compat_ino_t; 21typedef u32 compat_ino_t;
20typedef u32 compat_dev_t; 22typedef u32 compat_dev_t;
@@ -48,8 +50,8 @@ struct compat_stat {
48 compat_ino_t st_ino; 50 compat_ino_t st_ino;
49 compat_mode_t st_mode; 51 compat_mode_t st_mode;
50 compat_nlink_t st_nlink; 52 compat_nlink_t st_nlink;
51 compat_uid_t st_uid; 53 __compat_uid32_t st_uid;
52 compat_gid_t st_gid; 54 __compat_gid32_t st_gid;
53 compat_dev_t st_rdev; 55 compat_dev_t st_rdev;
54 compat_off_t st_size; 56 compat_off_t st_size;
55 compat_off_t st_blksize; 57 compat_off_t st_blksize;
@@ -144,10 +146,10 @@ static inline void __user *compat_alloc_user_space(long len)
144 */ 146 */
145struct compat_ipc64_perm { 147struct compat_ipc64_perm {
146 compat_key_t key; 148 compat_key_t key;
147 compat_uid_t uid; 149 __compat_uid_t uid;
148 compat_gid_t gid; 150 __compat_gid_t gid;
149 compat_uid_t cuid; 151 __compat_uid_t cuid;
150 compat_gid_t cgid; 152 __compat_gid_t cgid;
151 compat_mode_t mode; 153 compat_mode_t mode;
152 unsigned int seq; 154 unsigned int seq;
153 unsigned int __pad2; 155 unsigned int __pad2;
diff --git a/include/asm-ppc64/elf.h b/include/asm-ppc64/elf.h
index 085eedb956fe..c919a89343db 100644
--- a/include/asm-ppc64/elf.h
+++ b/include/asm-ppc64/elf.h
@@ -4,6 +4,7 @@
4#include <asm/types.h> 4#include <asm/types.h>
5#include <asm/ptrace.h> 5#include <asm/ptrace.h>
6#include <asm/cputable.h> 6#include <asm/cputable.h>
7#include <asm/auxvec.h>
7 8
8/* PowerPC relocations defined by the ABIs */ 9/* PowerPC relocations defined by the ABIs */
9#define R_PPC_NONE 0 10#define R_PPC_NONE 0
@@ -237,21 +238,6 @@ do { \
237 238
238#endif 239#endif
239 240
240/*
241 * We need to put in some extra aux table entries to tell glibc what
242 * the cache block size is, so it can use the dcbz instruction safely.
243 */
244#define AT_DCACHEBSIZE 19
245#define AT_ICACHEBSIZE 20
246#define AT_UCACHEBSIZE 21
247/* A special ignored type value for PPC, for glibc compatibility. */
248#define AT_IGNOREPPC 22
249
250/* The vDSO location. We have to use the same value as x86 for glibc's
251 * sake :-)
252 */
253#define AT_SYSINFO_EHDR 33
254
255extern int dcache_bsize; 241extern int dcache_bsize;
256extern int icache_bsize; 242extern int icache_bsize;
257extern int ucache_bsize; 243extern int ucache_bsize;
diff --git a/include/asm-ppc64/fcntl.h b/include/asm-ppc64/fcntl.h
deleted file mode 100644
index 842560d50656..000000000000
--- a/include/asm-ppc64/fcntl.h
+++ /dev/null
@@ -1,89 +0,0 @@
1#ifndef _PPC64_FCNTL_H
2#define _PPC64_FCNTL_H
3
4/*
5 * This program is free software; you can redistribute it and/or
6 * modify it under the terms of the GNU General Public License
7 * as published by the Free Software Foundation; either version
8 * 2 of the License, or (at your option) any later version.
9 */
10
11/* open/fcntl - O_SYNC is only implemented on blocks devices and on files
12 located on an ext2 file system */
13#define O_ACCMODE 0003
14#define O_RDONLY 00
15#define O_WRONLY 01
16#define O_RDWR 02
17#define O_CREAT 0100 /* not fcntl */
18#define O_EXCL 0200 /* not fcntl */
19#define O_NOCTTY 0400 /* not fcntl */
20#define O_TRUNC 01000 /* not fcntl */
21#define O_APPEND 02000
22#define O_NONBLOCK 04000
23#define O_NDELAY O_NONBLOCK
24#define O_SYNC 010000
25#define FASYNC 020000 /* fcntl, for BSD compatibility */
26#define O_DIRECTORY 040000 /* must be a directory */
27#define O_NOFOLLOW 0100000 /* don't follow links */
28#define O_LARGEFILE 0200000
29#define O_DIRECT 0400000 /* direct disk access hint */
30#define O_NOATIME 01000000
31
32#define F_DUPFD 0 /* dup */
33#define F_GETFD 1 /* get close_on_exec */
34#define F_SETFD 2 /* set/clear close_on_exec */
35#define F_GETFL 3 /* get file->f_flags */
36#define F_SETFL 4 /* set file->f_flags */
37#define F_GETLK 5
38#define F_SETLK 6
39#define F_SETLKW 7
40
41#define F_SETOWN 8 /* for sockets. */
42#define F_GETOWN 9 /* for sockets. */
43#define F_SETSIG 10 /* for sockets. */
44#define F_GETSIG 11 /* for sockets. */
45
46/* for F_[GET|SET]FL */
47#define FD_CLOEXEC 1 /* actually anything with low bit set goes */
48
49/* for posix fcntl() and lockf() */
50#define F_RDLCK 0
51#define F_WRLCK 1
52#define F_UNLCK 2
53
54/* for old implementation of bsd flock () */
55#define F_EXLCK 4 /* or 3 */
56#define F_SHLCK 8 /* or 4 */
57
58/* for leases */
59#define F_INPROGRESS 16
60
61/* operations for bsd flock(), also used by the kernel implementation */
62#define LOCK_SH 1 /* shared lock */
63#define LOCK_EX 2 /* exclusive lock */
64#define LOCK_NB 4 /* or'd with one of the above to prevent
65 blocking */
66#define LOCK_UN 8 /* remove lock */
67
68#define LOCK_MAND 32 /* This is a mandatory flock */
69#define LOCK_READ 64 /* ... Which allows concurrent read operations */
70#define LOCK_WRITE 128 /* ... Which allows concurrent write operations */
71#define LOCK_RW 192 /* ... Which allows concurrent read & write ops */
72
73#ifdef __KERNEL__
74#define F_POSIX 1
75#define F_FLOCK 2
76#define F_BROKEN 4 /* broken flock() emulation */
77#endif /* __KERNEL__ */
78
79struct flock {
80 short l_type;
81 short l_whence;
82 off_t l_start;
83 off_t l_len;
84 pid_t l_pid;
85};
86
87#define F_LINUX_SPECIFIC_BASE 1024
88
89#endif /* _PPC64_FCNTL_H */
diff --git a/include/asm-ppc64/futex.h b/include/asm-ppc64/futex.h
new file mode 100644
index 000000000000..cb2640b3a408
--- /dev/null
+++ b/include/asm-ppc64/futex.h
@@ -0,0 +1,83 @@
1#ifndef _ASM_FUTEX_H
2#define _ASM_FUTEX_H
3
4#ifdef __KERNEL__
5
6#include <linux/futex.h>
7#include <asm/errno.h>
8#include <asm/memory.h>
9#include <asm/uaccess.h>
10
11#define __futex_atomic_op(insn, ret, oldval, uaddr, oparg) \
12 __asm__ __volatile (SYNC_ON_SMP \
13"1: lwarx %0,0,%2\n" \
14 insn \
15"2: stwcx. %1,0,%2\n\
16 bne- 1b\n\
17 li %1,0\n\
183: .section .fixup,\"ax\"\n\
194: li %1,%3\n\
20 b 3b\n\
21 .previous\n\
22 .section __ex_table,\"a\"\n\
23 .align 3\n\
24 .llong 1b,4b,2b,4b\n\
25 .previous" \
26 : "=&r" (oldval), "=&r" (ret) \
27 : "b" (uaddr), "i" (-EFAULT), "1" (oparg) \
28 : "cr0", "memory")
29
30static inline int
31futex_atomic_op_inuser (int encoded_op, int __user *uaddr)
32{
33 int op = (encoded_op >> 28) & 7;
34 int cmp = (encoded_op >> 24) & 15;
35 int oparg = (encoded_op << 8) >> 20;
36 int cmparg = (encoded_op << 20) >> 20;
37 int oldval = 0, ret;
38 if (encoded_op & (FUTEX_OP_OPARG_SHIFT << 28))
39 oparg = 1 << oparg;
40
41 if (! access_ok (VERIFY_WRITE, uaddr, sizeof(int)))
42 return -EFAULT;
43
44 inc_preempt_count();
45
46 switch (op) {
47 case FUTEX_OP_SET:
48 __futex_atomic_op("", ret, oldval, uaddr, oparg);
49 break;
50 case FUTEX_OP_ADD:
51 __futex_atomic_op("add %1,%0,%1\n", ret, oldval, uaddr, oparg);
52 break;
53 case FUTEX_OP_OR:
54 __futex_atomic_op("or %1,%0,%1\n", ret, oldval, uaddr, oparg);
55 break;
56 case FUTEX_OP_ANDN:
57 __futex_atomic_op("andc %1,%0,%1\n", ret, oldval, uaddr, oparg);
58 break;
59 case FUTEX_OP_XOR:
60 __futex_atomic_op("xor %1,%0,%1\n", ret, oldval, uaddr, oparg);
61 break;
62 default:
63 ret = -ENOSYS;
64 }
65
66 dec_preempt_count();
67
68 if (!ret) {
69 switch (cmp) {
70 case FUTEX_OP_CMP_EQ: ret = (oldval == cmparg); break;
71 case FUTEX_OP_CMP_NE: ret = (oldval != cmparg); break;
72 case FUTEX_OP_CMP_LT: ret = (oldval < cmparg); break;
73 case FUTEX_OP_CMP_GE: ret = (oldval >= cmparg); break;
74 case FUTEX_OP_CMP_LE: ret = (oldval <= cmparg); break;
75 case FUTEX_OP_CMP_GT: ret = (oldval > cmparg); break;
76 default: ret = -ENOSYS;
77 }
78 }
79 return ret;
80}
81
82#endif
83#endif
diff --git a/include/asm-ppc64/irq.h b/include/asm-ppc64/irq.h
index 570678b1da95..99782afb4cde 100644
--- a/include/asm-ppc64/irq.h
+++ b/include/asm-ppc64/irq.h
@@ -33,6 +33,11 @@
33#define IRQ_POLARITY_POSITIVE 0x2 /* high level or low->high edge */ 33#define IRQ_POLARITY_POSITIVE 0x2 /* high level or low->high edge */
34#define IRQ_POLARITY_NEGATIVE 0x0 /* low level or high->low edge */ 34#define IRQ_POLARITY_NEGATIVE 0x0 /* low level or high->low edge */
35 35
36/*
37 * IRQ line status macro IRQ_PER_CPU is used
38 */
39#define ARCH_HAS_IRQ_PER_CPU
40
36#define get_irq_desc(irq) (&irq_desc[(irq)]) 41#define get_irq_desc(irq) (&irq_desc[(irq)])
37 42
38/* Define a way to iterate across irqs. */ 43/* Define a way to iterate across irqs. */
diff --git a/include/asm-ppc64/kprobes.h b/include/asm-ppc64/kprobes.h
index 0802919c3235..d9129d2b038e 100644
--- a/include/asm-ppc64/kprobes.h
+++ b/include/asm-ppc64/kprobes.h
@@ -42,6 +42,9 @@ typedef unsigned int kprobe_opcode_t;
42 42
43#define JPROBE_ENTRY(pentry) (kprobe_opcode_t *)((func_descr_t *)pentry) 43#define JPROBE_ENTRY(pentry) (kprobe_opcode_t *)((func_descr_t *)pentry)
44 44
45#define is_trap(instr) (IS_TW(instr) || IS_TD(instr) || \
46 IS_TWI(instr) || IS_TDI(instr))
47
45#define ARCH_SUPPORTS_KRETPROBES 48#define ARCH_SUPPORTS_KRETPROBES
46void kretprobe_trampoline(void); 49void kretprobe_trampoline(void);
47 50
diff --git a/include/asm-ppc64/memory.h b/include/asm-ppc64/memory.h
index 56e09face9a8..af53ffb55726 100644
--- a/include/asm-ppc64/memory.h
+++ b/include/asm-ppc64/memory.h
@@ -18,9 +18,11 @@
18#ifdef CONFIG_SMP 18#ifdef CONFIG_SMP
19#define EIEIO_ON_SMP "eieio\n" 19#define EIEIO_ON_SMP "eieio\n"
20#define ISYNC_ON_SMP "\n\tisync" 20#define ISYNC_ON_SMP "\n\tisync"
21#define SYNC_ON_SMP "lwsync\n\t"
21#else 22#else
22#define EIEIO_ON_SMP 23#define EIEIO_ON_SMP
23#define ISYNC_ON_SMP 24#define ISYNC_ON_SMP
25#define SYNC_ON_SMP
24#endif 26#endif
25 27
26static inline void eieio(void) 28static inline void eieio(void)
diff --git a/include/asm-ppc64/processor.h b/include/asm-ppc64/processor.h
index 7bd4796f1236..8bd7aa959385 100644
--- a/include/asm-ppc64/processor.h
+++ b/include/asm-ppc64/processor.h
@@ -311,6 +311,20 @@ name: \
311 .type GLUE(.,name),@function; \ 311 .type GLUE(.,name),@function; \
312GLUE(.,name): 312GLUE(.,name):
313 313
314#define _KPROBE(name) \
315 .section ".kprobes.text","a"; \
316 .align 2 ; \
317 .globl name; \
318 .globl GLUE(.,name); \
319 .section ".opd","aw"; \
320name: \
321 .quad GLUE(.,name); \
322 .quad .TOC.@tocbase; \
323 .quad 0; \
324 .previous; \
325 .type GLUE(.,name),@function; \
326GLUE(.,name):
327
314#define _STATIC(name) \ 328#define _STATIC(name) \
315 .section ".text"; \ 329 .section ".text"; \
316 .align 2 ; \ 330 .align 2 ; \
diff --git a/include/asm-ppc64/uaccess.h b/include/asm-ppc64/uaccess.h
index 05b5943ab1ee..c181a60d868c 100644
--- a/include/asm-ppc64/uaccess.h
+++ b/include/asm-ppc64/uaccess.h
@@ -56,13 +56,6 @@
56#define access_ok(type,addr,size) \ 56#define access_ok(type,addr,size) \
57 __access_ok(((__force unsigned long)(addr)),(size),get_fs()) 57 __access_ok(((__force unsigned long)(addr)),(size),get_fs())
58 58
59/* this function will go away soon - use access_ok() instead */
60static inline int __deprecated verify_area(int type, const void __user *addr, unsigned long size)
61{
62 return access_ok(type,addr,size) ? 0 : -EFAULT;
63}
64
65
66/* 59/*
67 * The exception table consists of pairs of addresses: the first is the 60 * The exception table consists of pairs of addresses: the first is the
68 * address of an instruction that is allowed to fault, and the second is 61 * address of an instruction that is allowed to fault, and the second is
diff --git a/include/asm-s390/auxvec.h b/include/asm-s390/auxvec.h
new file mode 100644
index 000000000000..0d340720fd99
--- /dev/null
+++ b/include/asm-s390/auxvec.h
@@ -0,0 +1,4 @@
1#ifndef __ASMS390_AUXVEC_H
2#define __ASMS390_AUXVEC_H
3
4#endif
diff --git a/include/asm-s390/compat.h b/include/asm-s390/compat.h
index 7f8f544eb262..a007715f4aea 100644
--- a/include/asm-s390/compat.h
+++ b/include/asm-s390/compat.h
@@ -13,10 +13,10 @@ typedef s32 compat_ssize_t;
13typedef s32 compat_time_t; 13typedef s32 compat_time_t;
14typedef s32 compat_clock_t; 14typedef s32 compat_clock_t;
15typedef s32 compat_pid_t; 15typedef s32 compat_pid_t;
16typedef u16 compat_uid_t; 16typedef u16 __compat_uid_t;
17typedef u16 compat_gid_t; 17typedef u16 __compat_gid_t;
18typedef u32 compat_uid32_t; 18typedef u32 __compat_uid32_t;
19typedef u32 compat_gid32_t; 19typedef u32 __compat_gid32_t;
20typedef u16 compat_mode_t; 20typedef u16 compat_mode_t;
21typedef u32 compat_ino_t; 21typedef u32 compat_ino_t;
22typedef u16 compat_dev_t; 22typedef u16 compat_dev_t;
@@ -51,8 +51,8 @@ struct compat_stat {
51 compat_ino_t st_ino; 51 compat_ino_t st_ino;
52 compat_mode_t st_mode; 52 compat_mode_t st_mode;
53 compat_nlink_t st_nlink; 53 compat_nlink_t st_nlink;
54 compat_uid_t st_uid; 54 __compat_uid_t st_uid;
55 compat_gid_t st_gid; 55 __compat_gid_t st_gid;
56 compat_dev_t st_rdev; 56 compat_dev_t st_rdev;
57 u16 __pad2; 57 u16 __pad2;
58 u32 st_size; 58 u32 st_size;
@@ -140,10 +140,10 @@ static inline void __user *compat_alloc_user_space(long len)
140 140
141struct compat_ipc64_perm { 141struct compat_ipc64_perm {
142 compat_key_t key; 142 compat_key_t key;
143 compat_uid32_t uid; 143 __compat_uid32_t uid;
144 compat_gid32_t gid; 144 __compat_gid32_t gid;
145 compat_uid32_t cuid; 145 __compat_uid32_t cuid;
146 compat_gid32_t cgid; 146 __compat_gid32_t cgid;
147 compat_mode_t mode; 147 compat_mode_t mode;
148 unsigned short __pad1; 148 unsigned short __pad1;
149 unsigned short seq; 149 unsigned short seq;
diff --git a/include/asm-s390/fcntl.h b/include/asm-s390/fcntl.h
index 48f692b45732..46ab12db5739 100644
--- a/include/asm-s390/fcntl.h
+++ b/include/asm-s390/fcntl.h
@@ -1,97 +1 @@
1/* #include <asm-generic/fcntl.h>
2 * include/asm-s390/fcntl.h
3 *
4 * S390 version
5 *
6 * Derived from "include/asm-i386/fcntl.h"
7 */
8#ifndef _S390_FCNTL_H
9#define _S390_FCNTL_H
10
11/* open/fcntl - O_SYNC is only implemented on blocks devices and on files
12 located on an ext2 file system */
13#define O_ACCMODE 0003
14#define O_RDONLY 00
15#define O_WRONLY 01
16#define O_RDWR 02
17#define O_CREAT 0100 /* not fcntl */
18#define O_EXCL 0200 /* not fcntl */
19#define O_NOCTTY 0400 /* not fcntl */
20#define O_TRUNC 01000 /* not fcntl */
21#define O_APPEND 02000
22#define O_NONBLOCK 04000
23#define O_NDELAY O_NONBLOCK
24#define O_SYNC 010000
25#define FASYNC 020000 /* fcntl, for BSD compatibility */
26#define O_DIRECT 040000 /* direct disk access hint */
27#define O_LARGEFILE 0100000
28#define O_DIRECTORY 0200000 /* must be a directory */
29#define O_NOFOLLOW 0400000 /* don't follow links */
30#define O_NOATIME 01000000
31
32#define F_DUPFD 0 /* dup */
33#define F_GETFD 1 /* get close_on_exec */
34#define F_SETFD 2 /* set/clear close_on_exec */
35#define F_GETFL 3 /* get file->f_flags */
36#define F_SETFL 4 /* set file->f_flags */
37#define F_GETLK 5
38#define F_SETLK 6
39#define F_SETLKW 7
40
41#define F_SETOWN 8 /* for sockets. */
42#define F_GETOWN 9 /* for sockets. */
43#define F_SETSIG 10 /* for sockets. */
44#define F_GETSIG 11 /* for sockets. */
45
46#ifndef __s390x__
47#define F_GETLK64 12 /* using 'struct flock64' */
48#define F_SETLK64 13
49#define F_SETLKW64 14
50#endif /* ! __s390x__ */
51
52/* for F_[GET|SET]FL */
53#define FD_CLOEXEC 1 /* actually anything with low bit set goes */
54
55/* for posix fcntl() and lockf() */
56#define F_RDLCK 0
57#define F_WRLCK 1
58#define F_UNLCK 2
59
60/* for old implementation of bsd flock () */
61#define F_EXLCK 4 /* or 3 */
62#define F_SHLCK 8 /* or 4 */
63
64/* for leases */
65#define F_INPROGRESS 16
66
67/* operations for bsd flock(), also used by the kernel implementation */
68#define LOCK_SH 1 /* shared lock */
69#define LOCK_EX 2 /* exclusive lock */
70#define LOCK_NB 4 /* or'd with one of the above to prevent
71 blocking */
72#define LOCK_UN 8 /* remove lock */
73
74#define LOCK_MAND 32 /* This is a mandatory flock */
75#define LOCK_READ 64 /* ... Which allows concurrent read operations */
76#define LOCK_WRITE 128 /* ... Which allows concurrent write operations */
77#define LOCK_RW 192 /* ... Which allows concurrent read & write ops */
78
79struct flock {
80 short l_type;
81 short l_whence;
82 off_t l_start;
83 off_t l_len;
84 pid_t l_pid;
85};
86
87#ifndef __s390x__
88struct flock64 {
89 short l_type;
90 short l_whence;
91 loff_t l_start;
92 loff_t l_len;
93 pid_t l_pid;
94};
95#endif
96#define F_LINUX_SPECIFIC_BASE 1024
97#endif
diff --git a/include/asm-s390/futex.h b/include/asm-s390/futex.h
new file mode 100644
index 000000000000..2cac5ecd9d00
--- /dev/null
+++ b/include/asm-s390/futex.h
@@ -0,0 +1,53 @@
1#ifndef _ASM_FUTEX_H
2#define _ASM_FUTEX_H
3
4#ifdef __KERNEL__
5
6#include <linux/futex.h>
7#include <asm/errno.h>
8#include <asm/uaccess.h>
9
10static inline int
11futex_atomic_op_inuser (int encoded_op, int __user *uaddr)
12{
13 int op = (encoded_op >> 28) & 7;
14 int cmp = (encoded_op >> 24) & 15;
15 int oparg = (encoded_op << 8) >> 20;
16 int cmparg = (encoded_op << 20) >> 20;
17 int oldval = 0, ret, tem;
18 if (encoded_op & (FUTEX_OP_OPARG_SHIFT << 28))
19 oparg = 1 << oparg;
20
21 if (! access_ok (VERIFY_WRITE, uaddr, sizeof(int)))
22 return -EFAULT;
23
24 inc_preempt_count();
25
26 switch (op) {
27 case FUTEX_OP_SET:
28 case FUTEX_OP_ADD:
29 case FUTEX_OP_OR:
30 case FUTEX_OP_ANDN:
31 case FUTEX_OP_XOR:
32 default:
33 ret = -ENOSYS;
34 }
35
36 dec_preempt_count();
37
38 if (!ret) {
39 switch (cmp) {
40 case FUTEX_OP_CMP_EQ: ret = (oldval == cmparg); break;
41 case FUTEX_OP_CMP_NE: ret = (oldval != cmparg); break;
42 case FUTEX_OP_CMP_LT: ret = (oldval < cmparg); break;
43 case FUTEX_OP_CMP_GE: ret = (oldval >= cmparg); break;
44 case FUTEX_OP_CMP_LE: ret = (oldval <= cmparg); break;
45 case FUTEX_OP_CMP_GT: ret = (oldval > cmparg); break;
46 default: ret = -ENOSYS;
47 }
48 }
49 return ret;
50}
51
52#endif
53#endif
diff --git a/include/asm-s390/uaccess.h b/include/asm-s390/uaccess.h
index 3e3bfe6a8fa8..38a5cf8ab9e3 100644
--- a/include/asm-s390/uaccess.h
+++ b/include/asm-s390/uaccess.h
@@ -65,13 +65,6 @@
65 65
66#define access_ok(type,addr,size) __access_ok(addr,size) 66#define access_ok(type,addr,size) __access_ok(addr,size)
67 67
68/* this function will go away soon - use access_ok() instead */
69extern inline int __deprecated verify_area(int type, const void __user *addr,
70 unsigned long size)
71{
72 return access_ok(type, addr, size) ? 0 : -EFAULT;
73}
74
75/* 68/*
76 * The exception table consists of pairs of addresses: the first is the 69 * The exception table consists of pairs of addresses: the first is the
77 * address of an instruction that is allowed to fault, and the second is 70 * address of an instruction that is allowed to fault, and the second is
diff --git a/include/asm-sh/auxvec.h b/include/asm-sh/auxvec.h
new file mode 100644
index 000000000000..fc21e4db5881
--- /dev/null
+++ b/include/asm-sh/auxvec.h
@@ -0,0 +1,4 @@
1#ifndef __ASM_SH_AUXVEC_H
2#define __ASM_SH_AUXVEC_H
3
4#endif /* __ASM_SH_AUXVEC_H */
diff --git a/include/asm-sh/fcntl.h b/include/asm-sh/fcntl.h
index 0b3ae524e34c..46ab12db5739 100644
--- a/include/asm-sh/fcntl.h
+++ b/include/asm-sh/fcntl.h
@@ -1,88 +1 @@
1#ifndef __ASM_SH_FCNTL_H #include <asm-generic/fcntl.h>
2#define __ASM_SH_FCNTL_H
3
4/* open/fcntl - O_SYNC is only implemented on blocks devices and on files
5 located on an ext2 file system */
6#define O_ACCMODE 0003
7#define O_RDONLY 00
8#define O_WRONLY 01
9#define O_RDWR 02
10#define O_CREAT 0100 /* not fcntl */
11#define O_EXCL 0200 /* not fcntl */
12#define O_NOCTTY 0400 /* not fcntl */
13#define O_TRUNC 01000 /* not fcntl */
14#define O_APPEND 02000
15#define O_NONBLOCK 04000
16#define O_NDELAY O_NONBLOCK
17#define O_SYNC 010000
18#define FASYNC 020000 /* fcntl, for BSD compatibility */
19#define O_DIRECT 040000 /* direct disk access hint - currently ignored */
20#define O_LARGEFILE 0100000
21#define O_DIRECTORY 0200000 /* must be a directory */
22#define O_NOFOLLOW 0400000 /* don't follow links */
23#define O_NOATIME 01000000
24
25#define F_DUPFD 0 /* dup */
26#define F_GETFD 1 /* get close_on_exec */
27#define F_SETFD 2 /* set/clear close_on_exec */
28#define F_GETFL 3 /* get file->f_flags */
29#define F_SETFL 4 /* set file->f_flags */
30#define F_GETLK 5
31#define F_SETLK 6
32#define F_SETLKW 7
33
34#define F_SETOWN 8 /* for sockets. */
35#define F_GETOWN 9 /* for sockets. */
36#define F_SETSIG 10 /* for sockets. */
37#define F_GETSIG 11 /* for sockets. */
38
39#define F_GETLK64 12 /* using 'struct flock64' */
40#define F_SETLK64 13
41#define F_SETLKW64 14
42
43/* for F_[GET|SET]FL */
44#define FD_CLOEXEC 1 /* actually anything with low bit set goes */
45
46/* for posix fcntl() and lockf() */
47#define F_RDLCK 0
48#define F_WRLCK 1
49#define F_UNLCK 2
50
51/* for old implementation of bsd flock () */
52#define F_EXLCK 4 /* or 3 */
53#define F_SHLCK 8 /* or 4 */
54
55/* for leases */
56#define F_INPROGRESS 16
57
58/* operations for bsd flock(), also used by the kernel implementation */
59#define LOCK_SH 1 /* shared lock */
60#define LOCK_EX 2 /* exclusive lock */
61#define LOCK_NB 4 /* or'd with one of the above to prevent
62 blocking */
63#define LOCK_UN 8 /* remove lock */
64
65#define LOCK_MAND 32 /* This is a mandatory flock */
66#define LOCK_READ 64 /* ... Which allows concurrent read operations */
67#define LOCK_WRITE 128 /* ... Which allows concurrent write operations */
68#define LOCK_RW 192 /* ... Which allows concurrent read & write ops */
69
70struct flock {
71 short l_type;
72 short l_whence;
73 off_t l_start;
74 off_t l_len;
75 pid_t l_pid;
76};
77
78struct flock64 {
79 short l_type;
80 short l_whence;
81 loff_t l_start;
82 loff_t l_len;
83 pid_t l_pid;
84};
85
86#define F_LINUX_SPECIFIC_BASE 1024
87#endif /* __ASM_SH_FCNTL_H */
88
diff --git a/include/asm-sh/futex.h b/include/asm-sh/futex.h
new file mode 100644
index 000000000000..2cac5ecd9d00
--- /dev/null
+++ b/include/asm-sh/futex.h
@@ -0,0 +1,53 @@
1#ifndef _ASM_FUTEX_H
2#define _ASM_FUTEX_H
3
4#ifdef __KERNEL__
5
6#include <linux/futex.h>
7#include <asm/errno.h>
8#include <asm/uaccess.h>
9
10static inline int
11futex_atomic_op_inuser (int encoded_op, int __user *uaddr)
12{
13 int op = (encoded_op >> 28) & 7;
14 int cmp = (encoded_op >> 24) & 15;
15 int oparg = (encoded_op << 8) >> 20;
16 int cmparg = (encoded_op << 20) >> 20;
17 int oldval = 0, ret, tem;
18 if (encoded_op & (FUTEX_OP_OPARG_SHIFT << 28))
19 oparg = 1 << oparg;
20
21 if (! access_ok (VERIFY_WRITE, uaddr, sizeof(int)))
22 return -EFAULT;
23
24 inc_preempt_count();
25
26 switch (op) {
27 case FUTEX_OP_SET:
28 case FUTEX_OP_ADD:
29 case FUTEX_OP_OR:
30 case FUTEX_OP_ANDN:
31 case FUTEX_OP_XOR:
32 default:
33 ret = -ENOSYS;
34 }
35
36 dec_preempt_count();
37
38 if (!ret) {
39 switch (cmp) {
40 case FUTEX_OP_CMP_EQ: ret = (oldval == cmparg); break;
41 case FUTEX_OP_CMP_NE: ret = (oldval != cmparg); break;
42 case FUTEX_OP_CMP_LT: ret = (oldval < cmparg); break;
43 case FUTEX_OP_CMP_GE: ret = (oldval >= cmparg); break;
44 case FUTEX_OP_CMP_LE: ret = (oldval <= cmparg); break;
45 case FUTEX_OP_CMP_GT: ret = (oldval > cmparg); break;
46 default: ret = -ENOSYS;
47 }
48 }
49 return ret;
50}
51
52#endif
53#endif
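The generic futex_atomic_op_inuser() stub above (repeated verbatim for several other architectures below) only unpacks the encoded operation word and then returns -ENOSYS for every op; the field layout it decodes is the one produced by the FUTEX_OP() macro this patch adds to <linux/futex.h> further down. A minimal stand-alone sketch of that packing, with hypothetical example values and a portable mask-based decode (the kernel's version uses a shift pair instead so the two 12-bit operands come back sign-extended), is:

	#include <stdio.h>

	/* same bit layout as FUTEX_OP() in <linux/futex.h> */
	#define FUTEX_OP(op, oparg, cmp, cmparg) \
		(((op & 0xf) << 28) | ((cmp & 0xf) << 24) \
		 | ((oparg & 0xfff) << 12) | (cmparg & 0xfff))

	int main(void)
	{
		/* "add 1 to the futex word, wake if the old value was > 0" */
		int encoded_op = FUTEX_OP(1 /* FUTEX_OP_ADD */, 1,
					  4 /* FUTEX_OP_CMP_GT */, 0);

		int op     = (encoded_op >> 28) & 7;		/* 1 */
		int cmp    = (encoded_op >> 24) & 15;		/* 4 */
		int oparg  = (encoded_op >> 12) & 0xfff;	/* 1 */
		int cmparg = encoded_op & 0xfff;		/* 0 */

		printf("op=%d cmp=%d oparg=%d cmparg=%d\n",
		       op, cmp, oparg, cmparg);
		return 0;
	}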
diff --git a/include/asm-sh/hdreg.h b/include/asm-sh/hdreg.h
deleted file mode 100644
index 7f7fd1af0af3..000000000000
--- a/include/asm-sh/hdreg.h
+++ /dev/null
@@ -1 +0,0 @@
1#include <asm-generic/hdreg.h>
diff --git a/include/asm-sh/uaccess.h b/include/asm-sh/uaccess.h
index fb9e334afa2b..2cb01861e7c5 100644
--- a/include/asm-sh/uaccess.h
+++ b/include/asm-sh/uaccess.h
@@ -146,12 +146,6 @@ static inline int access_ok(int type, const void __user *p, unsigned long size)
146 return __access_ok(addr, size); 146 return __access_ok(addr, size);
147} 147}
148 148
149/* this function will go away soon - use access_ok() instead */
150static inline int __deprecated verify_area(int type, const void __user * addr, unsigned long size)
151{
152 return access_ok(type,addr,size) ? 0 : -EFAULT;
153}
154
155/* 149/*
156 * Uh, these should become the main single-value transfer routines ... 150 * Uh, these should become the main single-value transfer routines ...
157 * They automatically use the right size if we just have the right 151 * They automatically use the right size if we just have the right
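The verify_area() wrappers deleted here (and in the matching uaccess.h hunks for the other architectures below) were thin 0/-EFAULT shims around access_ok(). Call sites convert by inverting the test; a hypothetical driver-side snippet, not taken from this patch, looks like:

	#include <linux/types.h>
	#include <asm/uaccess.h>

	static int copy_result_to_user(void __user *buf, const void *src,
				       size_t len)
	{
		/* old style:
		 *	if (verify_area(VERIFY_WRITE, buf, len))
		 *		return -EFAULT;
		 * new style: access_ok() returns non-zero when the range is OK
		 */
		if (!access_ok(VERIFY_WRITE, buf, len))
			return -EFAULT;

		return copy_to_user(buf, src, len) ? -EFAULT : 0;
	}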
diff --git a/include/asm-sh64/auxvec.h b/include/asm-sh64/auxvec.h
new file mode 100644
index 000000000000..1ad5a44bdc76
--- /dev/null
+++ b/include/asm-sh64/auxvec.h
@@ -0,0 +1,4 @@
1#ifndef __ASM_SH64_AUXVEC_H
2#define __ASM_SH64_AUXVEC_H
3
4#endif /* __ASM_SH64_AUXVEC_H */
diff --git a/include/asm-sh64/fcntl.h b/include/asm-sh64/fcntl.h
index ffcc36c64fa5..744dd79b9d5d 100644
--- a/include/asm-sh64/fcntl.h
+++ b/include/asm-sh64/fcntl.h
@@ -1,7 +1 @@
1#ifndef __ASM_SH64_FCNTL_H
2#define __ASM_SH64_FCNTL_H
3
4#include <asm-sh/fcntl.h> 1#include <asm-sh/fcntl.h>
5
6#endif /* __ASM_SH64_FCNTL_H */
7
diff --git a/include/asm-sh64/futex.h b/include/asm-sh64/futex.h
new file mode 100644
index 000000000000..2cac5ecd9d00
--- /dev/null
+++ b/include/asm-sh64/futex.h
@@ -0,0 +1,53 @@
1#ifndef _ASM_FUTEX_H
2#define _ASM_FUTEX_H
3
4#ifdef __KERNEL__
5
6#include <linux/futex.h>
7#include <asm/errno.h>
8#include <asm/uaccess.h>
9
10static inline int
11futex_atomic_op_inuser (int encoded_op, int __user *uaddr)
12{
13 int op = (encoded_op >> 28) & 7;
14 int cmp = (encoded_op >> 24) & 15;
15 int oparg = (encoded_op << 8) >> 20;
16 int cmparg = (encoded_op << 20) >> 20;
17 int oldval = 0, ret, tem;
18 if (encoded_op & (FUTEX_OP_OPARG_SHIFT << 28))
19 oparg = 1 << oparg;
20
21 if (! access_ok (VERIFY_WRITE, uaddr, sizeof(int)))
22 return -EFAULT;
23
24 inc_preempt_count();
25
26 switch (op) {
27 case FUTEX_OP_SET:
28 case FUTEX_OP_ADD:
29 case FUTEX_OP_OR:
30 case FUTEX_OP_ANDN:
31 case FUTEX_OP_XOR:
32 default:
33 ret = -ENOSYS;
34 }
35
36 dec_preempt_count();
37
38 if (!ret) {
39 switch (cmp) {
40 case FUTEX_OP_CMP_EQ: ret = (oldval == cmparg); break;
41 case FUTEX_OP_CMP_NE: ret = (oldval != cmparg); break;
42 case FUTEX_OP_CMP_LT: ret = (oldval < cmparg); break;
43 case FUTEX_OP_CMP_GE: ret = (oldval >= cmparg); break;
44 case FUTEX_OP_CMP_LE: ret = (oldval <= cmparg); break;
45 case FUTEX_OP_CMP_GT: ret = (oldval > cmparg); break;
46 default: ret = -ENOSYS;
47 }
48 }
49 return ret;
50}
51
52#endif
53#endif
diff --git a/include/asm-sh64/hdreg.h b/include/asm-sh64/hdreg.h
deleted file mode 100644
index 52d983635a27..000000000000
--- a/include/asm-sh64/hdreg.h
+++ /dev/null
@@ -1,6 +0,0 @@
1#ifndef __ASM_SH64_HDREG_H
2#define __ASM_SH64_HDREG_H
3
4#include <asm-generic/hdreg.h>
5
6#endif /* __ASM_SH64_HDREG_H */
diff --git a/include/asm-sh64/uaccess.h b/include/asm-sh64/uaccess.h
index a33654d576a1..56aa3cf0f273 100644
--- a/include/asm-sh64/uaccess.h
+++ b/include/asm-sh64/uaccess.h
@@ -60,12 +60,6 @@
60#define access_ok(type,addr,size) (__range_ok(addr,size) == 0) 60#define access_ok(type,addr,size) (__range_ok(addr,size) == 0)
61#define __access_ok(addr,size) (__range_ok(addr,size) == 0) 61#define __access_ok(addr,size) (__range_ok(addr,size) == 0)
62 62
63/* this function will go away soon - use access_ok() instead */
64extern inline int __deprecated verify_area(int type, const void __user * addr, unsigned long size)
65{
66 return access_ok(type,addr,size) ? 0 : -EFAULT;
67}
68
69/* 63/*
70 * Uh, these should become the main single-value transfer routines ... 64 * Uh, these should become the main single-value transfer routines ...
71 * They automatically use the right size if we just have the right 65 * They automatically use the right size if we just have the right
diff --git a/include/asm-sparc/auxvec.h b/include/asm-sparc/auxvec.h
new file mode 100644
index 000000000000..ad6f360261f6
--- /dev/null
+++ b/include/asm-sparc/auxvec.h
@@ -0,0 +1,4 @@
1#ifndef __ASMSPARC_AUXVEC_H
2#define __ASMSPARC_AUXVEC_H
3
4#endif /* !(__ASMSPARC_AUXVEC_H) */
diff --git a/include/asm-sparc/fcntl.h b/include/asm-sparc/fcntl.h
index df9c75d41d68..5db60b5ae7b0 100644
--- a/include/asm-sparc/fcntl.h
+++ b/include/asm-sparc/fcntl.h
@@ -4,10 +4,6 @@
4 4
5/* open/fcntl - O_SYNC is only implemented on blocks devices and on files 5/* open/fcntl - O_SYNC is only implemented on blocks devices and on files
6 located on an ext2 file system */ 6 located on an ext2 file system */
7#define O_RDONLY 0x0000
8#define O_WRONLY 0x0001
9#define O_RDWR 0x0002
10#define O_ACCMODE 0x0003
11#define O_APPEND 0x0008 7#define O_APPEND 0x0008
12#define FASYNC 0x0040 /* fcntl, for BSD compatibility */ 8#define FASYNC 0x0040 /* fcntl, for BSD compatibility */
13#define O_CREAT 0x0200 /* not fcntl */ 9#define O_CREAT 0x0200 /* not fcntl */
@@ -17,73 +13,24 @@
17#define O_NONBLOCK 0x4000 13#define O_NONBLOCK 0x4000
18#define O_NDELAY (0x0004 | O_NONBLOCK) 14#define O_NDELAY (0x0004 | O_NONBLOCK)
19#define O_NOCTTY 0x8000 /* not fcntl */ 15#define O_NOCTTY 0x8000 /* not fcntl */
20#define O_DIRECTORY 0x10000 /* must be a directory */
21#define O_NOFOLLOW 0x20000 /* don't follow links */
22#define O_LARGEFILE 0x40000 16#define O_LARGEFILE 0x40000
23#define O_DIRECT 0x100000 /* direct disk access hint */ 17#define O_DIRECT 0x100000 /* direct disk access hint */
24#define O_NOATIME 0x200000 18#define O_NOATIME 0x200000
25 19
26#define F_DUPFD 0 /* dup */
27#define F_GETFD 1 /* get close_on_exec */
28#define F_SETFD 2 /* set/clear close_on_exec */
29#define F_GETFL 3 /* get file->f_flags */
30#define F_SETFL 4 /* set file->f_flags */
31#define F_GETOWN 5 /* for sockets. */ 20#define F_GETOWN 5 /* for sockets. */
32#define F_SETOWN 6 /* for sockets. */ 21#define F_SETOWN 6 /* for sockets. */
33#define F_GETLK 7 22#define F_GETLK 7
34#define F_SETLK 8 23#define F_SETLK 8
35#define F_SETLKW 9 24#define F_SETLKW 9
36#define F_SETSIG 10 /* for sockets. */
37#define F_GETSIG 11 /* for sockets. */
38
39#define F_GETLK64 12 /* using 'struct flock64' */
40#define F_SETLK64 13
41#define F_SETLKW64 14
42
43/* for F_[GET|SET]FL */
44#define FD_CLOEXEC 1 /* actually anything with low bit set goes */
45 25
46/* for posix fcntl() and lockf() */ 26/* for posix fcntl() and lockf() */
47#define F_RDLCK 1 27#define F_RDLCK 1
48#define F_WRLCK 2 28#define F_WRLCK 2
49#define F_UNLCK 3 29#define F_UNLCK 3
50 30
51/* for old implementation of bsd flock () */ 31#define __ARCH_FLOCK_PAD short __unused;
52#define F_EXLCK 4 /* or 3 */ 32#define __ARCH_FLOCK64_PAD short __unused;
53#define F_SHLCK 8 /* or 4 */
54
55/* for leases */
56#define F_INPROGRESS 16
57
58/* operations for bsd flock(), also used by the kernel implementation */
59#define LOCK_SH 1 /* shared lock */
60#define LOCK_EX 2 /* exclusive lock */
61#define LOCK_NB 4 /* or'd with one of the above to prevent
62 blocking */
63#define LOCK_UN 8 /* remove lock */
64
65#define LOCK_MAND 32 /* This is a mandatory flock */
66#define LOCK_READ 64 /* ... Which allows concurrent read operations */
67#define LOCK_WRITE 128 /* ... Which allows concurrent write operations */
68#define LOCK_RW 192 /* ... Which allows concurrent read & write ops */
69
70struct flock {
71 short l_type;
72 short l_whence;
73 off_t l_start;
74 off_t l_len;
75 pid_t l_pid;
76 short __unused;
77};
78 33
79struct flock64 { 34#include <asm-generic/fcntl.h>
80 short l_type;
81 short l_whence;
82 loff_t l_start;
83 loff_t l_len;
84 pid_t l_pid;
85 short __unused;
86};
87 35
88#define F_LINUX_SPECIFIC_BASE 1024
89#endif 36#endif
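The sparc conversion above shows the pattern this commit applies across the tree: an architecture's fcntl.h now keeps only the values that differ from the generic defaults, optionally declares extra struct flock padding through __ARCH_FLOCK_PAD / __ARCH_FLOCK64_PAD, and then pulls in <asm-generic/fcntl.h>, which supplies every definition not already present. A minimal sketch for a hypothetical architecture ("foo", not part of this patch) would be:

	#ifndef __ASM_FOO_FCNTL_H
	#define __ASM_FOO_FCNTL_H

	/* only the values that differ from asm-generic's defaults */
	#define O_APPEND	0x0008
	#define O_NONBLOCK	0x4000

	/* this arch carries a trailing pad word in struct flock */
	#define __ARCH_FLOCK_PAD	short __unused;

	#include <asm-generic/fcntl.h>

	#endif /* __ASM_FOO_FCNTL_H */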
diff --git a/include/asm-sparc/futex.h b/include/asm-sparc/futex.h
new file mode 100644
index 000000000000..2cac5ecd9d00
--- /dev/null
+++ b/include/asm-sparc/futex.h
@@ -0,0 +1,53 @@
1#ifndef _ASM_FUTEX_H
2#define _ASM_FUTEX_H
3
4#ifdef __KERNEL__
5
6#include <linux/futex.h>
7#include <asm/errno.h>
8#include <asm/uaccess.h>
9
10static inline int
11futex_atomic_op_inuser (int encoded_op, int __user *uaddr)
12{
13 int op = (encoded_op >> 28) & 7;
14 int cmp = (encoded_op >> 24) & 15;
15 int oparg = (encoded_op << 8) >> 20;
16 int cmparg = (encoded_op << 20) >> 20;
17 int oldval = 0, ret, tem;
18 if (encoded_op & (FUTEX_OP_OPARG_SHIFT << 28))
19 oparg = 1 << oparg;
20
21 if (! access_ok (VERIFY_WRITE, uaddr, sizeof(int)))
22 return -EFAULT;
23
24 inc_preempt_count();
25
26 switch (op) {
27 case FUTEX_OP_SET:
28 case FUTEX_OP_ADD:
29 case FUTEX_OP_OR:
30 case FUTEX_OP_ANDN:
31 case FUTEX_OP_XOR:
32 default:
33 ret = -ENOSYS;
34 }
35
36 dec_preempt_count();
37
38 if (!ret) {
39 switch (cmp) {
40 case FUTEX_OP_CMP_EQ: ret = (oldval == cmparg); break;
41 case FUTEX_OP_CMP_NE: ret = (oldval != cmparg); break;
42 case FUTEX_OP_CMP_LT: ret = (oldval < cmparg); break;
43 case FUTEX_OP_CMP_GE: ret = (oldval >= cmparg); break;
44 case FUTEX_OP_CMP_LE: ret = (oldval <= cmparg); break;
45 case FUTEX_OP_CMP_GT: ret = (oldval > cmparg); break;
46 default: ret = -ENOSYS;
47 }
48 }
49 return ret;
50}
51
52#endif
53#endif
diff --git a/include/asm-sparc/hdreg.h b/include/asm-sparc/hdreg.h
deleted file mode 100644
index 7f7fd1af0af3..000000000000
--- a/include/asm-sparc/hdreg.h
+++ /dev/null
@@ -1 +0,0 @@
1#include <asm-generic/hdreg.h>
diff --git a/include/asm-sparc/uaccess.h b/include/asm-sparc/uaccess.h
index 0a780e84a12b..f8f1ec1f06e6 100644
--- a/include/asm-sparc/uaccess.h
+++ b/include/asm-sparc/uaccess.h
@@ -47,12 +47,6 @@
47#define access_ok(type, addr, size) \ 47#define access_ok(type, addr, size) \
48 ({ (void)(type); __access_ok((unsigned long)(addr), size); }) 48 ({ (void)(type); __access_ok((unsigned long)(addr), size); })
49 49
50/* this function will go away soon - use access_ok() instead */
51static inline int __deprecated verify_area(int type, const void __user * addr, unsigned long size)
52{
53 return access_ok(type,addr,size) ? 0 : -EFAULT;
54}
55
56/* 50/*
57 * The exception table consists of pairs of addresses: the first is the 51 * The exception table consists of pairs of addresses: the first is the
58 * address of an instruction that is allowed to fault, and the second is 52 * address of an instruction that is allowed to fault, and the second is
diff --git a/include/asm-sparc64/auxvec.h b/include/asm-sparc64/auxvec.h
new file mode 100644
index 000000000000..436a29129828
--- /dev/null
+++ b/include/asm-sparc64/auxvec.h
@@ -0,0 +1,4 @@
1#ifndef __ASM_SPARC64_AUXVEC_H
2#define __ASM_SPARC64_AUXVEC_H
3
4#endif /* !(__ASM_SPARC64_AUXVEC_H) */
diff --git a/include/asm-sparc64/compat.h b/include/asm-sparc64/compat.h
index b59122dd176d..c73935dc7ba1 100644
--- a/include/asm-sparc64/compat.h
+++ b/include/asm-sparc64/compat.h
@@ -12,8 +12,10 @@ typedef s32 compat_ssize_t;
12typedef s32 compat_time_t; 12typedef s32 compat_time_t;
13typedef s32 compat_clock_t; 13typedef s32 compat_clock_t;
14typedef s32 compat_pid_t; 14typedef s32 compat_pid_t;
15typedef u16 compat_uid_t; 15typedef u16 __compat_uid_t;
16typedef u16 compat_gid_t; 16typedef u16 __compat_gid_t;
17typedef u32 __compat_uid32_t;
18typedef u32 __compat_gid32_t;
17typedef u16 compat_mode_t; 19typedef u16 compat_mode_t;
18typedef u32 compat_ino_t; 20typedef u32 compat_ino_t;
19typedef u16 compat_dev_t; 21typedef u16 compat_dev_t;
@@ -47,8 +49,8 @@ struct compat_stat {
47 compat_ino_t st_ino; 49 compat_ino_t st_ino;
48 compat_mode_t st_mode; 50 compat_mode_t st_mode;
49 compat_nlink_t st_nlink; 51 compat_nlink_t st_nlink;
50 compat_uid_t st_uid; 52 __compat_uid_t st_uid;
51 compat_gid_t st_gid; 53 __compat_gid_t st_gid;
52 compat_dev_t st_rdev; 54 compat_dev_t st_rdev;
53 compat_off_t st_size; 55 compat_off_t st_size;
54 compat_time_t st_atime; 56 compat_time_t st_atime;
@@ -177,10 +179,10 @@ static __inline__ void __user *compat_alloc_user_space(long len)
177 179
178struct compat_ipc64_perm { 180struct compat_ipc64_perm {
179 compat_key_t key; 181 compat_key_t key;
180 __kernel_uid_t uid; 182 __compat_uid32_t uid;
181 __kernel_gid_t gid; 183 __compat_gid32_t gid;
182 __kernel_uid_t cuid; 184 __compat_uid32_t cuid;
183 __kernel_gid_t cgid; 185 __compat_gid32_t cgid;
184 unsigned short __pad1; 186 unsigned short __pad1;
185 compat_mode_t mode; 187 compat_mode_t mode;
186 unsigned short __pad2; 188 unsigned short __pad2;
diff --git a/include/asm-sparc64/fcntl.h b/include/asm-sparc64/fcntl.h
index e36def0d0d80..b2aecf0054bd 100644
--- a/include/asm-sparc64/fcntl.h
+++ b/include/asm-sparc64/fcntl.h
@@ -4,10 +4,6 @@
4 4
5/* open/fcntl - O_SYNC is only implemented on blocks devices and on files 5/* open/fcntl - O_SYNC is only implemented on blocks devices and on files
6 located on an ext2 file system */ 6 located on an ext2 file system */
7#define O_RDONLY 0x0000
8#define O_WRONLY 0x0001
9#define O_RDWR 0x0002
10#define O_ACCMODE 0x0003
11#define O_NDELAY 0x0004 7#define O_NDELAY 0x0004
12#define O_APPEND 0x0008 8#define O_APPEND 0x0008
13#define FASYNC 0x0040 /* fcntl, for BSD compatibility */ 9#define FASYNC 0x0040 /* fcntl, for BSD compatibility */
@@ -17,62 +13,24 @@
17#define O_SYNC 0x2000 13#define O_SYNC 0x2000
18#define O_NONBLOCK 0x4000 14#define O_NONBLOCK 0x4000
19#define O_NOCTTY 0x8000 /* not fcntl */ 15#define O_NOCTTY 0x8000 /* not fcntl */
20#define O_DIRECTORY 0x10000 /* must be a directory */
21#define O_NOFOLLOW 0x20000 /* don't follow links */
22#define O_LARGEFILE 0x40000 16#define O_LARGEFILE 0x40000
23#define O_DIRECT 0x100000 /* direct disk access hint */ 17#define O_DIRECT 0x100000 /* direct disk access hint */
24#define O_NOATIME 0x200000 18#define O_NOATIME 0x200000
25 19
26 20
27#define F_DUPFD 0 /* dup */
28#define F_GETFD 1 /* get close_on_exec */
29#define F_SETFD 2 /* set/clear close_on_exec */
30#define F_GETFL 3 /* get file->f_flags */
31#define F_SETFL 4 /* set file->f_flags */
32#define F_GETOWN 5 /* for sockets. */ 21#define F_GETOWN 5 /* for sockets. */
33#define F_SETOWN 6 /* for sockets. */ 22#define F_SETOWN 6 /* for sockets. */
34#define F_GETLK 7 23#define F_GETLK 7
35#define F_SETLK 8 24#define F_SETLK 8
36#define F_SETLKW 9 25#define F_SETLKW 9
37#define F_SETSIG 10 /* for sockets. */
38#define F_GETSIG 11 /* for sockets. */
39
40/* for F_[GET|SET]FL */
41#define FD_CLOEXEC 1 /* actually anything with low bit set goes */
42 26
43/* for posix fcntl() and lockf() */ 27/* for posix fcntl() and lockf() */
44#define F_RDLCK 1 28#define F_RDLCK 1
45#define F_WRLCK 2 29#define F_WRLCK 2
46#define F_UNLCK 3 30#define F_UNLCK 3
47 31
48/* for old implementation of bsd flock () */ 32#define __ARCH_FLOCK_PAD short __unused;
49#define F_EXLCK 4 /* or 3 */
50#define F_SHLCK 8 /* or 4 */
51
52/* for leases */
53#define F_INPROGRESS 16
54
55/* operations for bsd flock(), also used by the kernel implementation */
56#define LOCK_SH 1 /* shared lock */
57#define LOCK_EX 2 /* exclusive lock */
58#define LOCK_NB 4 /* or'd with one of the above to prevent
59 blocking */
60#define LOCK_UN 8 /* remove lock */
61
62#define LOCK_MAND 32 /* This is a mandatory flock */
63#define LOCK_READ 64 /* ... Which allows concurrent read operations */
64#define LOCK_WRITE 128 /* ... Which allows concurrent write operations */
65#define LOCK_RW 192 /* ... Which allows concurrent read & write ops */
66
67struct flock {
68 short l_type;
69 short l_whence;
70 off_t l_start;
71 off_t l_len;
72 pid_t l_pid;
73 short __unused;
74};
75 33
76#define F_LINUX_SPECIFIC_BASE 1024 34#include <asm-generic/fcntl.h>
77 35
78#endif /* !(_SPARC64_FCNTL_H) */ 36#endif /* !(_SPARC64_FCNTL_H) */
diff --git a/include/asm-sparc64/futex.h b/include/asm-sparc64/futex.h
new file mode 100644
index 000000000000..2cac5ecd9d00
--- /dev/null
+++ b/include/asm-sparc64/futex.h
@@ -0,0 +1,53 @@
1#ifndef _ASM_FUTEX_H
2#define _ASM_FUTEX_H
3
4#ifdef __KERNEL__
5
6#include <linux/futex.h>
7#include <asm/errno.h>
8#include <asm/uaccess.h>
9
10static inline int
11futex_atomic_op_inuser (int encoded_op, int __user *uaddr)
12{
13 int op = (encoded_op >> 28) & 7;
14 int cmp = (encoded_op >> 24) & 15;
15 int oparg = (encoded_op << 8) >> 20;
16 int cmparg = (encoded_op << 20) >> 20;
17 int oldval = 0, ret, tem;
18 if (encoded_op & (FUTEX_OP_OPARG_SHIFT << 28))
19 oparg = 1 << oparg;
20
21 if (! access_ok (VERIFY_WRITE, uaddr, sizeof(int)))
22 return -EFAULT;
23
24 inc_preempt_count();
25
26 switch (op) {
27 case FUTEX_OP_SET:
28 case FUTEX_OP_ADD:
29 case FUTEX_OP_OR:
30 case FUTEX_OP_ANDN:
31 case FUTEX_OP_XOR:
32 default:
33 ret = -ENOSYS;
34 }
35
36 dec_preempt_count();
37
38 if (!ret) {
39 switch (cmp) {
40 case FUTEX_OP_CMP_EQ: ret = (oldval == cmparg); break;
41 case FUTEX_OP_CMP_NE: ret = (oldval != cmparg); break;
42 case FUTEX_OP_CMP_LT: ret = (oldval < cmparg); break;
43 case FUTEX_OP_CMP_GE: ret = (oldval >= cmparg); break;
44 case FUTEX_OP_CMP_LE: ret = (oldval <= cmparg); break;
45 case FUTEX_OP_CMP_GT: ret = (oldval > cmparg); break;
46 default: ret = -ENOSYS;
47 }
48 }
49 return ret;
50}
51
52#endif
53#endif
diff --git a/include/asm-sparc64/hdreg.h b/include/asm-sparc64/hdreg.h
deleted file mode 100644
index 7f7fd1af0af3..000000000000
--- a/include/asm-sparc64/hdreg.h
+++ /dev/null
@@ -1 +0,0 @@
1#include <asm-generic/hdreg.h>
diff --git a/include/asm-sparc64/uaccess.h b/include/asm-sparc64/uaccess.h
index 5690142f82de..80a65d7e3dbf 100644
--- a/include/asm-sparc64/uaccess.h
+++ b/include/asm-sparc64/uaccess.h
@@ -59,12 +59,6 @@ static inline int access_ok(int type, const void __user * addr, unsigned long si
59 return 1; 59 return 1;
60} 60}
61 61
62/* this function will go away soon - use access_ok() instead */
63static inline int __deprecated verify_area(int type, const void __user * addr, unsigned long size)
64{
65 return 0;
66}
67
68/* 62/*
69 * The exception table consists of pairs of addresses: the first is the 63 * The exception table consists of pairs of addresses: the first is the
70 * address of an instruction that is allowed to fault, and the second is 64 * address of an instruction that is allowed to fault, and the second is
diff --git a/include/asm-um/auxvec.h b/include/asm-um/auxvec.h
new file mode 100644
index 000000000000..1e5e1c2fc9b1
--- /dev/null
+++ b/include/asm-um/auxvec.h
@@ -0,0 +1,4 @@
1#ifndef __UM_AUXVEC_H
2#define __UM_AUXVEC_H
3
4#endif
diff --git a/include/asm-um/futex.h b/include/asm-um/futex.h
new file mode 100644
index 000000000000..2cac5ecd9d00
--- /dev/null
+++ b/include/asm-um/futex.h
@@ -0,0 +1,53 @@
1#ifndef _ASM_FUTEX_H
2#define _ASM_FUTEX_H
3
4#ifdef __KERNEL__
5
6#include <linux/futex.h>
7#include <asm/errno.h>
8#include <asm/uaccess.h>
9
10static inline int
11futex_atomic_op_inuser (int encoded_op, int __user *uaddr)
12{
13 int op = (encoded_op >> 28) & 7;
14 int cmp = (encoded_op >> 24) & 15;
15 int oparg = (encoded_op << 8) >> 20;
16 int cmparg = (encoded_op << 20) >> 20;
17 int oldval = 0, ret, tem;
18 if (encoded_op & (FUTEX_OP_OPARG_SHIFT << 28))
19 oparg = 1 << oparg;
20
21 if (! access_ok (VERIFY_WRITE, uaddr, sizeof(int)))
22 return -EFAULT;
23
24 inc_preempt_count();
25
26 switch (op) {
27 case FUTEX_OP_SET:
28 case FUTEX_OP_ADD:
29 case FUTEX_OP_OR:
30 case FUTEX_OP_ANDN:
31 case FUTEX_OP_XOR:
32 default:
33 ret = -ENOSYS;
34 }
35
36 dec_preempt_count();
37
38 if (!ret) {
39 switch (cmp) {
40 case FUTEX_OP_CMP_EQ: ret = (oldval == cmparg); break;
41 case FUTEX_OP_CMP_NE: ret = (oldval != cmparg); break;
42 case FUTEX_OP_CMP_LT: ret = (oldval < cmparg); break;
43 case FUTEX_OP_CMP_GE: ret = (oldval >= cmparg); break;
44 case FUTEX_OP_CMP_LE: ret = (oldval <= cmparg); break;
45 case FUTEX_OP_CMP_GT: ret = (oldval > cmparg); break;
46 default: ret = -ENOSYS;
47 }
48 }
49 return ret;
50}
51
52#endif
53#endif
diff --git a/include/asm-um/hdreg.h b/include/asm-um/hdreg.h
deleted file mode 100644
index cf6363abcab9..000000000000
--- a/include/asm-um/hdreg.h
+++ /dev/null
@@ -1,6 +0,0 @@
1#ifndef __UM_HDREG_H
2#define __UM_HDREG_H
3
4#include "asm/arch/hdreg.h"
5
6#endif
diff --git a/include/asm-v850/auxvec.h b/include/asm-v850/auxvec.h
new file mode 100644
index 000000000000..f493232d0224
--- /dev/null
+++ b/include/asm-v850/auxvec.h
@@ -0,0 +1,4 @@
1#ifndef __V850_AUXVEC_H__
2#define __V850_AUXVEC_H__
3
4#endif /* __V850_AUXVEC_H__ */
diff --git a/include/asm-v850/fcntl.h b/include/asm-v850/fcntl.h
index 31d4b5961221..3af4d56776dd 100644
--- a/include/asm-v850/fcntl.h
+++ b/include/asm-v850/fcntl.h
@@ -1,87 +1,11 @@
1#ifndef __V850_FCNTL_H__ 1#ifndef __V850_FCNTL_H__
2#define __V850_FCNTL_H__ 2#define __V850_FCNTL_H__
3 3
4/* open/fcntl - O_SYNC is only implemented on blocks devices and on files
5 located on an ext2 file system */
6#define O_ACCMODE 0003
7#define O_RDONLY 00
8#define O_WRONLY 01
9#define O_RDWR 02
10#define O_CREAT 0100 /* not fcntl */
11#define O_EXCL 0200 /* not fcntl */
12#define O_NOCTTY 0400 /* not fcntl */
13#define O_TRUNC 01000 /* not fcntl */
14#define O_APPEND 02000
15#define O_NONBLOCK 04000
16#define O_NDELAY O_NONBLOCK
17#define O_SYNC 010000
18#define FASYNC 020000 /* fcntl, for BSD compatibility */
19#define O_DIRECTORY 040000 /* must be a directory */ 4#define O_DIRECTORY 040000 /* must be a directory */
20#define O_NOFOLLOW 0100000 /* don't follow links */ 5#define O_NOFOLLOW 0100000 /* don't follow links */
21#define O_DIRECT 0200000 /* direct disk access hint - currently ignored */ 6#define O_DIRECT 0200000 /* direct disk access hint - currently ignored */
22#define O_LARGEFILE 0400000 7#define O_LARGEFILE 0400000
23#define O_NOATIME 01000000
24 8
25#define F_DUPFD 0 /* dup */ 9#include <asm-generic/fcntl.h>
26#define F_GETFD 1 /* get close_on_exec */
27#define F_SETFD 2 /* set/clear close_on_exec */
28#define F_GETFL 3 /* get file->f_flags */
29#define F_SETFL 4 /* set file->f_flags */
30#define F_GETLK 5
31#define F_SETLK 6
32#define F_SETLKW 7
33 10
34#define F_SETOWN 8 /* for sockets. */
35#define F_GETOWN 9 /* for sockets. */
36#define F_SETSIG 10 /* for sockets. */
37#define F_GETSIG 11 /* for sockets. */
38
39#define F_GETLK64 12 /* using 'struct flock64' */
40#define F_SETLK64 13
41#define F_SETLKW64 14
42
43/* for F_[GET|SET]FL */
44#define FD_CLOEXEC 1 /* actually anything with low bit set goes */
45
46/* for posix fcntl() and lockf() */
47#define F_RDLCK 0
48#define F_WRLCK 1
49#define F_UNLCK 2
50
51/* for old implementation of bsd flock () */
52#define F_EXLCK 4 /* or 3 */
53#define F_SHLCK 8 /* or 4 */
54
55/* for leases */
56#define F_INPROGRESS 16
57
58/* operations for bsd flock(), also used by the kernel implementation */
59#define LOCK_SH 1 /* shared lock */
60#define LOCK_EX 2 /* exclusive lock */
61#define LOCK_NB 4 /* or'd with one of the above to prevent
62 blocking */
63#define LOCK_UN 8 /* remove lock */
64
65#define LOCK_MAND 32 /* This is a mandatory flock */
66#define LOCK_READ 64 /* ... Which allows concurrent read operations */
67#define LOCK_WRITE 128 /* ... Which allows concurrent write operations */
68#define LOCK_RW 192 /* ... Which allows concurrent read & write ops */
69
70struct flock {
71 short l_type;
72 short l_whence;
73 off_t l_start;
74 off_t l_len;
75 pid_t l_pid;
76};
77
78struct flock64 {
79 short l_type;
80 short l_whence;
81 loff_t l_start;
82 loff_t l_len;
83 pid_t l_pid;
84};
85
86#define F_LINUX_SPECIFIC_BASE 1024
87#endif /* __V850_FCNTL_H__ */ 11#endif /* __V850_FCNTL_H__ */
diff --git a/include/asm-v850/futex.h b/include/asm-v850/futex.h
new file mode 100644
index 000000000000..2cac5ecd9d00
--- /dev/null
+++ b/include/asm-v850/futex.h
@@ -0,0 +1,53 @@
1#ifndef _ASM_FUTEX_H
2#define _ASM_FUTEX_H
3
4#ifdef __KERNEL__
5
6#include <linux/futex.h>
7#include <asm/errno.h>
8#include <asm/uaccess.h>
9
10static inline int
11futex_atomic_op_inuser (int encoded_op, int __user *uaddr)
12{
13 int op = (encoded_op >> 28) & 7;
14 int cmp = (encoded_op >> 24) & 15;
15 int oparg = (encoded_op << 8) >> 20;
16 int cmparg = (encoded_op << 20) >> 20;
17 int oldval = 0, ret, tem;
18 if (encoded_op & (FUTEX_OP_OPARG_SHIFT << 28))
19 oparg = 1 << oparg;
20
21 if (! access_ok (VERIFY_WRITE, uaddr, sizeof(int)))
22 return -EFAULT;
23
24 inc_preempt_count();
25
26 switch (op) {
27 case FUTEX_OP_SET:
28 case FUTEX_OP_ADD:
29 case FUTEX_OP_OR:
30 case FUTEX_OP_ANDN:
31 case FUTEX_OP_XOR:
32 default:
33 ret = -ENOSYS;
34 }
35
36 dec_preempt_count();
37
38 if (!ret) {
39 switch (cmp) {
40 case FUTEX_OP_CMP_EQ: ret = (oldval == cmparg); break;
41 case FUTEX_OP_CMP_NE: ret = (oldval != cmparg); break;
42 case FUTEX_OP_CMP_LT: ret = (oldval < cmparg); break;
43 case FUTEX_OP_CMP_GE: ret = (oldval >= cmparg); break;
44 case FUTEX_OP_CMP_LE: ret = (oldval <= cmparg); break;
45 case FUTEX_OP_CMP_GT: ret = (oldval > cmparg); break;
46 default: ret = -ENOSYS;
47 }
48 }
49 return ret;
50}
51
52#endif
53#endif
diff --git a/include/asm-v850/uaccess.h b/include/asm-v850/uaccess.h
index 4386cfc6a8dd..188b28597cf1 100644
--- a/include/asm-v850/uaccess.h
+++ b/include/asm-v850/uaccess.h
@@ -27,12 +27,6 @@ extern inline int access_ok (int type, const void *addr, unsigned long size)
27 return val >= (0x80 + NUM_CPU_IRQS*16) && val < 0xFFFFF000; 27 return val >= (0x80 + NUM_CPU_IRQS*16) && val < 0xFFFFF000;
28} 28}
29 29
30/* this function will go away soon - use access_ok() instead */
31extern inline int __deprecated verify_area (int type, const void *addr, unsigned long size)
32{
33 return access_ok (type, addr, size) ? 0 : -EFAULT;
34}
35
36/* 30/*
37 * The exception table consists of pairs of addresses: the first is the 31 * The exception table consists of pairs of addresses: the first is the
38 * address of an instruction that is allowed to fault, and the second is 32 * address of an instruction that is allowed to fault, and the second is
diff --git a/include/asm-x86_64/auxvec.h b/include/asm-x86_64/auxvec.h
new file mode 100644
index 000000000000..2403c4cfced2
--- /dev/null
+++ b/include/asm-x86_64/auxvec.h
@@ -0,0 +1,4 @@
1#ifndef __ASM_X86_64_AUXVEC_H
2#define __ASM_X86_64_AUXVEC_H
3
4#endif
diff --git a/include/asm-x86_64/compat.h b/include/asm-x86_64/compat.h
index d0f453c5adfc..f0155c38f639 100644
--- a/include/asm-x86_64/compat.h
+++ b/include/asm-x86_64/compat.h
@@ -14,10 +14,10 @@ typedef s32 compat_ssize_t;
14typedef s32 compat_time_t; 14typedef s32 compat_time_t;
15typedef s32 compat_clock_t; 15typedef s32 compat_clock_t;
16typedef s32 compat_pid_t; 16typedef s32 compat_pid_t;
17typedef u16 compat_uid_t; 17typedef u16 __compat_uid_t;
18typedef u16 compat_gid_t; 18typedef u16 __compat_gid_t;
19typedef u32 compat_uid32_t; 19typedef u32 __compat_uid32_t;
20typedef u32 compat_gid32_t; 20typedef u32 __compat_gid32_t;
21typedef u16 compat_mode_t; 21typedef u16 compat_mode_t;
22typedef u32 compat_ino_t; 22typedef u32 compat_ino_t;
23typedef u16 compat_dev_t; 23typedef u16 compat_dev_t;
@@ -52,8 +52,8 @@ struct compat_stat {
52 compat_ino_t st_ino; 52 compat_ino_t st_ino;
53 compat_mode_t st_mode; 53 compat_mode_t st_mode;
54 compat_nlink_t st_nlink; 54 compat_nlink_t st_nlink;
55 compat_uid_t st_uid; 55 __compat_uid_t st_uid;
56 compat_gid_t st_gid; 56 __compat_gid_t st_gid;
57 compat_dev_t st_rdev; 57 compat_dev_t st_rdev;
58 u16 __pad2; 58 u16 __pad2;
59 u32 st_size; 59 u32 st_size;
@@ -122,10 +122,10 @@ typedef u32 compat_sigset_word;
122 122
123struct compat_ipc64_perm { 123struct compat_ipc64_perm {
124 compat_key_t key; 124 compat_key_t key;
125 compat_uid32_t uid; 125 __compat_uid32_t uid;
126 compat_gid32_t gid; 126 __compat_gid32_t gid;
127 compat_uid32_t cuid; 127 __compat_uid32_t cuid;
128 compat_gid32_t cgid; 128 __compat_gid32_t cgid;
129 unsigned short mode; 129 unsigned short mode;
130 unsigned short __pad1; 130 unsigned short __pad1;
131 unsigned short seq; 131 unsigned short seq;
diff --git a/include/asm-x86_64/fcntl.h b/include/asm-x86_64/fcntl.h
index 4411f221c037..46ab12db5739 100644
--- a/include/asm-x86_64/fcntl.h
+++ b/include/asm-x86_64/fcntl.h
@@ -1,76 +1 @@
1#ifndef _X86_64_FCNTL_H 1#include <asm-generic/fcntl.h>
2#define _X86_64_FCNTL_H
3
4/* open/fcntl - O_SYNC is only implemented on blocks devices and on files
5 located on an ext2 file system */
6#define O_ACCMODE 0003
7#define O_RDONLY 00
8#define O_WRONLY 01
9#define O_RDWR 02
10#define O_CREAT 0100 /* not fcntl */
11#define O_EXCL 0200 /* not fcntl */
12#define O_NOCTTY 0400 /* not fcntl */
13#define O_TRUNC 01000 /* not fcntl */
14#define O_APPEND 02000
15#define O_NONBLOCK 04000
16#define O_NDELAY O_NONBLOCK
17#define O_SYNC 010000
18#define FASYNC 020000 /* fcntl, for BSD compatibility */
19#define O_DIRECT 040000 /* direct disk access hint */
20#define O_LARGEFILE 0100000
21#define O_DIRECTORY 0200000 /* must be a directory */
22#define O_NOFOLLOW 0400000 /* don't follow links */
23#define O_NOATIME 01000000
24
25#define F_DUPFD 0 /* dup */
26#define F_GETFD 1 /* get close_on_exec */
27#define F_SETFD 2 /* set/clear close_on_exec */
28#define F_GETFL 3 /* get file->f_flags */
29#define F_SETFL 4 /* set file->f_flags */
30#define F_GETLK 5
31#define F_SETLK 6
32#define F_SETLKW 7
33
34#define F_SETOWN 8 /* for sockets. */
35#define F_GETOWN 9 /* for sockets. */
36#define F_SETSIG 10 /* for sockets. */
37#define F_GETSIG 11 /* for sockets. */
38
39/* for F_[GET|SET]FL */
40#define FD_CLOEXEC 1 /* actually anything with low bit set goes */
41
42/* for posix fcntl() and lockf() */
43#define F_RDLCK 0
44#define F_WRLCK 1
45#define F_UNLCK 2
46
47/* for old implementation of bsd flock () */
48#define F_EXLCK 4 /* or 3 */
49#define F_SHLCK 8 /* or 4 */
50
51/* for leases */
52#define F_INPROGRESS 16
53
54/* operations for bsd flock(), also used by the kernel implementation */
55#define LOCK_SH 1 /* shared lock */
56#define LOCK_EX 2 /* exclusive lock */
57#define LOCK_NB 4 /* or'd with one of the above to prevent
58 blocking */
59#define LOCK_UN 8 /* remove lock */
60
61#define LOCK_MAND 32 /* This is a mandatory flock */
62#define LOCK_READ 64 /* ... Which allows concurrent read operations */
63#define LOCK_WRITE 128 /* ... Which allows concurrent write operations */
64#define LOCK_RW 192 /* ... Which allows concurrent read & write ops */
65
66struct flock {
67 short l_type;
68 short l_whence;
69 off_t l_start;
70 off_t l_len;
71 pid_t l_pid;
72};
73
74#define F_LINUX_SPECIFIC_BASE 1024
75
76#endif /* !_X86_64_FCNTL_H */
diff --git a/include/asm-x86_64/futex.h b/include/asm-x86_64/futex.h
new file mode 100644
index 000000000000..8602c09bf89e
--- /dev/null
+++ b/include/asm-x86_64/futex.h
@@ -0,0 +1,98 @@
1#ifndef _ASM_FUTEX_H
2#define _ASM_FUTEX_H
3
4#ifdef __KERNEL__
5
6#include <linux/futex.h>
7#include <asm/errno.h>
8#include <asm/system.h>
9#include <asm/uaccess.h>
10
11#define __futex_atomic_op1(insn, ret, oldval, uaddr, oparg) \
12 __asm__ __volatile ( \
13"1: " insn "\n" \
14"2: .section .fixup,\"ax\"\n\
153: mov %3, %1\n\
16 jmp 2b\n\
17 .previous\n\
18 .section __ex_table,\"a\"\n\
19 .align 8\n\
20 .quad 1b,3b\n\
21 .previous" \
22 : "=r" (oldval), "=r" (ret), "=m" (*uaddr) \
23 : "i" (-EFAULT), "m" (*uaddr), "0" (oparg), "1" (0))
24
25#define __futex_atomic_op2(insn, ret, oldval, uaddr, oparg) \
26 __asm__ __volatile ( \
27"1: movl %2, %0\n\
28 movl %0, %3\n" \
29 insn "\n" \
30"2: " LOCK_PREFIX "cmpxchgl %3, %2\n\
31 jnz 1b\n\
323: .section .fixup,\"ax\"\n\
334: mov %5, %1\n\
34 jmp 3b\n\
35 .previous\n\
36 .section __ex_table,\"a\"\n\
37 .align 8\n\
38 .quad 1b,4b,2b,4b\n\
39 .previous" \
40 : "=&a" (oldval), "=&r" (ret), "=m" (*uaddr), \
41 "=&r" (tem) \
42 : "r" (oparg), "i" (-EFAULT), "m" (*uaddr), "1" (0))
43
44static inline int
45futex_atomic_op_inuser (int encoded_op, int __user *uaddr)
46{
47 int op = (encoded_op >> 28) & 7;
48 int cmp = (encoded_op >> 24) & 15;
49 int oparg = (encoded_op << 8) >> 20;
50 int cmparg = (encoded_op << 20) >> 20;
51 int oldval = 0, ret, tem;
52 if (encoded_op & (FUTEX_OP_OPARG_SHIFT << 28))
53 oparg = 1 << oparg;
54
55 if (! access_ok (VERIFY_WRITE, uaddr, sizeof(int)))
56 return -EFAULT;
57
58 inc_preempt_count();
59
60 switch (op) {
61 case FUTEX_OP_SET:
62 __futex_atomic_op1("xchgl %0, %2", ret, oldval, uaddr, oparg);
63 break;
64 case FUTEX_OP_ADD:
65 __futex_atomic_op1(LOCK_PREFIX "xaddl %0, %2", ret, oldval,
66 uaddr, oparg);
67 break;
68 case FUTEX_OP_OR:
69 __futex_atomic_op2("orl %4, %3", ret, oldval, uaddr, oparg);
70 break;
71 case FUTEX_OP_ANDN:
72 __futex_atomic_op2("andl %4, %3", ret, oldval, uaddr, ~oparg);
73 break;
74 case FUTEX_OP_XOR:
75 __futex_atomic_op2("xorl %4, %3", ret, oldval, uaddr, oparg);
76 break;
77 default:
78 ret = -ENOSYS;
79 }
80
81 dec_preempt_count();
82
83 if (!ret) {
84 switch (cmp) {
85 case FUTEX_OP_CMP_EQ: ret = (oldval == cmparg); break;
86 case FUTEX_OP_CMP_NE: ret = (oldval != cmparg); break;
87 case FUTEX_OP_CMP_LT: ret = (oldval < cmparg); break;
88 case FUTEX_OP_CMP_GE: ret = (oldval >= cmparg); break;
89 case FUTEX_OP_CMP_LE: ret = (oldval <= cmparg); break;
90 case FUTEX_OP_CMP_GT: ret = (oldval > cmparg); break;
91 default: ret = -ENOSYS;
92 }
93 }
94 return ret;
95}
96
97#endif
98#endif
diff --git a/include/asm-x86_64/hdreg.h b/include/asm-x86_64/hdreg.h
deleted file mode 100644
index 5989bbc97cbf..000000000000
--- a/include/asm-x86_64/hdreg.h
+++ /dev/null
@@ -1 +0,0 @@
1#warning this file is obsolete, please do not use it
diff --git a/include/asm-x86_64/processor.h b/include/asm-x86_64/processor.h
index 194160f6a43f..a8321999448f 100644
--- a/include/asm-x86_64/processor.h
+++ b/include/asm-x86_64/processor.h
@@ -398,7 +398,7 @@ static inline void prefetch(void *x)
398#define ARCH_HAS_PREFETCHW 1 398#define ARCH_HAS_PREFETCHW 1
399static inline void prefetchw(void *x) 399static inline void prefetchw(void *x)
400{ 400{
401 alternative_input(ASM_NOP5, 401 alternative_input("prefetcht0 (%1)",
402 "prefetchw (%1)", 402 "prefetchw (%1)",
403 X86_FEATURE_3DNOW, 403 X86_FEATURE_3DNOW,
404 "r" (x)); 404 "r" (x));
diff --git a/include/asm-x86_64/uaccess.h b/include/asm-x86_64/uaccess.h
index 48f292752c96..1bb8b8a24436 100644
--- a/include/asm-x86_64/uaccess.h
+++ b/include/asm-x86_64/uaccess.h
@@ -49,13 +49,6 @@
49 49
50#define access_ok(type, addr, size) (__range_not_ok(addr,size) == 0) 50#define access_ok(type, addr, size) (__range_not_ok(addr,size) == 0)
51 51
52/* this function will go away soon - use access_ok() instead */
53extern inline int __deprecated verify_area(int type, const void __user * addr, unsigned long size)
54{
55 return access_ok(type,addr,size) ? 0 : -EFAULT;
56}
57
58
59/* 52/*
60 * The exception table consists of pairs of addresses: the first is the 53 * The exception table consists of pairs of addresses: the first is the
61 * address of an instruction that is allowed to fault, and the second is 54 * address of an instruction that is allowed to fault, and the second is
diff --git a/include/asm-xtensa/auxvec.h b/include/asm-xtensa/auxvec.h
new file mode 100644
index 000000000000..257dec75c5af
--- /dev/null
+++ b/include/asm-xtensa/auxvec.h
@@ -0,0 +1,4 @@
1#ifndef __XTENSA_AUXVEC_H
2#define __XTENSA_AUXVEC_H
3
4#endif
diff --git a/include/asm-xtensa/fcntl.h b/include/asm-xtensa/fcntl.h
index 48876bb727d2..ec066ae96caf 100644
--- a/include/asm-xtensa/fcntl.h
+++ b/include/asm-xtensa/fcntl.h
@@ -14,31 +14,17 @@
14 14
15/* open/fcntl - O_SYNC is only implemented on blocks devices and on files 15/* open/fcntl - O_SYNC is only implemented on blocks devices and on files
16 located on an ext2 file system */ 16 located on an ext2 file system */
17#define O_ACCMODE 0x0003
18#define O_RDONLY 0x0000
19#define O_WRONLY 0x0001
20#define O_RDWR 0x0002
21#define O_APPEND 0x0008 17#define O_APPEND 0x0008
22#define O_SYNC 0x0010 18#define O_SYNC 0x0010
23#define O_NONBLOCK 0x0080 19#define O_NONBLOCK 0x0080
24#define O_CREAT 0x0100 /* not fcntl */ 20#define O_CREAT 0x0100 /* not fcntl */
25#define O_TRUNC 0x0200 /* not fcntl */
26#define O_EXCL 0x0400 /* not fcntl */ 21#define O_EXCL 0x0400 /* not fcntl */
27#define O_NOCTTY 0x0800 /* not fcntl */ 22#define O_NOCTTY 0x0800 /* not fcntl */
28#define FASYNC 0x1000 /* fcntl, for BSD compatibility */ 23#define FASYNC 0x1000 /* fcntl, for BSD compatibility */
29#define O_LARGEFILE 0x2000 /* allow large file opens - currently ignored */ 24#define O_LARGEFILE 0x2000 /* allow large file opens - currently ignored */
30#define O_DIRECT 0x8000 /* direct disk access hint - currently ignored*/ 25#define O_DIRECT 0x8000 /* direct disk access hint - currently ignored*/
31#define O_DIRECTORY 0x10000 /* must be a directory */
32#define O_NOFOLLOW 0x20000 /* don't follow links */
33#define O_NOATIME 0x100000 26#define O_NOATIME 0x100000
34 27
35#define O_NDELAY O_NONBLOCK
36
37#define F_DUPFD 0 /* dup */
38#define F_GETFD 1 /* get close_on_exec */
39#define F_SETFD 2 /* set/clear close_on_exec */
40#define F_GETFL 3 /* get file->f_flags */
41#define F_SETFL 4 /* set file->f_flags */
42#define F_GETLK 14 28#define F_GETLK 14
43#define F_GETLK64 15 29#define F_GETLK64 15
44#define F_SETLK 6 30#define F_SETLK 6
@@ -48,35 +34,6 @@
48 34
49#define F_SETOWN 24 /* for sockets. */ 35#define F_SETOWN 24 /* for sockets. */
50#define F_GETOWN 23 /* for sockets. */ 36#define F_GETOWN 23 /* for sockets. */
51#define F_SETSIG 10 /* for sockets. */
52#define F_GETSIG 11 /* for sockets. */
53
54/* for F_[GET|SET]FL */
55#define FD_CLOEXEC 1 /* actually anything with low bit set goes */
56
57/* for posix fcntl() and lockf() */
58#define F_RDLCK 0
59#define F_WRLCK 1
60#define F_UNLCK 2
61
62/* for old implementation of bsd flock () */
63#define F_EXLCK 4 /* or 3 */
64#define F_SHLCK 8 /* or 4 */
65
66/* for leases */
67#define F_INPROGRESS 16
68
69/* operations for bsd flock(), also used by the kernel implementation */
70#define LOCK_SH 1 /* shared lock */
71#define LOCK_EX 2 /* exclusive lock */
72#define LOCK_NB 4 /* or'd with one of the above to prevent
73 blocking */
74#define LOCK_UN 8 /* remove lock */
75
76#define LOCK_MAND 32 /* This is a mandatory flock ... */
77#define LOCK_READ 64 /* which allows concurrent read operations */
78#define LOCK_WRITE 128 /* which allows concurrent write operations */
79#define LOCK_RW 192 /* which allows concurrent read & write ops */
80 37
81typedef struct flock { 38typedef struct flock {
82 short l_type; 39 short l_type;
@@ -96,6 +53,9 @@ struct flock64 {
96 pid_t l_pid; 53 pid_t l_pid;
97}; 54};
98 55
99#define F_LINUX_SPECIFIC_BASE 1024 56#define HAVE_ARCH_STRUCT_FLOCK
57#define HAVE_ARCH_STRUCT_FLOCK64
58
59#include <asm-generic/fcntl.h>
100 60
101#endif /* _XTENSA_FCNTL_H */ 61#endif /* _XTENSA_FCNTL_H */
diff --git a/include/asm-xtensa/hdreg.h b/include/asm-xtensa/hdreg.h
deleted file mode 100644
index 64b80607b80d..000000000000
--- a/include/asm-xtensa/hdreg.h
+++ /dev/null
@@ -1,17 +0,0 @@
1/*
2 * include/asm-xtensa/hdreg.h
3 *
4 * This file is subject to the terms and conditions of the GNU General
5 * Public License. See the file "COPYING" in the main directory of
6 * this archive for more details.
7 *
8 * Copyright (C) 2002 - 2005 Tensilica Inc.
9 * Copyright (C) 1994-1996 Linus Torvalds & authors
10 */
11
12#ifndef _XTENSA_HDREG_H
13#define _XTENSA_HDREG_H
14
15typedef unsigned int ide_ioreg_t;
16
17#endif
diff --git a/include/linux/auxvec.h b/include/linux/auxvec.h
new file mode 100644
index 000000000000..9a7b374c9fb4
--- /dev/null
+++ b/include/linux/auxvec.h
@@ -0,0 +1,31 @@
1#ifndef _LINUX_AUXVEC_H
2#define _LINUX_AUXVEC_H
3
4#include <asm/auxvec.h>
5
6/* Symbolic values for the entries in the auxiliary table
7 put on the initial stack */
8#define AT_NULL 0 /* end of vector */
9#define AT_IGNORE 1 /* entry should be ignored */
10#define AT_EXECFD 2 /* file descriptor of program */
11#define AT_PHDR 3 /* program headers for program */
12#define AT_PHENT 4 /* size of program header entry */
13#define AT_PHNUM 5 /* number of program headers */
14#define AT_PAGESZ 6 /* system page size */
15#define AT_BASE 7 /* base address of interpreter */
16#define AT_FLAGS 8 /* flags */
17#define AT_ENTRY 9 /* entry point of program */
18#define AT_NOTELF 10 /* program is not ELF */
19#define AT_UID 11 /* real uid */
20#define AT_EUID 12 /* effective uid */
21#define AT_GID 13 /* real gid */
22#define AT_EGID 14 /* effective gid */
23#define AT_PLATFORM 15 /* string identifying CPU for optimizations */
24#define AT_HWCAP 16 /* arch dependent hints at CPU capabilities */
25#define AT_CLKTCK 17 /* frequency at which times() increments */
26
27#define AT_SECURE 23 /* secure mode boolean */
28
29#define AT_VECTOR_SIZE 42 /* Size of auxiliary table. */
30
31#endif /* _LINUX_AUXVEC_H */
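The AT_* entries collected here are the values the kernel places on a new program's initial stack, immediately after the terminating NULL of envp. A hedged user-space sketch (glibc/ELF assumptions, not part of this patch) that walks that vector and prints the AT_PAGESZ entry:

	#include <link.h>	/* ElfW() and the ElfW(auxv_t) type */
	#include <stdio.h>

	int main(int argc, char *argv[], char *envp[])
	{
		char **p = envp;
		ElfW(auxv_t) *aux;

		while (*p)			/* skip to the end of envp */
			p++;
		for (aux = (ElfW(auxv_t) *)(p + 1); aux->a_type != AT_NULL; aux++)
			if (aux->a_type == AT_PAGESZ)
				printf("AT_PAGESZ = %lu\n",
				       (unsigned long)aux->a_un.a_val);
		return 0;
	}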
diff --git a/include/linux/bio.h b/include/linux/bio.h
index 36ef29fa0d8b..69e047989f1c 100644
--- a/include/linux/bio.h
+++ b/include/linux/bio.h
@@ -111,7 +111,6 @@ struct bio {
111 void *bi_private; 111 void *bi_private;
112 112
113 bio_destructor_t *bi_destructor; /* destructor */ 113 bio_destructor_t *bi_destructor; /* destructor */
114 struct bio_set *bi_set; /* memory pools set */
115}; 114};
116 115
117/* 116/*
@@ -280,6 +279,7 @@ extern void bioset_free(struct bio_set *);
280extern struct bio *bio_alloc(unsigned int __nocast, int); 279extern struct bio *bio_alloc(unsigned int __nocast, int);
281extern struct bio *bio_alloc_bioset(unsigned int __nocast, int, struct bio_set *); 280extern struct bio *bio_alloc_bioset(unsigned int __nocast, int, struct bio_set *);
282extern void bio_put(struct bio *); 281extern void bio_put(struct bio *);
282extern void bio_free(struct bio *, struct bio_set *);
283 283
284extern void bio_endio(struct bio *, unsigned int, int); 284extern void bio_endio(struct bio *, unsigned int, int);
285struct request_queue; 285struct request_queue;
diff --git a/include/linux/compat.h b/include/linux/compat.h
index b58b7d6f2fdb..f9ca534787e2 100644
--- a/include/linux/compat.h
+++ b/include/linux/compat.h
@@ -18,6 +18,9 @@
18#define compat_jiffies_to_clock_t(x) \ 18#define compat_jiffies_to_clock_t(x) \
19 (((unsigned long)(x) * COMPAT_USER_HZ) / HZ) 19 (((unsigned long)(x) * COMPAT_USER_HZ) / HZ)
20 20
21typedef __compat_uid32_t compat_uid_t;
22typedef __compat_gid32_t compat_gid_t;
23
21struct rusage; 24struct rusage;
22 25
23struct compat_itimerspec { 26struct compat_itimerspec {
diff --git a/include/linux/cpuset.h b/include/linux/cpuset.h
index 3438233305a3..24062a1dbf61 100644
--- a/include/linux/cpuset.h
+++ b/include/linux/cpuset.h
@@ -23,7 +23,8 @@ void cpuset_init_current_mems_allowed(void);
23void cpuset_update_current_mems_allowed(void); 23void cpuset_update_current_mems_allowed(void);
24void cpuset_restrict_to_mems_allowed(unsigned long *nodes); 24void cpuset_restrict_to_mems_allowed(unsigned long *nodes);
25int cpuset_zonelist_valid_mems_allowed(struct zonelist *zl); 25int cpuset_zonelist_valid_mems_allowed(struct zonelist *zl);
26int cpuset_zone_allowed(struct zone *z); 26extern int cpuset_zone_allowed(struct zone *z, unsigned int __nocast gfp_mask);
27extern int cpuset_excl_nodes_overlap(const struct task_struct *p);
27extern struct file_operations proc_cpuset_operations; 28extern struct file_operations proc_cpuset_operations;
28extern char *cpuset_task_status_allowed(struct task_struct *task, char *buffer); 29extern char *cpuset_task_status_allowed(struct task_struct *task, char *buffer);
29 30
@@ -48,7 +49,13 @@ static inline int cpuset_zonelist_valid_mems_allowed(struct zonelist *zl)
48 return 1; 49 return 1;
49} 50}
50 51
51static inline int cpuset_zone_allowed(struct zone *z) 52static inline int cpuset_zone_allowed(struct zone *z,
53 unsigned int __nocast gfp_mask)
54{
55 return 1;
56}
57
58static inline int cpuset_excl_nodes_overlap(const struct task_struct *p)
52{ 59{
53 return 1; 60 return 1;
54} 61}
diff --git a/include/linux/dcache.h b/include/linux/dcache.h
index 50be290d24d2..ab04b4f9b0db 100644
--- a/include/linux/dcache.h
+++ b/include/linux/dcache.h
@@ -88,8 +88,9 @@ struct dentry {
88 * negative */ 88 * negative */
89 /* 89 /*
90 * The next three fields are touched by __d_lookup. Place them here 90 * The next three fields are touched by __d_lookup. Place them here
91 * so they all fit in a 16-byte range, with 16-byte alignment. 91 * so they all fit in a cache line.
92 */ 92 */
93 struct hlist_node d_hash; /* lookup hash list */
93 struct dentry *d_parent; /* parent directory */ 94 struct dentry *d_parent; /* parent directory */
94 struct qstr d_name; 95 struct qstr d_name;
95 96
@@ -103,7 +104,6 @@ struct dentry {
103 void *d_fsdata; /* fs-specific data */ 104 void *d_fsdata; /* fs-specific data */
104 struct rcu_head d_rcu; 105 struct rcu_head d_rcu;
105 struct dcookie_struct *d_cookie; /* cookie, if any */ 106 struct dcookie_struct *d_cookie; /* cookie, if any */
106 struct hlist_node d_hash; /* lookup hash list */
107 int d_mounted; 107 int d_mounted;
108 unsigned char d_iname[DNAME_INLINE_LEN_MIN]; /* small names */ 108 unsigned char d_iname[DNAME_INLINE_LEN_MIN]; /* small names */
109}; 109};
diff --git a/include/linux/dmi.h b/include/linux/dmi.h
index 5e93e6dce9a4..c30175e8dec6 100644
--- a/include/linux/dmi.h
+++ b/include/linux/dmi.h
@@ -1,6 +1,8 @@
1#ifndef __DMI_H__ 1#ifndef __DMI_H__
2#define __DMI_H__ 2#define __DMI_H__
3 3
4#include <linux/list.h>
5
4enum dmi_field { 6enum dmi_field {
5 DMI_NONE, 7 DMI_NONE,
6 DMI_BIOS_VENDOR, 8 DMI_BIOS_VENDOR,
@@ -16,6 +18,24 @@ enum dmi_field {
16 DMI_STRING_MAX, 18 DMI_STRING_MAX,
17}; 19};
18 20
21enum dmi_device_type {
22 DMI_DEV_TYPE_ANY = 0,
23 DMI_DEV_TYPE_OTHER,
24 DMI_DEV_TYPE_UNKNOWN,
25 DMI_DEV_TYPE_VIDEO,
26 DMI_DEV_TYPE_SCSI,
27 DMI_DEV_TYPE_ETHERNET,
28 DMI_DEV_TYPE_TOKENRING,
29 DMI_DEV_TYPE_SOUND,
30 DMI_DEV_TYPE_IPMI = -1
31};
32
33struct dmi_header {
34 u8 type;
35 u8 length;
36 u16 handle;
37};
38
19/* 39/*
20 * DMI callbacks for problem boards 40 * DMI callbacks for problem boards
21 */ 41 */
@@ -26,22 +46,32 @@ struct dmi_strmatch {
26 46
27struct dmi_system_id { 47struct dmi_system_id {
28 int (*callback)(struct dmi_system_id *); 48 int (*callback)(struct dmi_system_id *);
29 char *ident; 49 const char *ident;
30 struct dmi_strmatch matches[4]; 50 struct dmi_strmatch matches[4];
31 void *driver_data; 51 void *driver_data;
32}; 52};
33 53
34#define DMI_MATCH(a,b) { a, b } 54#define DMI_MATCH(a, b) { a, b }
55
56struct dmi_device {
57 struct list_head list;
58 int type;
59 const char *name;
60 void *device_data; /* Type specific data */
61};
35 62
36#if defined(CONFIG_X86) && !defined(CONFIG_X86_64) 63#if defined(CONFIG_X86) && !defined(CONFIG_X86_64)
37 64
38extern int dmi_check_system(struct dmi_system_id *list); 65extern int dmi_check_system(struct dmi_system_id *list);
39extern char * dmi_get_system_info(int field); 66extern char * dmi_get_system_info(int field);
40 67extern struct dmi_device * dmi_find_device(int type, const char *name,
68 struct dmi_device *from);
41#else 69#else
42 70
43static inline int dmi_check_system(struct dmi_system_id *list) { return 0; } 71static inline int dmi_check_system(struct dmi_system_id *list) { return 0; }
44static inline char * dmi_get_system_info(int field) { return NULL; } 72static inline char * dmi_get_system_info(int field) { return NULL; }
73static struct dmi_device * dmi_find_device(int type, const char *name,
74 struct dmi_device *from) { return NULL; }
45 75
46#endif 76#endif
47 77
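The new dmi_find_device() declaration above is an iterator: passing the previous result back as 'from' walks the list of struct dmi_device entries recorded by the DMI scan. A hypothetical consumer (not part of this patch) that lists every DMI-described IPMI device might look like:

	#include <linux/dmi.h>
	#include <linux/kernel.h>

	static void list_dmi_ipmi_devices(void)
	{
		struct dmi_device *dev = NULL;

		while ((dev = dmi_find_device(DMI_DEV_TYPE_IPMI, NULL, dev)))
			printk(KERN_INFO "DMI IPMI device: %s\n", dev->name);
	}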
diff --git a/include/linux/elf.h b/include/linux/elf.h
index f5b3ba5a317d..ff955dbf510d 100644
--- a/include/linux/elf.h
+++ b/include/linux/elf.h
@@ -2,6 +2,7 @@
2#define _LINUX_ELF_H 2#define _LINUX_ELF_H
3 3
4#include <linux/types.h> 4#include <linux/types.h>
5#include <linux/auxvec.h>
5#include <asm/elf.h> 6#include <asm/elf.h>
6 7
7#ifndef elf_read_implies_exec 8#ifndef elf_read_implies_exec
@@ -158,29 +159,6 @@ typedef __s64 Elf64_Sxword;
158#define ELF64_ST_BIND(x) ELF_ST_BIND(x) 159#define ELF64_ST_BIND(x) ELF_ST_BIND(x)
159#define ELF64_ST_TYPE(x) ELF_ST_TYPE(x) 160#define ELF64_ST_TYPE(x) ELF_ST_TYPE(x)
160 161
161/* Symbolic values for the entries in the auxiliary table
162 put on the initial stack */
163#define AT_NULL 0 /* end of vector */
164#define AT_IGNORE 1 /* entry should be ignored */
165#define AT_EXECFD 2 /* file descriptor of program */
166#define AT_PHDR 3 /* program headers for program */
167#define AT_PHENT 4 /* size of program header entry */
168#define AT_PHNUM 5 /* number of program headers */
169#define AT_PAGESZ 6 /* system page size */
170#define AT_BASE 7 /* base address of interpreter */
171#define AT_FLAGS 8 /* flags */
172#define AT_ENTRY 9 /* entry point of program */
173#define AT_NOTELF 10 /* program is not ELF */
174#define AT_UID 11 /* real uid */
175#define AT_EUID 12 /* effective uid */
176#define AT_GID 13 /* real gid */
177#define AT_EGID 14 /* effective gid */
178#define AT_PLATFORM 15 /* string identifying CPU for optimizations */
179#define AT_HWCAP 16 /* arch dependent hints at CPU capabilities */
180#define AT_CLKTCK 17 /* frequency at which times() increments */
181
182#define AT_SECURE 23 /* secure mode boolean */
183
184typedef struct dynamic{ 162typedef struct dynamic{
185 Elf32_Sword d_tag; 163 Elf32_Sword d_tag;
186 union{ 164 union{
diff --git a/include/linux/ext2_fs.h b/include/linux/ext2_fs.h
index a657130ba03a..f7bd1c7ebefb 100644
--- a/include/linux/ext2_fs.h
+++ b/include/linux/ext2_fs.h
@@ -313,6 +313,9 @@ struct ext2_inode {
313#define EXT2_MOUNT_XATTR_USER 0x004000 /* Extended user attributes */ 313#define EXT2_MOUNT_XATTR_USER 0x004000 /* Extended user attributes */
314#define EXT2_MOUNT_POSIX_ACL 0x008000 /* POSIX Access Control Lists */ 314#define EXT2_MOUNT_POSIX_ACL 0x008000 /* POSIX Access Control Lists */
315#define EXT2_MOUNT_XIP 0x010000 /* Execute in place */ 315#define EXT2_MOUNT_XIP 0x010000 /* Execute in place */
316#define EXT2_MOUNT_USRQUOTA 0x020000 /* user quota */
317#define EXT2_MOUNT_GRPQUOTA 0x040000 /* group quota */
318
316 319
317#define clear_opt(o, opt) o &= ~EXT2_MOUNT_##opt 320#define clear_opt(o, opt) o &= ~EXT2_MOUNT_##opt
318#define set_opt(o, opt) o |= EXT2_MOUNT_##opt 321#define set_opt(o, opt) o |= EXT2_MOUNT_##opt
diff --git a/include/linux/ext3_fs.h b/include/linux/ext3_fs.h
index c16662836c58..c0272d73ab20 100644
--- a/include/linux/ext3_fs.h
+++ b/include/linux/ext3_fs.h
@@ -373,6 +373,8 @@ struct ext3_inode {
373#define EXT3_MOUNT_BARRIER 0x20000 /* Use block barriers */ 373#define EXT3_MOUNT_BARRIER 0x20000 /* Use block barriers */
374#define EXT3_MOUNT_NOBH 0x40000 /* No bufferheads */ 374#define EXT3_MOUNT_NOBH 0x40000 /* No bufferheads */
375#define EXT3_MOUNT_QUOTA 0x80000 /* Some quota option set */ 375#define EXT3_MOUNT_QUOTA 0x80000 /* Some quota option set */
376#define EXT3_MOUNT_USRQUOTA 0x100000 /* "old" user quota */
377#define EXT3_MOUNT_GRPQUOTA 0x200000 /* "old" group quota */
376 378
377/* Compatibility, for having both ext2_fs.h and ext3_fs.h included at once */ 379/* Compatibility, for having both ext2_fs.h and ext3_fs.h included at once */
378#ifndef _LINUX_EXT2_FS_H 380#ifndef _LINUX_EXT2_FS_H
diff --git a/include/linux/firmware.h b/include/linux/firmware.h
index 886255b69bb9..2063c0839d4f 100644
--- a/include/linux/firmware.h
+++ b/include/linux/firmware.h
@@ -3,6 +3,9 @@
3#include <linux/module.h> 3#include <linux/module.h>
4#include <linux/types.h> 4#include <linux/types.h>
5#define FIRMWARE_NAME_MAX 30 5#define FIRMWARE_NAME_MAX 30
6#define FW_ACTION_NOHOTPLUG 0
7#define FW_ACTION_HOTPLUG 1
8
6struct firmware { 9struct firmware {
7 size_t size; 10 size_t size;
8 u8 *data; 11 u8 *data;
@@ -11,7 +14,7 @@ struct device;
11int request_firmware(const struct firmware **fw, const char *name, 14int request_firmware(const struct firmware **fw, const char *name,
12 struct device *device); 15 struct device *device);
13int request_firmware_nowait( 16int request_firmware_nowait(
14 struct module *module, 17 struct module *module, int hotplug,
15 const char *name, struct device *device, void *context, 18 const char *name, struct device *device, void *context,
16 void (*cont)(const struct firmware *fw, void *context)); 19 void (*cont)(const struct firmware *fw, void *context));
17 20
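request_firmware_nowait() gains a second argument selecting whether the load may go through the userspace hotplug helper; callers updating to the new prototype pass FW_ACTION_HOTPLUG or FW_ACTION_NOHOTPLUG. A hypothetical call site (the firmware name, device pointer, context and callback are made up for illustration):

	#include <linux/firmware.h>

	static void foo_fw_cont(const struct firmware *fw, void *context)
	{
		/* program the device from fw->data / fw->size,
		 * then release the firmware image */
	}

	static int foo_load_firmware(struct device *dev, void *ctx)
	{
		return request_firmware_nowait(THIS_MODULE, FW_ACTION_HOTPLUG,
					       "foo-firmware.bin", dev, ctx,
					       foo_fw_cont);
	}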
diff --git a/include/linux/fs.h b/include/linux/fs.h
index 67e6732d4fdc..fd93ab7da905 100644
--- a/include/linux/fs.h
+++ b/include/linux/fs.h
@@ -69,6 +69,7 @@ extern int dir_notify_enable;
69#define READ 0 69#define READ 0
70#define WRITE 1 70#define WRITE 1
71#define READA 2 /* read-ahead - don't block if no resources */ 71#define READA 2 /* read-ahead - don't block if no resources */
72#define SWRITE 3 /* for ll_rw_block() - wait for buffer lock */
72#define SPECIAL 4 /* For non-blockdevice requests in request queue */ 73#define SPECIAL 4 /* For non-blockdevice requests in request queue */
73#define READ_SYNC (READ | (1 << BIO_RW_SYNC)) 74#define READ_SYNC (READ | (1 << BIO_RW_SYNC))
74#define WRITE_SYNC (WRITE | (1 << BIO_RW_SYNC)) 75#define WRITE_SYNC (WRITE | (1 << BIO_RW_SYNC))
@@ -281,19 +282,9 @@ struct iattr {
281 struct timespec ia_atime; 282 struct timespec ia_atime;
282 struct timespec ia_mtime; 283 struct timespec ia_mtime;
283 struct timespec ia_ctime; 284 struct timespec ia_ctime;
284 unsigned int ia_attr_flags;
285}; 285};
286 286
287/* 287/*
288 * This is the inode attributes flag definitions
289 */
290#define ATTR_FLAG_SYNCRONOUS 1 /* Syncronous write */
291#define ATTR_FLAG_NOATIME 2 /* Don't update atime */
292#define ATTR_FLAG_APPEND 4 /* Append-only file */
293#define ATTR_FLAG_IMMUTABLE 8 /* Immutable file */
294#define ATTR_FLAG_NODIRATIME 16 /* Don't update atime for directory */
295
296/*
297 * Includes for diskquotas. 288 * Includes for diskquotas.
298 */ 289 */
299#include <linux/quota.h> 290#include <linux/quota.h>
@@ -594,7 +585,6 @@ struct file {
594 unsigned int f_uid, f_gid; 585 unsigned int f_uid, f_gid;
595 struct file_ra_state f_ra; 586 struct file_ra_state f_ra;
596 587
597 size_t f_maxcount;
598 unsigned long f_version; 588 unsigned long f_version;
599 void *f_security; 589 void *f_security;
600 590
@@ -1291,6 +1281,7 @@ static inline int break_lease(struct inode *inode, unsigned int mode)
1291/* fs/open.c */ 1281/* fs/open.c */
1292 1282
1293extern int do_truncate(struct dentry *, loff_t start); 1283extern int do_truncate(struct dentry *, loff_t start);
1284extern long do_sys_open(const char __user *filename, int flags, int mode);
1294extern struct file *filp_open(const char *, int, int); 1285extern struct file *filp_open(const char *, int, int);
1295extern struct file * dentry_open(struct dentry *, struct vfsmount *, int); 1286extern struct file * dentry_open(struct dentry *, struct vfsmount *, int);
1296extern int filp_close(struct file *, fl_owner_t id); 1287extern int filp_close(struct file *, fl_owner_t id);
diff --git a/include/linux/futex.h b/include/linux/futex.h
index 65d6cfdb6d39..10f96c31971e 100644
--- a/include/linux/futex.h
+++ b/include/linux/futex.h
@@ -4,14 +4,40 @@
4/* Second argument to futex syscall */ 4/* Second argument to futex syscall */
5 5
6 6
7#define FUTEX_WAIT (0) 7#define FUTEX_WAIT 0
8#define FUTEX_WAKE (1) 8#define FUTEX_WAKE 1
9#define FUTEX_FD (2) 9#define FUTEX_FD 2
10#define FUTEX_REQUEUE (3) 10#define FUTEX_REQUEUE 3
11#define FUTEX_CMP_REQUEUE (4) 11#define FUTEX_CMP_REQUEUE 4
12#define FUTEX_WAKE_OP 5
12 13
13long do_futex(unsigned long uaddr, int op, int val, 14long do_futex(unsigned long uaddr, int op, int val,
14 unsigned long timeout, unsigned long uaddr2, int val2, 15 unsigned long timeout, unsigned long uaddr2, int val2,
15 int val3); 16 int val3);
16 17
18#define FUTEX_OP_SET 0 /* *(int *)UADDR2 = OPARG; */
19#define FUTEX_OP_ADD 1 /* *(int *)UADDR2 += OPARG; */
20#define FUTEX_OP_OR 2 /* *(int *)UADDR2 |= OPARG; */
21#define FUTEX_OP_ANDN 3 /* *(int *)UADDR2 &= ~OPARG; */
22#define FUTEX_OP_XOR 4 /* *(int *)UADDR2 ^= OPARG; */
23
24#define FUTEX_OP_OPARG_SHIFT 8 /* Use (1 << OPARG) instead of OPARG. */
25
26#define FUTEX_OP_CMP_EQ 0 /* if (oldval == CMPARG) wake */
27#define FUTEX_OP_CMP_NE 1 /* if (oldval != CMPARG) wake */
28#define FUTEX_OP_CMP_LT 2 /* if (oldval < CMPARG) wake */
29#define FUTEX_OP_CMP_LE 3 /* if (oldval <= CMPARG) wake */
30#define FUTEX_OP_CMP_GT 4 /* if (oldval > CMPARG) wake */
31#define FUTEX_OP_CMP_GE 5 /* if (oldval >= CMPARG) wake */
32
33/* FUTEX_WAKE_OP will perform atomically
34 int oldval = *(int *)UADDR2;
35 *(int *)UADDR2 = oldval OP OPARG;
36 if (oldval CMP CMPARG)
37 wake UADDR2; */
38
39#define FUTEX_OP(op, oparg, cmp, cmparg) \
40 (((op & 0xf) << 28) | ((cmp & 0xf) << 24) \
41 | ((oparg & 0xfff) << 12) | (cmparg & 0xfff))
42
17#endif 43#endif
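
Usage sketch (not part of the patch): the new FUTEX_WAKE_OP operation and the FUTEX_OP() encoding above can be driven through the raw futex syscall. The wrapper name futex_wake_op() and the two example words are invented for illustration; no libc wrapper is assumed.

/* Sketch: wake up to nr_wake1 waiters on word1 and, atomically, set
 * *word2 = 1, waking up to nr_wake2 waiters on word2 if its old value
 * was 0 (FUTEX_OP_SET / FUTEX_OP_CMP_EQ as defined above). */
#include <linux/futex.h>
#include <sys/syscall.h>
#include <unistd.h>

static int futex_wake_op(int *word1, int *word2, int nr_wake1, int nr_wake2)
{
        int encoded = FUTEX_OP(FUTEX_OP_SET, 1, FUTEX_OP_CMP_EQ, 0);

        /* For FUTEX_WAKE_OP the timeout slot carries the second wake count. */
        return syscall(SYS_futex, word1, FUTEX_WAKE_OP, nr_wake1,
                       nr_wake2, word2, encoded);
}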
diff --git a/include/linux/gfp.h b/include/linux/gfp.h
index 7c7400137e97..4dc990f3b5cc 100644
--- a/include/linux/gfp.h
+++ b/include/linux/gfp.h
@@ -40,6 +40,7 @@ struct vm_area_struct;
40#define __GFP_ZERO 0x8000u /* Return zeroed page on success */ 40#define __GFP_ZERO 0x8000u /* Return zeroed page on success */
41#define __GFP_NOMEMALLOC 0x10000u /* Don't use emergency reserves */ 41#define __GFP_NOMEMALLOC 0x10000u /* Don't use emergency reserves */
42#define __GFP_NORECLAIM 0x20000u /* No realy zone reclaim during allocation */ 42#define __GFP_NORECLAIM 0x20000u /* No realy zone reclaim during allocation */
43#define __GFP_HARDWALL 0x40000u /* Enforce hardwall cpuset memory allocs */
43 44
44#define __GFP_BITS_SHIFT 20 /* Room for 20 __GFP_FOO bits */ 45#define __GFP_BITS_SHIFT 20 /* Room for 20 __GFP_FOO bits */
45#define __GFP_BITS_MASK ((1 << __GFP_BITS_SHIFT) - 1) 46#define __GFP_BITS_MASK ((1 << __GFP_BITS_SHIFT) - 1)
@@ -48,14 +49,15 @@ struct vm_area_struct;
48#define GFP_LEVEL_MASK (__GFP_WAIT|__GFP_HIGH|__GFP_IO|__GFP_FS| \ 49#define GFP_LEVEL_MASK (__GFP_WAIT|__GFP_HIGH|__GFP_IO|__GFP_FS| \
49 __GFP_COLD|__GFP_NOWARN|__GFP_REPEAT| \ 50 __GFP_COLD|__GFP_NOWARN|__GFP_REPEAT| \
50 __GFP_NOFAIL|__GFP_NORETRY|__GFP_NO_GROW|__GFP_COMP| \ 51 __GFP_NOFAIL|__GFP_NORETRY|__GFP_NO_GROW|__GFP_COMP| \
51 __GFP_NOMEMALLOC|__GFP_NORECLAIM) 52 __GFP_NOMEMALLOC|__GFP_NORECLAIM|__GFP_HARDWALL)
52 53
53#define GFP_ATOMIC (__GFP_HIGH) 54#define GFP_ATOMIC (__GFP_HIGH)
54#define GFP_NOIO (__GFP_WAIT) 55#define GFP_NOIO (__GFP_WAIT)
55#define GFP_NOFS (__GFP_WAIT | __GFP_IO) 56#define GFP_NOFS (__GFP_WAIT | __GFP_IO)
56#define GFP_KERNEL (__GFP_WAIT | __GFP_IO | __GFP_FS) 57#define GFP_KERNEL (__GFP_WAIT | __GFP_IO | __GFP_FS)
57#define GFP_USER (__GFP_WAIT | __GFP_IO | __GFP_FS) 58#define GFP_USER (__GFP_WAIT | __GFP_IO | __GFP_FS | __GFP_HARDWALL)
58#define GFP_HIGHUSER (__GFP_WAIT | __GFP_IO | __GFP_FS | __GFP_HIGHMEM) 59#define GFP_HIGHUSER (__GFP_WAIT | __GFP_IO | __GFP_FS | __GFP_HARDWALL | \
60 __GFP_HIGHMEM)
59 61
60/* Flag - indicates that the buffer will be suitable for DMA. Ignored on some 62/* Flag - indicates that the buffer will be suitable for DMA. Ignored on some
61 platforms, used as appropriate on others */ 63 platforms, used as appropriate on others */
diff --git a/include/linux/inotify.h b/include/linux/inotify.h
index 93bb3afe646b..ee5b239092ed 100644
--- a/include/linux/inotify.h
+++ b/include/linux/inotify.h
@@ -47,6 +47,7 @@ struct inotify_event {
47#define IN_MOVE (IN_MOVED_FROM | IN_MOVED_TO) /* moves */ 47#define IN_MOVE (IN_MOVED_FROM | IN_MOVED_TO) /* moves */
48 48
49/* special flags */ 49/* special flags */
50#define IN_MASK_ADD 0x20000000 /* add to the mask of an already existing watch */
50#define IN_ISDIR 0x40000000 /* event occurred against dir */ 51#define IN_ISDIR 0x40000000 /* event occurred against dir */
51#define IN_ONESHOT 0x80000000 /* only send event once */ 52#define IN_ONESHOT 0x80000000 /* only send event once */
52 53
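
Usage sketch (not part of the patch): IN_MASK_ADD lets a second inotify_add_watch() call extend an existing watch instead of replacing its mask. The example assumes a userspace inotify_add_watch() wrapper is available, which this header change does not itself provide.

#include <sys/inotify.h>
#include <stdio.h>

/* OR IN_DELETE into whatever mask is already set on path's watch;
 * without IN_MASK_ADD the old mask would simply be overwritten. */
int watch_deletes_too(int inotify_fd, const char *path)
{
        int wd = inotify_add_watch(inotify_fd, path, IN_DELETE | IN_MASK_ADD);

        if (wd < 0)
                perror("inotify_add_watch");
        return wd;
}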
diff --git a/include/linux/input.h b/include/linux/input.h
index bdc53c6cc962..4767e5429534 100644
--- a/include/linux/input.h
+++ b/include/linux/input.h
@@ -66,6 +66,7 @@ struct input_absinfo {
66#define EVIOCGKEY(len) _IOC(_IOC_READ, 'E', 0x18, len) /* get global keystate */ 66#define EVIOCGKEY(len) _IOC(_IOC_READ, 'E', 0x18, len) /* get global keystate */
67#define EVIOCGLED(len) _IOC(_IOC_READ, 'E', 0x19, len) /* get all LEDs */ 67#define EVIOCGLED(len) _IOC(_IOC_READ, 'E', 0x19, len) /* get all LEDs */
68#define EVIOCGSND(len) _IOC(_IOC_READ, 'E', 0x1a, len) /* get all sounds status */ 68#define EVIOCGSND(len) _IOC(_IOC_READ, 'E', 0x1a, len) /* get all sounds status */
69#define EVIOCGSW(len) _IOC(_IOC_READ, 'E', 0x1b, len) /* get all switch states */
69 70
70#define EVIOCGBIT(ev,len) _IOC(_IOC_READ, 'E', 0x20 + ev, len) /* get event bits */ 71#define EVIOCGBIT(ev,len) _IOC(_IOC_READ, 'E', 0x20 + ev, len) /* get event bits */
71#define EVIOCGABS(abs) _IOR('E', 0x40 + abs, struct input_absinfo) /* get abs value/limits */ 72#define EVIOCGABS(abs) _IOR('E', 0x40 + abs, struct input_absinfo) /* get abs value/limits */
@@ -86,6 +87,7 @@ struct input_absinfo {
86#define EV_REL 0x02 87#define EV_REL 0x02
87#define EV_ABS 0x03 88#define EV_ABS 0x03
88#define EV_MSC 0x04 89#define EV_MSC 0x04
90#define EV_SW 0x05
89#define EV_LED 0x11 91#define EV_LED 0x11
90#define EV_SND 0x12 92#define EV_SND 0x12
91#define EV_REP 0x14 93#define EV_REP 0x14
@@ -551,6 +553,20 @@ struct input_absinfo {
551#define ABS_MAX 0x3f 553#define ABS_MAX 0x3f
552 554
553/* 555/*
556 * Switch events
557 */
558
559#define SW_0 0x00
560#define SW_1 0x01
561#define SW_2 0x02
562#define SW_3 0x03
563#define SW_4 0x04
564#define SW_5 0x05
565#define SW_6 0x06
566#define SW_7 0x07
567#define SW_MAX 0x0f
568
569/*
554 * Misc events 570 * Misc events
555 */ 571 */
556 572
@@ -824,6 +840,7 @@ struct input_dev {
824 unsigned long ledbit[NBITS(LED_MAX)]; 840 unsigned long ledbit[NBITS(LED_MAX)];
825 unsigned long sndbit[NBITS(SND_MAX)]; 841 unsigned long sndbit[NBITS(SND_MAX)];
826 unsigned long ffbit[NBITS(FF_MAX)]; 842 unsigned long ffbit[NBITS(FF_MAX)];
843 unsigned long swbit[NBITS(SW_MAX)];
827 int ff_effects_max; 844 int ff_effects_max;
828 845
829 unsigned int keycodemax; 846 unsigned int keycodemax;
@@ -844,6 +861,7 @@ struct input_dev {
844 unsigned long key[NBITS(KEY_MAX)]; 861 unsigned long key[NBITS(KEY_MAX)];
845 unsigned long led[NBITS(LED_MAX)]; 862 unsigned long led[NBITS(LED_MAX)];
846 unsigned long snd[NBITS(SND_MAX)]; 863 unsigned long snd[NBITS(SND_MAX)];
864 unsigned long sw[NBITS(SW_MAX)];
847 865
848 int absmax[ABS_MAX + 1]; 866 int absmax[ABS_MAX + 1];
849 int absmin[ABS_MAX + 1]; 867 int absmin[ABS_MAX + 1];
@@ -886,6 +904,7 @@ struct input_dev {
886#define INPUT_DEVICE_ID_MATCH_LEDBIT 0x200 904#define INPUT_DEVICE_ID_MATCH_LEDBIT 0x200
887#define INPUT_DEVICE_ID_MATCH_SNDBIT 0x400 905#define INPUT_DEVICE_ID_MATCH_SNDBIT 0x400
888#define INPUT_DEVICE_ID_MATCH_FFBIT 0x800 906#define INPUT_DEVICE_ID_MATCH_FFBIT 0x800
907#define INPUT_DEVICE_ID_MATCH_SWBIT 0x1000
889 908
890#define INPUT_DEVICE_ID_MATCH_DEVICE\ 909#define INPUT_DEVICE_ID_MATCH_DEVICE\
891 (INPUT_DEVICE_ID_MATCH_BUS | INPUT_DEVICE_ID_MATCH_VENDOR | INPUT_DEVICE_ID_MATCH_PRODUCT) 910 (INPUT_DEVICE_ID_MATCH_BUS | INPUT_DEVICE_ID_MATCH_VENDOR | INPUT_DEVICE_ID_MATCH_PRODUCT)
@@ -906,6 +925,7 @@ struct input_device_id {
906 unsigned long ledbit[NBITS(LED_MAX)]; 925 unsigned long ledbit[NBITS(LED_MAX)];
907 unsigned long sndbit[NBITS(SND_MAX)]; 926 unsigned long sndbit[NBITS(SND_MAX)];
908 unsigned long ffbit[NBITS(FF_MAX)]; 927 unsigned long ffbit[NBITS(FF_MAX)];
928 unsigned long swbit[NBITS(SW_MAX)];
909 929
910 unsigned long driver_info; 930 unsigned long driver_info;
911}; 931};
@@ -998,6 +1018,11 @@ static inline void input_report_ff_status(struct input_dev *dev, unsigned int co
998 input_event(dev, EV_FF_STATUS, code, value); 1018 input_event(dev, EV_FF_STATUS, code, value);
999} 1019}
1000 1020
1021static inline void input_report_switch(struct input_dev *dev, unsigned int code, int value)
1022{
1023 input_event(dev, EV_SW, code, !!value);
1024}
1025
1001static inline void input_regs(struct input_dev *dev, struct pt_regs *regs) 1026static inline void input_regs(struct input_dev *dev, struct pt_regs *regs)
1002{ 1027{
1003 dev->regs = regs; 1028 dev->regs = regs;
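
Driver-side sketch (not part of the patch) of the new EV_SW machinery: the device, the use of SW_0 for a lid switch, and the interrupt hook are all invented for illustration.

#include <linux/input.h>

static struct input_dev lid_dev;

static void lid_switch_setup(void)
{
        init_input_dev(&lid_dev);
        lid_dev.name = "Example lid switch";
        set_bit(EV_SW, lid_dev.evbit);          /* we generate switch events */
        set_bit(SW_0, lid_dev.swbit);           /* ...on switch 0 */
        input_register_device(&lid_dev);
}

/* Called from the (hypothetical) GPIO interrupt when the lid moves. */
static void lid_switch_event(int closed)
{
        input_report_switch(&lid_dev, SW_0, closed);
        input_sync(&lid_dev);
}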
diff --git a/include/linux/ioctl32.h b/include/linux/ioctl32.h
index e8c4af32b3bb..948809d99917 100644
--- a/include/linux/ioctl32.h
+++ b/include/linux/ioctl32.h
@@ -14,26 +14,4 @@ struct ioctl_trans {
14 struct ioctl_trans *next; 14 struct ioctl_trans *next;
15}; 15};
16 16
17/*
18 * Register an 32bit ioctl translation handler for ioctl cmd.
19 *
20 * handler == NULL: use 64bit ioctl handler.
21 * arguments to handler: fd: file descriptor
22 * cmd: ioctl command.
23 * arg: ioctl argument
24 * struct file *file: file descriptor pointer.
25 */
26
27#ifdef CONFIG_COMPAT
28extern int __deprecated register_ioctl32_conversion(unsigned int cmd,
29 ioctl_trans_handler_t handler);
30extern int __deprecated unregister_ioctl32_conversion(unsigned int cmd);
31
32#else
33
34#define register_ioctl32_conversion(cmd, handler) ({ 0; })
35#define unregister_ioctl32_conversion(cmd) ({ 0; })
36
37#endif
38
39#endif 17#endif
diff --git a/include/linux/ipmi.h b/include/linux/ipmi.h
index 596ca6130159..938d55b813a5 100644
--- a/include/linux/ipmi.h
+++ b/include/linux/ipmi.h
@@ -35,6 +35,7 @@
35#define __LINUX_IPMI_H 35#define __LINUX_IPMI_H
36 36
37#include <linux/ipmi_msgdefs.h> 37#include <linux/ipmi_msgdefs.h>
38#include <linux/compiler.h>
38 39
39/* 40/*
40 * This file describes an interface to an IPMI driver. You have to 41 * This file describes an interface to an IPMI driver. You have to
@@ -241,7 +242,8 @@ struct ipmi_recv_msg
241 /* The user_msg_data is the data supplied when a message was 242 /* The user_msg_data is the data supplied when a message was
242 sent, if this is a response to a sent message. If this is 243 sent, if this is a response to a sent message. If this is
243 not a response to a sent message, then user_msg_data will 244 not a response to a sent message, then user_msg_data will
244 be NULL. */ 245 be NULL. If the user above is NULL, then this will be the
246 intf. */
245 void *user_msg_data; 247 void *user_msg_data;
246 248
247 /* Call this when done with the message. It will presumably free 249 /* Call this when done with the message. It will presumably free
@@ -298,13 +300,19 @@ void ipmi_get_version(ipmi_user_t user,
298 this user, so it will affect all users of this interface. This is 300 this user, so it will affect all users of this interface. This is
299 so some initialization code can come in and do the OEM-specific 301 so some initialization code can come in and do the OEM-specific
300 things it takes to determine your address (if not the BMC) and set 302 things it takes to determine your address (if not the BMC) and set
301 it for everyone else. */ 303 it for everyone else. Note that each channel can have its own address. */
302void ipmi_set_my_address(ipmi_user_t user, 304int ipmi_set_my_address(ipmi_user_t user,
303 unsigned char address); 305 unsigned int channel,
304unsigned char ipmi_get_my_address(ipmi_user_t user); 306 unsigned char address);
305void ipmi_set_my_LUN(ipmi_user_t user, 307int ipmi_get_my_address(ipmi_user_t user,
306 unsigned char LUN); 308 unsigned int channel,
307unsigned char ipmi_get_my_LUN(ipmi_user_t user); 309 unsigned char *address);
310int ipmi_set_my_LUN(ipmi_user_t user,
311 unsigned int channel,
312 unsigned char LUN);
313int ipmi_get_my_LUN(ipmi_user_t user,
314 unsigned int channel,
315 unsigned char *LUN);
308 316
309/* 317/*
310 * Like ipmi_request, but lets you specify the number of retries and 318 * Like ipmi_request, but lets you specify the number of retries and
@@ -585,6 +593,16 @@ struct ipmi_cmdspec
585 * things it takes to determine your address (if not the BMC) and set 593 * things it takes to determine your address (if not the BMC) and set
586 * it for everyone else. You should probably leave the LUN alone. 594 * it for everyone else. You should probably leave the LUN alone.
587 */ 595 */
596struct ipmi_channel_lun_address_set
597{
598 unsigned short channel;
599 unsigned char value;
600};
601#define IPMICTL_SET_MY_CHANNEL_ADDRESS_CMD _IOR(IPMI_IOC_MAGIC, 24, struct ipmi_channel_lun_address_set)
602#define IPMICTL_GET_MY_CHANNEL_ADDRESS_CMD _IOR(IPMI_IOC_MAGIC, 25, struct ipmi_channel_lun_address_set)
603#define IPMICTL_SET_MY_CHANNEL_LUN_CMD _IOR(IPMI_IOC_MAGIC, 26, struct ipmi_channel_lun_address_set)
604#define IPMICTL_GET_MY_CHANNEL_LUN_CMD _IOR(IPMI_IOC_MAGIC, 27, struct ipmi_channel_lun_address_set)
605/* Legacy interfaces, these only set IPMB 0. */
588#define IPMICTL_SET_MY_ADDRESS_CMD _IOR(IPMI_IOC_MAGIC, 17, unsigned int) 606#define IPMICTL_SET_MY_ADDRESS_CMD _IOR(IPMI_IOC_MAGIC, 17, unsigned int)
589#define IPMICTL_GET_MY_ADDRESS_CMD _IOR(IPMI_IOC_MAGIC, 18, unsigned int) 607#define IPMICTL_GET_MY_ADDRESS_CMD _IOR(IPMI_IOC_MAGIC, 18, unsigned int)
590#define IPMICTL_SET_MY_LUN_CMD _IOR(IPMI_IOC_MAGIC, 19, unsigned int) 608#define IPMICTL_SET_MY_LUN_CMD _IOR(IPMI_IOC_MAGIC, 19, unsigned int)
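
Userspace sketch (not part of the patch) of the new per-channel ioctls; /dev/ipmi0, channel 1 and the 0x22 IPMB address are examples only.

#include <linux/ipmi.h>
#include <sys/ioctl.h>
#include <fcntl.h>
#include <unistd.h>
#include <stdio.h>

/* Set the IPMB address used on channel 1 of the first IPMI interface. */
int set_channel1_address(void)
{
        struct ipmi_channel_lun_address_set req = {
                .channel = 1,
                .value   = 0x22,
        };
        int fd = open("/dev/ipmi0", O_RDWR);
        int ret = 0;

        if (fd < 0)
                return -1;
        if (ioctl(fd, IPMICTL_SET_MY_CHANNEL_ADDRESS_CMD, &req) < 0) {
                perror("IPMICTL_SET_MY_CHANNEL_ADDRESS_CMD");
                ret = -1;
        }
        close(fd);
        return ret;
}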
diff --git a/include/linux/irq.h b/include/linux/irq.h
index 069d3b84d311..69681c3b1f05 100644
--- a/include/linux/irq.h
+++ b/include/linux/irq.h
@@ -32,7 +32,12 @@
32#define IRQ_WAITING 32 /* IRQ not yet seen - for autodetection */ 32#define IRQ_WAITING 32 /* IRQ not yet seen - for autodetection */
33#define IRQ_LEVEL 64 /* IRQ level triggered */ 33#define IRQ_LEVEL 64 /* IRQ level triggered */
34#define IRQ_MASKED 128 /* IRQ masked - shouldn't be seen again */ 34#define IRQ_MASKED 128 /* IRQ masked - shouldn't be seen again */
35#define IRQ_PER_CPU 256 /* IRQ is per CPU */ 35#if defined(ARCH_HAS_IRQ_PER_CPU)
36# define IRQ_PER_CPU 256 /* IRQ is per CPU */
37# define CHECK_IRQ_PER_CPU(var) ((var) & IRQ_PER_CPU)
38#else
39# define CHECK_IRQ_PER_CPU(var) 0
40#endif
36 41
37/* 42/*
38 * Interrupt controller descriptor. This is all we need 43 * Interrupt controller descriptor. This is all we need
@@ -71,16 +76,139 @@ typedef struct irq_desc {
71 unsigned int irq_count; /* For detecting broken interrupts */ 76 unsigned int irq_count; /* For detecting broken interrupts */
72 unsigned int irqs_unhandled; 77 unsigned int irqs_unhandled;
73 spinlock_t lock; 78 spinlock_t lock;
79#if defined (CONFIG_GENERIC_PENDING_IRQ) || defined (CONFIG_IRQBALANCE)
 80 unsigned int move_irq; /* Flag: need to re-target interrupt destination */
81#endif
74} ____cacheline_aligned irq_desc_t; 82} ____cacheline_aligned irq_desc_t;
75 83
76extern irq_desc_t irq_desc [NR_IRQS]; 84extern irq_desc_t irq_desc [NR_IRQS];
77 85
86/* Return a pointer to the irq descriptor for IRQ. */
87static inline irq_desc_t *
88irq_descp (int irq)
89{
90 return irq_desc + irq;
91}
92
78#include <asm/hw_irq.h> /* the arch dependent stuff */ 93#include <asm/hw_irq.h> /* the arch dependent stuff */
79 94
80extern int setup_irq(unsigned int irq, struct irqaction * new); 95extern int setup_irq(unsigned int irq, struct irqaction * new);
81 96
82#ifdef CONFIG_GENERIC_HARDIRQS 97#ifdef CONFIG_GENERIC_HARDIRQS
83extern cpumask_t irq_affinity[NR_IRQS]; 98extern cpumask_t irq_affinity[NR_IRQS];
99
100#ifdef CONFIG_SMP
101static inline void set_native_irq_info(int irq, cpumask_t mask)
102{
103 irq_affinity[irq] = mask;
104}
105#else
106static inline void set_native_irq_info(int irq, cpumask_t mask)
107{
108}
109#endif
110
111#ifdef CONFIG_SMP
112
113#if defined (CONFIG_GENERIC_PENDING_IRQ) || defined (CONFIG_IRQBALANCE)
114extern cpumask_t pending_irq_cpumask[NR_IRQS];
115
116static inline void set_pending_irq(unsigned int irq, cpumask_t mask)
117{
118 irq_desc_t *desc = irq_desc + irq;
119 unsigned long flags;
120
121 spin_lock_irqsave(&desc->lock, flags);
122 desc->move_irq = 1;
123 pending_irq_cpumask[irq] = mask;
124 spin_unlock_irqrestore(&desc->lock, flags);
125}
126
127static inline void
128move_native_irq(int irq)
129{
130 cpumask_t tmp;
131 irq_desc_t *desc = irq_descp(irq);
132
133 if (likely (!desc->move_irq))
134 return;
135
136 desc->move_irq = 0;
137
138 if (likely(cpus_empty(pending_irq_cpumask[irq])))
139 return;
140
141 if (!desc->handler->set_affinity)
142 return;
143
144 /* note - we hold the desc->lock */
145 cpus_and(tmp, pending_irq_cpumask[irq], cpu_online_map);
146
147 /*
148 * If there was a valid mask to work with, please
149 * do the disable, re-program, enable sequence.
 150 * This is *not* particularly important for level-triggered
 151 * interrupts, but in an edge-triggered case we might be setting the RTE
 152 * when an active trigger is coming in. This could
 153 * cause some ioapics to malfunction.
 154 * Being paranoid, I guess!
155 */
156 if (unlikely(!cpus_empty(tmp))) {
157 desc->handler->disable(irq);
158 desc->handler->set_affinity(irq,tmp);
159 desc->handler->enable(irq);
160 }
161 cpus_clear(pending_irq_cpumask[irq]);
162}
163
164#ifdef CONFIG_PCI_MSI
165/*
166 * Wonder why these are dummies?
 167 * For example, set_ioapic_affinity_vector() calls its set_ioapic_affinity_irq()
 168 * counterpart after translating the vector to irq info. We need to perform
 169 * this operation on the real irq when we don't use vectors, i.e. when
170 * pci_use_vector() is false.
171 */
172static inline void move_irq(int irq)
173{
174}
175
176static inline void set_irq_info(int irq, cpumask_t mask)
177{
178}
179
180#else // CONFIG_PCI_MSI
181
182static inline void move_irq(int irq)
183{
184 move_native_irq(irq);
185}
186
187static inline void set_irq_info(int irq, cpumask_t mask)
188{
189 set_native_irq_info(irq, mask);
190}
191#endif // CONFIG_PCI_MSI
192
193#else // CONFIG_GENERIC_PENDING_IRQ || CONFIG_IRQBALANCE
194
195#define move_irq(x)
196#define move_native_irq(x)
197#define set_pending_irq(x,y)
198static inline void set_irq_info(int irq, cpumask_t mask)
199{
200 set_native_irq_info(irq, mask);
201}
202
203#endif // CONFIG_GENERIC_PENDING_IRQ
204
205#else // CONFIG_SMP
206
207#define move_irq(x)
208#define move_native_irq(x)
209
210#endif // CONFIG_SMP
211
84extern int no_irq_affinity; 212extern int no_irq_affinity;
85extern int noirqdebug_setup(char *str); 213extern int noirqdebug_setup(char *str);
86 214
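
To illustrate how these helpers are meant to be consumed (a sketch, not code from this patch): the generic /proc affinity write path records the requested mask with set_pending_irq(), and the controller's interrupt path applies it later via move_irq()/move_native_irq(), where reprogramming is safe. example_ack_irq() below stands in for an I/O APIC style ->ack method.

#include <linux/irq.h>

static void example_ack_irq(unsigned int irq)
{
        /* Apply any affinity change queued with set_pending_irq() while we
         * are in the interrupt path, then ack/mask the controller as usual. */
        move_irq(irq);
        /* ...controller-specific ack/EOI goes here... */
}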
diff --git a/include/linux/isdn.h b/include/linux/isdn.h
index 862083eb58ab..53eaee96065b 100644
--- a/include/linux/isdn.h
+++ b/include/linux/isdn.h
@@ -150,7 +150,6 @@ typedef struct {
150#include <linux/errno.h> 150#include <linux/errno.h>
151#include <linux/fs.h> 151#include <linux/fs.h>
152#include <linux/major.h> 152#include <linux/major.h>
153#include <asm/segment.h>
154#include <asm/io.h> 153#include <asm/io.h>
155#include <linux/kernel.h> 154#include <linux/kernel.h>
156#include <linux/signal.h> 155#include <linux/signal.h>
diff --git a/include/linux/jbd.h b/include/linux/jbd.h
index 593407e865b1..84321a4cac93 100644
--- a/include/linux/jbd.h
+++ b/include/linux/jbd.h
@@ -914,7 +914,6 @@ extern int journal_wipe (journal_t *, int);
914extern int journal_skip_recovery (journal_t *); 914extern int journal_skip_recovery (journal_t *);
915extern void journal_update_superblock (journal_t *, int); 915extern void journal_update_superblock (journal_t *, int);
916extern void __journal_abort_hard (journal_t *); 916extern void __journal_abort_hard (journal_t *);
917extern void __journal_abort_soft (journal_t *, int);
918extern void journal_abort (journal_t *, int); 917extern void journal_abort (journal_t *, int);
919extern int journal_errno (journal_t *); 918extern int journal_errno (journal_t *);
920extern void journal_ack_err (journal_t *); 919extern void journal_ack_err (journal_t *);
diff --git a/include/linux/kprobes.h b/include/linux/kprobes.h
index e050fc2d4c26..e30afdca7917 100644
--- a/include/linux/kprobes.h
+++ b/include/linux/kprobes.h
@@ -42,6 +42,9 @@
42#define KPROBE_REENTER 0x00000004 42#define KPROBE_REENTER 0x00000004
43#define KPROBE_HIT_SSDONE 0x00000008 43#define KPROBE_HIT_SSDONE 0x00000008
44 44
 45/* Mark functions that kprobes must ignore; they are placed in .kprobes.text */
46#define __kprobes __attribute__((__section__(".kprobes.text")))
47
45struct kprobe; 48struct kprobe;
46struct pt_regs; 49struct pt_regs;
47struct kretprobe; 50struct kretprobe;
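
A small sketch (not part of the patch) of the new __kprobes annotation in use; the function is invented, but the pattern matches what the annotation is for: code that runs inside the kprobes exception path must not itself be probed.

#include <linux/kprobes.h>

/* Placed in .kprobes.text, so the kprobes core will refuse to plant a
 * probe here and cannot recurse through its own fault handling. */
static int __kprobes in_probe_fault_path(unsigned long addr)
{
        return addr != 0;
}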
diff --git a/include/linux/linkage.h b/include/linux/linkage.h
index 338f7795d8a0..147eb01e0d4b 100644
--- a/include/linux/linkage.h
+++ b/include/linux/linkage.h
@@ -33,6 +33,13 @@
33 ALIGN; \ 33 ALIGN; \
34 name: 34 name:
35 35
36#define KPROBE_ENTRY(name) \
37 .section .kprobes.text, "ax"; \
38 .globl name; \
39 ALIGN; \
40 name:
41
42
36#endif 43#endif
37 44
38#define NORET_TYPE /**/ 45#define NORET_TYPE /**/
diff --git a/include/linux/mmc/card.h b/include/linux/mmc/card.h
index aefedf04b9bb..18fc77f682de 100644
--- a/include/linux/mmc/card.h
+++ b/include/linux/mmc/card.h
@@ -33,6 +33,13 @@ struct mmc_csd {
33 unsigned int capacity; 33 unsigned int capacity;
34}; 34};
35 35
36struct sd_scr {
37 unsigned char sda_vsn;
38 unsigned char bus_widths;
39#define SD_SCR_BUS_WIDTH_1 (1<<0)
40#define SD_SCR_BUS_WIDTH_4 (1<<2)
41};
42
36struct mmc_host; 43struct mmc_host;
37 44
38/* 45/*
@@ -47,19 +54,27 @@ struct mmc_card {
47#define MMC_STATE_PRESENT (1<<0) /* present in sysfs */ 54#define MMC_STATE_PRESENT (1<<0) /* present in sysfs */
48#define MMC_STATE_DEAD (1<<1) /* device no longer in stack */ 55#define MMC_STATE_DEAD (1<<1) /* device no longer in stack */
49#define MMC_STATE_BAD (1<<2) /* unrecognised device */ 56#define MMC_STATE_BAD (1<<2) /* unrecognised device */
57#define MMC_STATE_SDCARD (1<<3) /* is an SD card */
58#define MMC_STATE_READONLY (1<<4) /* card is read-only */
50 u32 raw_cid[4]; /* raw card CID */ 59 u32 raw_cid[4]; /* raw card CID */
51 u32 raw_csd[4]; /* raw card CSD */ 60 u32 raw_csd[4]; /* raw card CSD */
61 u32 raw_scr[2]; /* raw card SCR */
52 struct mmc_cid cid; /* card identification */ 62 struct mmc_cid cid; /* card identification */
53 struct mmc_csd csd; /* card specific */ 63 struct mmc_csd csd; /* card specific */
64 struct sd_scr scr; /* extra SD information */
54}; 65};
55 66
56#define mmc_card_present(c) ((c)->state & MMC_STATE_PRESENT) 67#define mmc_card_present(c) ((c)->state & MMC_STATE_PRESENT)
57#define mmc_card_dead(c) ((c)->state & MMC_STATE_DEAD) 68#define mmc_card_dead(c) ((c)->state & MMC_STATE_DEAD)
58#define mmc_card_bad(c) ((c)->state & MMC_STATE_BAD) 69#define mmc_card_bad(c) ((c)->state & MMC_STATE_BAD)
70#define mmc_card_sd(c) ((c)->state & MMC_STATE_SDCARD)
71#define mmc_card_readonly(c) ((c)->state & MMC_STATE_READONLY)
59 72
60#define mmc_card_set_present(c) ((c)->state |= MMC_STATE_PRESENT) 73#define mmc_card_set_present(c) ((c)->state |= MMC_STATE_PRESENT)
61#define mmc_card_set_dead(c) ((c)->state |= MMC_STATE_DEAD) 74#define mmc_card_set_dead(c) ((c)->state |= MMC_STATE_DEAD)
62#define mmc_card_set_bad(c) ((c)->state |= MMC_STATE_BAD) 75#define mmc_card_set_bad(c) ((c)->state |= MMC_STATE_BAD)
76#define mmc_card_set_sd(c) ((c)->state |= MMC_STATE_SDCARD)
77#define mmc_card_set_readonly(c) ((c)->state |= MMC_STATE_READONLY)
63 78
64#define mmc_card_name(c) ((c)->cid.prod_name) 79#define mmc_card_name(c) ((c)->cid.prod_name)
65#define mmc_card_id(c) ((c)->dev.bus_id) 80#define mmc_card_id(c) ((c)->dev.bus_id)
diff --git a/include/linux/mmc/host.h b/include/linux/mmc/host.h
index 30f68c0c8c6e..6014160d9c06 100644
--- a/include/linux/mmc/host.h
+++ b/include/linux/mmc/host.h
@@ -57,11 +57,17 @@ struct mmc_ios {
57#define MMC_POWER_OFF 0 57#define MMC_POWER_OFF 0
58#define MMC_POWER_UP 1 58#define MMC_POWER_UP 1
59#define MMC_POWER_ON 2 59#define MMC_POWER_ON 2
60
61 unsigned char bus_width; /* data bus width */
62
63#define MMC_BUS_WIDTH_1 0
64#define MMC_BUS_WIDTH_4 2
60}; 65};
61 66
62struct mmc_host_ops { 67struct mmc_host_ops {
63 void (*request)(struct mmc_host *host, struct mmc_request *req); 68 void (*request)(struct mmc_host *host, struct mmc_request *req);
64 void (*set_ios)(struct mmc_host *host, struct mmc_ios *ios); 69 void (*set_ios)(struct mmc_host *host, struct mmc_ios *ios);
70 int (*get_ro)(struct mmc_host *host);
65}; 71};
66 72
67struct mmc_card; 73struct mmc_card;
@@ -76,6 +82,10 @@ struct mmc_host {
76 unsigned int f_max; 82 unsigned int f_max;
77 u32 ocr_avail; 83 u32 ocr_avail;
78 84
85 unsigned long caps; /* Host capabilities */
86
87#define MMC_CAP_4_BIT_DATA (1 << 0) /* Can the host do 4 bit transfers */
88
79 /* host specific block data */ 89 /* host specific block data */
80 unsigned int max_seg_size; /* see blk_queue_max_segment_size */ 90 unsigned int max_seg_size; /* see blk_queue_max_segment_size */
81 unsigned short max_hw_segs; /* see blk_queue_max_hw_segments */ 91 unsigned short max_hw_segs; /* see blk_queue_max_hw_segments */
@@ -87,6 +97,10 @@ struct mmc_host {
87 struct mmc_ios ios; /* current io bus settings */ 97 struct mmc_ios ios; /* current io bus settings */
88 u32 ocr; /* the current OCR setting */ 98 u32 ocr; /* the current OCR setting */
89 99
100 unsigned int mode; /* current card mode of host */
101#define MMC_MODE_MMC 0
102#define MMC_MODE_SD 1
103
90 struct list_head cards; /* devices attached to this host */ 104 struct list_head cards; /* devices attached to this host */
91 105
92 wait_queue_head_t wq; 106 wait_queue_head_t wq;
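
Host-driver sketch (not part of the patch) of the new capability flag and read-only hook; the example_* functions and their empty bodies are placeholders for a real controller driver.

#include <linux/mmc/host.h>
#include <linux/mmc/mmc.h>

static void example_request(struct mmc_host *host, struct mmc_request *req)
{
        /* issue the command/data, then call mmc_request_done(host, req) */
}

static void example_set_ios(struct mmc_host *host, struct mmc_ios *ios)
{
        /* apply ios->clock, ios->power_mode and the new ios->bus_width */
}

static int example_get_ro(struct mmc_host *host)
{
        return 0;       /* return 1 if the socket's write-protect switch is set */
}

static struct mmc_host_ops example_ops = {
        .request = example_request,
        .set_ios = example_set_ios,
        .get_ro  = example_get_ro,
};

static void example_host_setup(struct mmc_host *host)
{
        host->ops = &example_ops;
        host->caps |= MMC_CAP_4_BIT_DATA;       /* 4-bit SD transfers supported */
}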
diff --git a/include/linux/mmc/mmc.h b/include/linux/mmc/mmc.h
index 0d35d4ffb360..1ab78e8d6c53 100644
--- a/include/linux/mmc/mmc.h
+++ b/include/linux/mmc/mmc.h
@@ -88,6 +88,8 @@ struct mmc_card;
88 88
89extern int mmc_wait_for_req(struct mmc_host *, struct mmc_request *); 89extern int mmc_wait_for_req(struct mmc_host *, struct mmc_request *);
90extern int mmc_wait_for_cmd(struct mmc_host *, struct mmc_command *, int); 90extern int mmc_wait_for_cmd(struct mmc_host *, struct mmc_command *, int);
91extern int mmc_wait_for_app_cmd(struct mmc_host *, unsigned int,
92 struct mmc_command *, int);
91 93
92extern int __mmc_claim_host(struct mmc_host *host, struct mmc_card *card); 94extern int __mmc_claim_host(struct mmc_host *host, struct mmc_card *card);
93 95
diff --git a/include/linux/mmc/protocol.h b/include/linux/mmc/protocol.h
index 896342817b97..f819cae92266 100644
--- a/include/linux/mmc/protocol.h
+++ b/include/linux/mmc/protocol.h
@@ -236,5 +236,12 @@ struct _mmc_csd {
236#define CSD_SPEC_VER_2 2 /* Implements system specification 2.0 - 2.2 */ 236#define CSD_SPEC_VER_2 2 /* Implements system specification 2.0 - 2.2 */
237#define CSD_SPEC_VER_3 3 /* Implements system specification 3.1 */ 237#define CSD_SPEC_VER_3 3 /* Implements system specification 3.1 */
238 238
239
240/*
241 * SD bus widths
242 */
243#define SD_BUS_WIDTH_1 0
244#define SD_BUS_WIDTH_4 2
245
239#endif /* MMC_MMC_PROTOCOL_H */ 246#endif /* MMC_MMC_PROTOCOL_H */
240 247
diff --git a/include/linux/msg.h b/include/linux/msg.h
index 2c4c6aa643ff..903e0ab8101f 100644
--- a/include/linux/msg.h
+++ b/include/linux/msg.h
@@ -77,6 +77,7 @@ struct msg_msg {
77/* one msq_queue structure for each present queue on the system */ 77/* one msq_queue structure for each present queue on the system */
78struct msg_queue { 78struct msg_queue {
79 struct kern_ipc_perm q_perm; 79 struct kern_ipc_perm q_perm;
80 int q_id;
80 time_t q_stime; /* last msgsnd time */ 81 time_t q_stime; /* last msgsnd time */
81 time_t q_rtime; /* last msgrcv time */ 82 time_t q_rtime; /* last msgrcv time */
82 time_t q_ctime; /* last change time */ 83 time_t q_ctime; /* last change time */
diff --git a/include/linux/netfilter_ipv4/ip_conntrack.h b/include/linux/netfilter_ipv4/ip_conntrack.h
index 088742befe49..7e033e9271a8 100644
--- a/include/linux/netfilter_ipv4/ip_conntrack.h
+++ b/include/linux/netfilter_ipv4/ip_conntrack.h
@@ -263,6 +263,9 @@ struct ip_conntrack_expect
263 /* Unique ID */ 263 /* Unique ID */
264 unsigned int id; 264 unsigned int id;
265 265
266 /* Flags */
267 unsigned int flags;
268
266#ifdef CONFIG_IP_NF_NAT_NEEDED 269#ifdef CONFIG_IP_NF_NAT_NEEDED
267 /* This is the original per-proto part, used to map the 270 /* This is the original per-proto part, used to map the
268 * expected connection the way the recipient expects. */ 271 * expected connection the way the recipient expects. */
@@ -272,6 +275,8 @@ struct ip_conntrack_expect
272#endif 275#endif
273}; 276};
274 277
278#define IP_CT_EXPECT_PERMANENT 0x1
279
275static inline struct ip_conntrack * 280static inline struct ip_conntrack *
276tuplehash_to_ctrack(const struct ip_conntrack_tuple_hash *hash) 281tuplehash_to_ctrack(const struct ip_conntrack_tuple_hash *hash)
277{ 282{
diff --git a/include/linux/netfilter_ipv4/ip_conntrack_core.h b/include/linux/netfilter_ipv4/ip_conntrack_core.h
index dc4d2a0575de..907d4f5ca5dc 100644
--- a/include/linux/netfilter_ipv4/ip_conntrack_core.h
+++ b/include/linux/netfilter_ipv4/ip_conntrack_core.h
@@ -52,7 +52,7 @@ static inline int ip_conntrack_confirm(struct sk_buff **pskb)
52 return ret; 52 return ret;
53} 53}
54 54
55extern void __ip_ct_expect_unlink_destroy(struct ip_conntrack_expect *exp); 55extern void ip_ct_unlink_expect(struct ip_conntrack_expect *exp);
56 56
57extern struct list_head *ip_conntrack_hash; 57extern struct list_head *ip_conntrack_hash;
58extern struct list_head ip_conntrack_expect_list; 58extern struct list_head ip_conntrack_expect_list;
diff --git a/include/linux/netfilter_ipv4/ip_nat_rule.h b/include/linux/netfilter_ipv4/ip_nat_rule.h
index fecd2a06dcd8..73b9552e6a89 100644
--- a/include/linux/netfilter_ipv4/ip_nat_rule.h
+++ b/include/linux/netfilter_ipv4/ip_nat_rule.h
@@ -19,5 +19,10 @@ extern unsigned int
19alloc_null_binding(struct ip_conntrack *conntrack, 19alloc_null_binding(struct ip_conntrack *conntrack,
20 struct ip_nat_info *info, 20 struct ip_nat_info *info,
21 unsigned int hooknum); 21 unsigned int hooknum);
22
23extern unsigned int
24alloc_null_binding_confirmed(struct ip_conntrack *conntrack,
25 struct ip_nat_info *info,
26 unsigned int hooknum);
22#endif 27#endif
23#endif /* _IP_NAT_RULE_H */ 28#endif /* _IP_NAT_RULE_H */
diff --git a/include/linux/pci_ids.h b/include/linux/pci_ids.h
index 95c941f8c747..ee0ab7a5f91b 100644
--- a/include/linux/pci_ids.h
+++ b/include/linux/pci_ids.h
@@ -1612,6 +1612,7 @@
1612#define PCI_DEVICE_ID_TOSHIBA_TC35815CF 0x0030 1612#define PCI_DEVICE_ID_TOSHIBA_TC35815CF 0x0030
1613#define PCI_DEVICE_ID_TOSHIBA_TX4927 0x0180 1613#define PCI_DEVICE_ID_TOSHIBA_TX4927 0x0180
1614#define PCI_DEVICE_ID_TOSHIBA_TC86C001_MISC 0x0108 1614#define PCI_DEVICE_ID_TOSHIBA_TC86C001_MISC 0x0108
1615#define PCI_DEVICE_ID_TOSHIBA_SPIDER_NET 0x01b3
1615 1616
1616#define PCI_VENDOR_ID_RICOH 0x1180 1617#define PCI_VENDOR_ID_RICOH 0x1180
1617#define PCI_DEVICE_ID_RICOH_RL5C465 0x0465 1618#define PCI_DEVICE_ID_RICOH_RL5C465 0x0465
diff --git a/include/linux/pipe_fs_i.h b/include/linux/pipe_fs_i.h
index 36725e7c02c6..1767073df26f 100644
--- a/include/linux/pipe_fs_i.h
+++ b/include/linux/pipe_fs_i.h
@@ -39,9 +39,6 @@ struct pipe_inode_info {
39 39
40#define PIPE_SEM(inode) (&(inode).i_sem) 40#define PIPE_SEM(inode) (&(inode).i_sem)
41#define PIPE_WAIT(inode) (&(inode).i_pipe->wait) 41#define PIPE_WAIT(inode) (&(inode).i_pipe->wait)
42#define PIPE_BASE(inode) ((inode).i_pipe->base)
43#define PIPE_START(inode) ((inode).i_pipe->start)
44#define PIPE_LEN(inode) ((inode).i_pipe->len)
45#define PIPE_READERS(inode) ((inode).i_pipe->readers) 42#define PIPE_READERS(inode) ((inode).i_pipe->readers)
46#define PIPE_WRITERS(inode) ((inode).i_pipe->writers) 43#define PIPE_WRITERS(inode) ((inode).i_pipe->writers)
47#define PIPE_WAITING_WRITERS(inode) ((inode).i_pipe->waiting_writers) 44#define PIPE_WAITING_WRITERS(inode) ((inode).i_pipe->waiting_writers)
diff --git a/include/linux/pnp.h b/include/linux/pnp.h
index 5ec2bd0c2848..aadbac29103c 100644
--- a/include/linux/pnp.h
+++ b/include/linux/pnp.h
@@ -443,7 +443,7 @@ static inline void pnp_unregister_driver(struct pnp_driver *drv) { ; }
443#define pnp_info(format, arg...) printk(KERN_INFO "pnp: " format "\n" , ## arg) 443#define pnp_info(format, arg...) printk(KERN_INFO "pnp: " format "\n" , ## arg)
444#define pnp_warn(format, arg...) printk(KERN_WARNING "pnp: " format "\n" , ## arg) 444#define pnp_warn(format, arg...) printk(KERN_WARNING "pnp: " format "\n" , ## arg)
445 445
446#ifdef DEBUG 446#ifdef CONFIG_PNP_DEBUG
447#define pnp_dbg(format, arg...) printk(KERN_DEBUG "pnp: " format "\n" , ## arg) 447#define pnp_dbg(format, arg...) printk(KERN_DEBUG "pnp: " format "\n" , ## arg)
448#else 448#else
449#define pnp_dbg(format, arg...) do {} while (0) 449#define pnp_dbg(format, arg...) do {} while (0)
diff --git a/include/linux/ptrace.h b/include/linux/ptrace.h
index 2afdafb62123..dc6f3647bfbc 100644
--- a/include/linux/ptrace.h
+++ b/include/linux/ptrace.h
@@ -90,6 +90,7 @@ extern void __ptrace_link(struct task_struct *child,
90 struct task_struct *new_parent); 90 struct task_struct *new_parent);
91extern void __ptrace_unlink(struct task_struct *child); 91extern void __ptrace_unlink(struct task_struct *child);
92extern void ptrace_untrace(struct task_struct *child); 92extern void ptrace_untrace(struct task_struct *child);
93extern int ptrace_may_attach(struct task_struct *task);
93 94
94static inline void ptrace_link(struct task_struct *child, 95static inline void ptrace_link(struct task_struct *child,
95 struct task_struct *new_parent) 96 struct task_struct *new_parent)
diff --git a/include/linux/relayfs_fs.h b/include/linux/relayfs_fs.h
new file mode 100644
index 000000000000..cfafc3e76bc2
--- /dev/null
+++ b/include/linux/relayfs_fs.h
@@ -0,0 +1,255 @@
1/*
2 * linux/include/linux/relayfs_fs.h
3 *
4 * Copyright (C) 2002, 2003 - Tom Zanussi (zanussi@us.ibm.com), IBM Corp
5 * Copyright (C) 1999, 2000, 2001, 2002 - Karim Yaghmour (karim@opersys.com)
6 *
7 * RelayFS definitions and declarations
8 */
9
10#ifndef _LINUX_RELAYFS_FS_H
11#define _LINUX_RELAYFS_FS_H
12
13#include <linux/config.h>
14#include <linux/types.h>
15#include <linux/sched.h>
16#include <linux/wait.h>
17#include <linux/list.h>
18#include <linux/fs.h>
19#include <linux/poll.h>
20#include <linux/kref.h>
21
22/*
23 * Tracks changes to rchan_buf struct
24 */
25#define RELAYFS_CHANNEL_VERSION 5
26
27/*
28 * Per-cpu relay channel buffer
29 */
30struct rchan_buf
31{
32 void *start; /* start of channel buffer */
33 void *data; /* start of current sub-buffer */
34 size_t offset; /* current offset into sub-buffer */
35 size_t subbufs_produced; /* count of sub-buffers produced */
36 size_t subbufs_consumed; /* count of sub-buffers consumed */
37 struct rchan *chan; /* associated channel */
38 wait_queue_head_t read_wait; /* reader wait queue */
39 struct work_struct wake_readers; /* reader wake-up work struct */
40 struct dentry *dentry; /* channel file dentry */
41 struct kref kref; /* channel buffer refcount */
42 struct page **page_array; /* array of current buffer pages */
43 unsigned int page_count; /* number of current buffer pages */
44 unsigned int finalized; /* buffer has been finalized */
45 size_t *padding; /* padding counts per sub-buffer */
46 size_t prev_padding; /* temporary variable */
47 size_t bytes_consumed; /* bytes consumed in cur read subbuf */
48 unsigned int cpu; /* this buf's cpu */
49} ____cacheline_aligned;
50
51/*
52 * Relay channel data structure
53 */
54struct rchan
55{
56 u32 version; /* the version of this struct */
57 size_t subbuf_size; /* sub-buffer size */
58 size_t n_subbufs; /* number of sub-buffers per buffer */
59 size_t alloc_size; /* total buffer size allocated */
60 struct rchan_callbacks *cb; /* client callbacks */
61 struct kref kref; /* channel refcount */
62 void *private_data; /* for user-defined data */
63 struct rchan_buf *buf[NR_CPUS]; /* per-cpu channel buffers */
64};
65
66/*
67 * Relayfs inode
68 */
69struct relayfs_inode_info
70{
71 struct inode vfs_inode;
72 struct rchan_buf *buf;
73};
74
75static inline struct relayfs_inode_info *RELAYFS_I(struct inode *inode)
76{
77 return container_of(inode, struct relayfs_inode_info, vfs_inode);
78}
79
80/*
81 * Relay channel client callbacks
82 */
83struct rchan_callbacks
84{
85 /*
86 * subbuf_start - called on buffer-switch to a new sub-buffer
87 * @buf: the channel buffer containing the new sub-buffer
88 * @subbuf: the start of the new sub-buffer
89 * @prev_subbuf: the start of the previous sub-buffer
90 * @prev_padding: unused space at the end of previous sub-buffer
91 *
92 * The client should return 1 to continue logging, 0 to stop
93 * logging.
94 *
95 * NOTE: subbuf_start will also be invoked when the buffer is
96 * created, so that the first sub-buffer can be initialized
97 * if necessary. In this case, prev_subbuf will be NULL.
98 *
99 * NOTE: the client can reserve bytes at the beginning of the new
100 * sub-buffer by calling subbuf_start_reserve() in this callback.
101 */
102 int (*subbuf_start) (struct rchan_buf *buf,
103 void *subbuf,
104 void *prev_subbuf,
105 size_t prev_padding);
106
107 /*
108 * buf_mapped - relayfs buffer mmap notification
109 * @buf: the channel buffer
110 * @filp: relayfs file pointer
111 *
112 * Called when a relayfs file is successfully mmapped
113 */
114 void (*buf_mapped)(struct rchan_buf *buf,
115 struct file *filp);
116
117 /*
118 * buf_unmapped - relayfs buffer unmap notification
119 * @buf: the channel buffer
120 * @filp: relayfs file pointer
121 *
122 * Called when a relayfs file is successfully unmapped
123 */
124 void (*buf_unmapped)(struct rchan_buf *buf,
125 struct file *filp);
126};
127
128/*
129 * relayfs kernel API, fs/relayfs/relay.c
130 */
131
132struct rchan *relay_open(const char *base_filename,
133 struct dentry *parent,
134 size_t subbuf_size,
135 size_t n_subbufs,
136 struct rchan_callbacks *cb);
137extern void relay_close(struct rchan *chan);
138extern void relay_flush(struct rchan *chan);
139extern void relay_subbufs_consumed(struct rchan *chan,
140 unsigned int cpu,
141 size_t consumed);
142extern void relay_reset(struct rchan *chan);
143extern int relay_buf_full(struct rchan_buf *buf);
144
145extern size_t relay_switch_subbuf(struct rchan_buf *buf,
146 size_t length);
147extern struct dentry *relayfs_create_dir(const char *name,
148 struct dentry *parent);
149extern int relayfs_remove_dir(struct dentry *dentry);
150
151/**
152 * relay_write - write data into the channel
153 * @chan: relay channel
154 * @data: data to be written
155 * @length: number of bytes to write
156 *
157 * Writes data into the current cpu's channel buffer.
158 *
159 * Protects the buffer by disabling interrupts. Use this
160 * if you might be logging from interrupt context. Try
161 * __relay_write() if you know you won't be logging from
162 * interrupt context.
163 */
164static inline void relay_write(struct rchan *chan,
165 const void *data,
166 size_t length)
167{
168 unsigned long flags;
169 struct rchan_buf *buf;
170
171 local_irq_save(flags);
172 buf = chan->buf[smp_processor_id()];
173 if (unlikely(buf->offset + length > chan->subbuf_size))
174 length = relay_switch_subbuf(buf, length);
175 memcpy(buf->data + buf->offset, data, length);
176 buf->offset += length;
177 local_irq_restore(flags);
178}
179
180/**
181 * __relay_write - write data into the channel
182 * @chan: relay channel
183 * @data: data to be written
184 * @length: number of bytes to write
185 *
186 * Writes data into the current cpu's channel buffer.
187 *
188 * Protects the buffer by disabling preemption. Use
189 * relay_write() if you might be logging from interrupt
190 * context.
191 */
192static inline void __relay_write(struct rchan *chan,
193 const void *data,
194 size_t length)
195{
196 struct rchan_buf *buf;
197
198 buf = chan->buf[get_cpu()];
199 if (unlikely(buf->offset + length > buf->chan->subbuf_size))
200 length = relay_switch_subbuf(buf, length);
201 memcpy(buf->data + buf->offset, data, length);
202 buf->offset += length;
203 put_cpu();
204}
205
206/**
207 * relay_reserve - reserve slot in channel buffer
208 * @chan: relay channel
209 * @length: number of bytes to reserve
210 *
211 * Returns pointer to reserved slot, NULL if full.
212 *
213 * Reserves a slot in the current cpu's channel buffer.
214 * Does not protect the buffer at all - caller must provide
215 * appropriate synchronization.
216 */
217static inline void *relay_reserve(struct rchan *chan, size_t length)
218{
219 void *reserved;
220 struct rchan_buf *buf = chan->buf[smp_processor_id()];
221
222 if (unlikely(buf->offset + length > buf->chan->subbuf_size)) {
223 length = relay_switch_subbuf(buf, length);
224 if (!length)
225 return NULL;
226 }
227 reserved = buf->data + buf->offset;
228 buf->offset += length;
229
230 return reserved;
231}
232
233/**
234 * subbuf_start_reserve - reserve bytes at the start of a sub-buffer
235 * @buf: relay channel buffer
236 * @length: number of bytes to reserve
237 *
238 * Helper function used to reserve bytes at the beginning of
239 * a sub-buffer in the subbuf_start() callback.
240 */
241static inline void subbuf_start_reserve(struct rchan_buf *buf,
242 size_t length)
243{
244 BUG_ON(length >= buf->chan->subbuf_size - 1);
245 buf->offset = length;
246}
247
248/*
249 * exported relayfs file operations, fs/relayfs/inode.c
250 */
251
252extern struct file_operations relayfs_file_operations;
253
254#endif /* _LINUX_RELAYFS_FS_H */
255
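
Kernel-side sketch (not part of the patch) of the API declared above: open a channel, keep logging on every sub-buffer switch, and write fixed-size records from any context. The channel name, record format and sub-buffer geometry are invented for the example.

#include <linux/relayfs_fs.h>
#include <linux/jiffies.h>
#include <linux/errno.h>
#include <linux/init.h>

struct trace_rec {
        unsigned long when;
        unsigned int event;
};

static struct rchan *trace_chan;

static int trace_subbuf_start(struct rchan_buf *buf, void *subbuf,
                              void *prev_subbuf, size_t prev_padding)
{
        return 1;       /* never stop logging (overwrite mode) */
}

static void trace_buf_mapped(struct rchan_buf *buf, struct file *filp) { }
static void trace_buf_unmapped(struct rchan_buf *buf, struct file *filp) { }

static struct rchan_callbacks trace_cb = {
        .subbuf_start = trace_subbuf_start,
        .buf_mapped   = trace_buf_mapped,
        .buf_unmapped = trace_buf_unmapped,
};

static int __init trace_init(void)
{
        /* 8 sub-buffers of 16 KiB each, created in the relayfs root */
        trace_chan = relay_open("example-trace", NULL, 16384, 8, &trace_cb);
        return trace_chan ? 0 : -ENOMEM;
}

static void trace_event(unsigned int event)
{
        struct trace_rec rec = { .when = jiffies, .event = event };

        /* relay_write() disables interrupts, so this is safe from IRQs too */
        relay_write(trace_chan, &rec, sizeof(rec));
}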
diff --git a/include/linux/sched.h b/include/linux/sched.h
index dec5827c7742..ea1b5f32ec5c 100644
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
@@ -35,6 +35,8 @@
35#include <linux/topology.h> 35#include <linux/topology.h>
36#include <linux/seccomp.h> 36#include <linux/seccomp.h>
37 37
38#include <linux/auxvec.h> /* For AT_VECTOR_SIZE */
39
38struct exec_domain; 40struct exec_domain;
39 41
40/* 42/*
@@ -176,6 +178,23 @@ extern void trap_init(void);
176extern void update_process_times(int user); 178extern void update_process_times(int user);
177extern void scheduler_tick(void); 179extern void scheduler_tick(void);
178 180
181#ifdef CONFIG_DETECT_SOFTLOCKUP
182extern void softlockup_tick(struct pt_regs *regs);
183extern void spawn_softlockup_task(void);
184extern void touch_softlockup_watchdog(void);
185#else
186static inline void softlockup_tick(struct pt_regs *regs)
187{
188}
189static inline void spawn_softlockup_task(void)
190{
191}
192static inline void touch_softlockup_watchdog(void)
193{
194}
195#endif
196
197
179/* Attach to any functions which should be ignored in wchan output. */ 198/* Attach to any functions which should be ignored in wchan output. */
180#define __sched __attribute__((__section__(".sched.text"))) 199#define __sched __attribute__((__section__(".sched.text")))
181/* Is this address in the __sched functions? */ 200/* Is this address in the __sched functions? */
@@ -244,7 +263,7 @@ struct mm_struct {
244 mm_counter_t _rss; 263 mm_counter_t _rss;
245 mm_counter_t _anon_rss; 264 mm_counter_t _anon_rss;
246 265
247 unsigned long saved_auxv[42]; /* for /proc/PID/auxv */ 266 unsigned long saved_auxv[AT_VECTOR_SIZE]; /* for /proc/PID/auxv */
248 267
249 unsigned dumpable:2; 268 unsigned dumpable:2;
250 cpumask_t cpu_vm_mask; 269 cpumask_t cpu_vm_mask;
@@ -545,13 +564,6 @@ struct sched_domain {
545 564
546extern void partition_sched_domains(cpumask_t *partition1, 565extern void partition_sched_domains(cpumask_t *partition1,
547 cpumask_t *partition2); 566 cpumask_t *partition2);
548#ifdef ARCH_HAS_SCHED_DOMAIN
549/* Useful helpers that arch setup code may use. Defined in kernel/sched.c */
550extern cpumask_t cpu_isolated_map;
551extern void init_sched_build_groups(struct sched_group groups[],
552 cpumask_t span, int (*group_fn)(int cpu));
553extern void cpu_attach_domain(struct sched_domain *sd, int cpu);
554#endif /* ARCH_HAS_SCHED_DOMAIN */
555#endif /* CONFIG_SMP */ 567#endif /* CONFIG_SMP */
556 568
557 569
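
Sketch (not part of the patch) of the intended consumer of touch_softlockup_watchdog(): code that legitimately hogs a CPU for a while tells the new detector it is still alive. The flash-erase loop and its constants are invented.

#include <linux/sched.h>
#include <linux/delay.h>

#define EXAMPLE_NR_BLOCKS 1024

static void example_erase_block(int block)
{
        mdelay(20);     /* stand-in for a slow, polling erase operation */
}

static void example_erase_device(void)
{
        int i;

        for (i = 0; i < EXAMPLE_NR_BLOCKS; i++) {
                example_erase_block(i);
                touch_softlockup_watchdog();    /* busy, not locked up */
        }
}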
diff --git a/include/linux/sem.h b/include/linux/sem.h
index 2d8516be9fd7..106f9757339a 100644
--- a/include/linux/sem.h
+++ b/include/linux/sem.h
@@ -88,6 +88,7 @@ struct sem {
88/* One sem_array data structure for each set of semaphores in the system. */ 88/* One sem_array data structure for each set of semaphores in the system. */
89struct sem_array { 89struct sem_array {
90 struct kern_ipc_perm sem_perm; /* permissions .. see ipc.h */ 90 struct kern_ipc_perm sem_perm; /* permissions .. see ipc.h */
91 int sem_id;
91 time_t sem_otime; /* last semop time */ 92 time_t sem_otime; /* last semop time */
92 time_t sem_ctime; /* last change time */ 93 time_t sem_ctime; /* last change time */
93 struct sem *sem_base; /* ptr to first semaphore in array */ 94 struct sem *sem_base; /* ptr to first semaphore in array */
diff --git a/include/linux/serial_core.h b/include/linux/serial_core.h
index cf0f64ea2bc0..9b12fe731612 100644
--- a/include/linux/serial_core.h
+++ b/include/linux/serial_core.h
@@ -385,11 +385,11 @@ int uart_resume_port(struct uart_driver *reg, struct uart_port *port);
385/* 385/*
386 * The following are helper functions for the low level drivers. 386 * The following are helper functions for the low level drivers.
387 */ 387 */
388#ifdef SUPPORT_SYSRQ
389static inline int 388static inline int
390uart_handle_sysrq_char(struct uart_port *port, unsigned int ch, 389uart_handle_sysrq_char(struct uart_port *port, unsigned int ch,
391 struct pt_regs *regs) 390 struct pt_regs *regs)
392{ 391{
392#ifdef SUPPORT_SYSRQ
393 if (port->sysrq) { 393 if (port->sysrq) {
394 if (ch && time_before(jiffies, port->sysrq)) { 394 if (ch && time_before(jiffies, port->sysrq)) {
395 handle_sysrq(ch, regs, NULL); 395 handle_sysrq(ch, regs, NULL);
@@ -398,11 +398,9 @@ uart_handle_sysrq_char(struct uart_port *port, unsigned int ch,
398 } 398 }
399 port->sysrq = 0; 399 port->sysrq = 0;
400 } 400 }
401#endif
401 return 0; 402 return 0;
402} 403}
403#else
404#define uart_handle_sysrq_char(port,ch,regs) (0)
405#endif
406 404
407/* 405/*
408 * We do the SysRQ and SAK checking like this... 406 * We do the SysRQ and SAK checking like this...
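
Receive-path sketch (not part of the patch) showing the benefit of uart_handle_sysrq_char() now being defined unconditionally; the function name is illustrative and the tty push follows the conventions of this kernel generation.

#include <linux/serial_core.h>
#include <linux/tty.h>
#include <linux/tty_flip.h>

static void example_rx_char(struct uart_port *port, unsigned int ch,
                            struct pt_regs *regs)
{
        /* Returns non-zero (and swallows the character) if it completed a
         * break/SysRq sequence; otherwise hand the character to the tty. */
        if (uart_handle_sysrq_char(port, ch, regs))
                return;

        tty_insert_flip_char(port->info->tty, ch, TTY_NORMAL);
}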
diff --git a/include/linux/skbuff.h b/include/linux/skbuff.h
index 42edce6abe23..da7da9c0ed1b 100644
--- a/include/linux/skbuff.h
+++ b/include/linux/skbuff.h
@@ -1251,7 +1251,7 @@ extern void skb_add_mtu(int mtu);
1251 * This function converts the offset back to a struct timeval and stores 1251 * This function converts the offset back to a struct timeval and stores
1252 * it in stamp. 1252 * it in stamp.
1253 */ 1253 */
1254static inline void skb_get_timestamp(struct sk_buff *skb, struct timeval *stamp) 1254static inline void skb_get_timestamp(const struct sk_buff *skb, struct timeval *stamp)
1255{ 1255{
1256 stamp->tv_sec = skb->tstamp.off_sec; 1256 stamp->tv_sec = skb->tstamp.off_sec;
1257 stamp->tv_usec = skb->tstamp.off_usec; 1257 stamp->tv_usec = skb->tstamp.off_usec;
@@ -1270,7 +1270,7 @@ static inline void skb_get_timestamp(struct sk_buff *skb, struct timeval *stamp)
1270 * This function converts a struct timeval to an offset and stores 1270 * This function converts a struct timeval to an offset and stores
1271 * it in the skb. 1271 * it in the skb.
1272 */ 1272 */
1273static inline void skb_set_timestamp(struct sk_buff *skb, struct timeval *stamp) 1273static inline void skb_set_timestamp(struct sk_buff *skb, const struct timeval *stamp)
1274{ 1274{
1275 skb->tstamp.off_sec = stamp->tv_sec - skb_tv_base.tv_sec; 1275 skb->tstamp.off_sec = stamp->tv_sec - skb_tv_base.tv_sec;
1276 skb->tstamp.off_usec = stamp->tv_usec - skb_tv_base.tv_usec; 1276 skb->tstamp.off_usec = stamp->tv_usec - skb_tv_base.tv_usec;
diff --git a/include/linux/slab.h b/include/linux/slab.h
index 80b2dfde2e80..42a6bea58af3 100644
--- a/include/linux/slab.h
+++ b/include/linux/slab.h
@@ -99,7 +99,21 @@ found:
99 return __kmalloc(size, flags); 99 return __kmalloc(size, flags);
100} 100}
101 101
102extern void *kcalloc(size_t, size_t, unsigned int __nocast); 102extern void *kzalloc(size_t, unsigned int __nocast);
103
104/**
105 * kcalloc - allocate memory for an array. The memory is set to zero.
106 * @n: number of elements.
107 * @size: element size.
108 * @flags: the type of memory to allocate.
109 */
110static inline void *kcalloc(size_t n, size_t size, unsigned int __nocast flags)
111{
112 if (n != 0 && size > INT_MAX / n)
113 return NULL;
114 return kzalloc(n * size, flags);
115}
116
103extern void kfree(const void *); 117extern void kfree(const void *);
104extern unsigned int ksize(const void *); 118extern unsigned int ksize(const void *);
105 119
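
A small usage sketch (not part of the patch) of the new kzalloc() and the kcalloc() wrapper built on it; the structure is made up for the example.

#include <linux/slab.h>

struct example_widget {
        int id;
        void *data;
};

static struct example_widget *alloc_widgets(size_t n)
{
        /* zeroed array; returns NULL on n*size overflow or allocation failure */
        struct example_widget *w = kcalloc(n, sizeof(*w), GFP_KERNEL);

        if (w)
                w[0].id = 1;    /* every other field is already zero */
        return w;
}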
diff --git a/include/linux/sonypi.h b/include/linux/sonypi.h
index 768cbba617d0..f56d24734950 100644
--- a/include/linux/sonypi.h
+++ b/include/linux/sonypi.h
@@ -99,6 +99,8 @@
99#define SONYPI_EVENT_BATTERY_INSERT 57 99#define SONYPI_EVENT_BATTERY_INSERT 57
100#define SONYPI_EVENT_BATTERY_REMOVE 58 100#define SONYPI_EVENT_BATTERY_REMOVE 58
101#define SONYPI_EVENT_FNKEY_RELEASED 59 101#define SONYPI_EVENT_FNKEY_RELEASED 59
102#define SONYPI_EVENT_WIRELESS_ON 60
103#define SONYPI_EVENT_WIRELESS_OFF 61
102 104
103/* get/set brightness */ 105/* get/set brightness */
104#define SONYPI_IOCGBRT _IOR('v', 0, __u8) 106#define SONYPI_IOCGBRT _IOR('v', 0, __u8)
diff --git a/include/linux/sunrpc/cache.h b/include/linux/sunrpc/cache.h
index 6864063d1b9f..c4e3ea7cf154 100644
--- a/include/linux/sunrpc/cache.h
+++ b/include/linux/sunrpc/cache.h
@@ -60,6 +60,7 @@ struct cache_head {
60#define CACHE_NEW_EXPIRY 120 /* keep new things pending confirmation for 120 seconds */ 60#define CACHE_NEW_EXPIRY 120 /* keep new things pending confirmation for 120 seconds */
61 61
62struct cache_detail { 62struct cache_detail {
63 struct module * owner;
63 int hash_size; 64 int hash_size;
64 struct cache_head ** hash_table; 65 struct cache_head ** hash_table;
65 rwlock_t hash_lock; 66 rwlock_t hash_lock;
diff --git a/include/linux/sysctl.h b/include/linux/sysctl.h
index e82be96d4906..532a6c5c24e9 100644
--- a/include/linux/sysctl.h
+++ b/include/linux/sysctl.h
@@ -711,6 +711,7 @@ enum {
711 DEV_RAID=4, 711 DEV_RAID=4,
712 DEV_MAC_HID=5, 712 DEV_MAC_HID=5,
713 DEV_SCSI=6, 713 DEV_SCSI=6,
714 DEV_IPMI=7,
714}; 715};
715 716
716/* /proc/sys/dev/cdrom */ 717/* /proc/sys/dev/cdrom */
@@ -776,6 +777,11 @@ enum {
776 DEV_SCSI_LOGGING_LEVEL=1, 777 DEV_SCSI_LOGGING_LEVEL=1,
777}; 778};
778 779
780/* /proc/sys/dev/ipmi */
781enum {
782 DEV_IPMI_POWEROFF_POWERCYCLE=1,
783};
784
779/* /proc/sys/abi */ 785/* /proc/sys/abi */
780enum 786enum
781{ 787{
diff --git a/include/linux/time.h b/include/linux/time.h
index 5634497ff5df..c10d4c21c183 100644
--- a/include/linux/time.h
+++ b/include/linux/time.h
@@ -97,7 +97,6 @@ extern int do_settimeofday(struct timespec *tv);
97extern int do_sys_settimeofday(struct timespec *tv, struct timezone *tz); 97extern int do_sys_settimeofday(struct timespec *tv, struct timezone *tz);
98extern void clock_was_set(void); // call when ever the clock is set 98extern void clock_was_set(void); // call when ever the clock is set
99extern int do_posix_clock_monotonic_gettime(struct timespec *tp); 99extern int do_posix_clock_monotonic_gettime(struct timespec *tp);
100extern long do_nanosleep(struct timespec *t);
101extern long do_utimes(char __user * filename, struct timeval * times); 100extern long do_utimes(char __user * filename, struct timeval * times);
102struct itimerval; 101struct itimerval;
103extern int do_setitimer(int which, struct itimerval *value, struct itimerval *ovalue); 102extern int do_setitimer(int which, struct itimerval *value, struct itimerval *ovalue);
diff --git a/include/linux/timex.h b/include/linux/timex.h
index 74fdd07d3792..7e050a2cc35b 100644
--- a/include/linux/timex.h
+++ b/include/linux/timex.h
@@ -260,6 +260,29 @@ extern long pps_calcnt; /* calibration intervals */
260extern long pps_errcnt; /* calibration errors */ 260extern long pps_errcnt; /* calibration errors */
261extern long pps_stbcnt; /* stability limit exceeded */ 261extern long pps_stbcnt; /* stability limit exceeded */
262 262
263/**
264 * ntp_clear - Clears the NTP state variables
265 *
 266 * Must be called while holding xtime_lock for writing
267 */
268static inline void ntp_clear(void)
269{
270 time_adjust = 0; /* stop active adjtime() */
271 time_status |= STA_UNSYNC;
272 time_maxerror = NTP_PHASE_LIMIT;
273 time_esterror = NTP_PHASE_LIMIT;
274}
275
276/**
277 * ntp_synced - Returns 1 if the NTP status is not UNSYNC
278 *
279 */
280static inline int ntp_synced(void)
281{
282 return !(time_status & STA_UNSYNC);
283}
284
285
263#ifdef CONFIG_TIME_INTERPOLATION 286#ifdef CONFIG_TIME_INTERPOLATION
264 287
265#define TIME_SOURCE_CPU 0 288#define TIME_SOURCE_CPU 0
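
The typical caller of the new ntp_synced() helper is an architecture timer tick deciding whether to trickle system time back into the RTC. This is a sketch, with example_set_rtc() standing in for the arch-specific hook and locking omitted.

#include <linux/timex.h>
#include <linux/time.h>

static int example_set_rtc(void)
{
        return 0;       /* arch-specific: copy xtime into the CMOS/RTC */
}

static void example_sync_cmos_clock(void)
{
        /* Only rewrite the hardware clock while NTP is disciplining us and
         * we are within half a tick of the 500 ms boundary. */
        if (ntp_synced() &&
            xtime.tv_nsec > 500000000 - (tick_nsec >> 1) &&
            xtime.tv_nsec < 500000000 + (tick_nsec >> 1))
                example_set_rtc();
}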
diff --git a/include/linux/topology.h b/include/linux/topology.h
index 0320225e96da..3df1d474e5c5 100644
--- a/include/linux/topology.h
+++ b/include/linux/topology.h
@@ -135,6 +135,29 @@
135} 135}
136#endif 136#endif
137 137
138/* sched_domains SD_ALLNODES_INIT for NUMA machines */
139#define SD_ALLNODES_INIT (struct sched_domain) { \
140 .span = CPU_MASK_NONE, \
141 .parent = NULL, \
142 .groups = NULL, \
143 .min_interval = 64, \
144 .max_interval = 64*num_online_cpus(), \
145 .busy_factor = 128, \
146 .imbalance_pct = 133, \
147 .cache_hot_time = (10*1000000), \
148 .cache_nice_tries = 1, \
149 .busy_idx = 3, \
150 .idle_idx = 3, \
151 .newidle_idx = 0, /* unused */ \
152 .wake_idx = 0, /* unused */ \
153 .forkexec_idx = 0, /* unused */ \
154 .per_cpu_gain = 100, \
155 .flags = SD_LOAD_BALANCE, \
156 .last_balance = jiffies, \
157 .balance_interval = 64, \
158 .nr_balance_failed = 0, \
159}
160
138#ifdef CONFIG_NUMA 161#ifdef CONFIG_NUMA
139#ifndef SD_NODE_INIT 162#ifndef SD_NODE_INIT
140#error Please define an appropriate SD_NODE_INIT in include/asm/topology.h!!! 163#error Please define an appropriate SD_NODE_INIT in include/asm/topology.h!!!
diff --git a/include/linux/wireless.h b/include/linux/wireless.h
index ae485f9c916e..a555a0f7a7b4 100644
--- a/include/linux/wireless.h
+++ b/include/linux/wireless.h
@@ -1,7 +1,7 @@
1/* 1/*
 2 * This file defines a set of standard wireless extensions 2 * This file defines a set of standard wireless extensions
3 * 3 *
4 * Version : 18 12.3.05 4 * Version : 19 18.3.05
5 * 5 *
6 * Authors : Jean Tourrilhes - HPL - <jt@hpl.hp.com> 6 * Authors : Jean Tourrilhes - HPL - <jt@hpl.hp.com>
7 * Copyright (c) 1997-2005 Jean Tourrilhes, All Rights Reserved. 7 * Copyright (c) 1997-2005 Jean Tourrilhes, All Rights Reserved.
@@ -69,8 +69,6 @@
69 69
70/***************************** INCLUDES *****************************/ 70/***************************** INCLUDES *****************************/
71 71
72/* To minimise problems in user space, I might remove those headers
73 * at some point. Jean II */
74#include <linux/types.h> /* for "caddr_t" et al */ 72#include <linux/types.h> /* for "caddr_t" et al */
75#include <linux/socket.h> /* for "struct sockaddr" et al */ 73#include <linux/socket.h> /* for "struct sockaddr" et al */
76#include <linux/if.h> /* for IFNAMSIZ and co... */ 74#include <linux/if.h> /* for IFNAMSIZ and co... */
@@ -82,7 +80,7 @@
82 * (there is some stuff that will be added in the future...) 80 * (there is some stuff that will be added in the future...)
83 * I just plan to increment with each new version. 81 * I just plan to increment with each new version.
84 */ 82 */
85#define WIRELESS_EXT 18 83#define WIRELESS_EXT 19
86 84
87/* 85/*
88 * Changes : 86 * Changes :
@@ -197,6 +195,15 @@
197 * related parameters (extensible up to 4096 parameter values) 195 * related parameters (extensible up to 4096 parameter values)
198 * - Add wireless events: IWEVGENIE, IWEVMICHAELMICFAILURE, 196 * - Add wireless events: IWEVGENIE, IWEVMICHAELMICFAILURE,
199 * IWEVASSOCREQIE, IWEVASSOCRESPIE, IWEVPMKIDCAND 197 * IWEVASSOCREQIE, IWEVASSOCRESPIE, IWEVPMKIDCAND
198 *
199 * V18 to V19
200 * ----------
201 * - Remove (struct iw_point *)->pointer from events and streams
202 * - Remove header includes to help user space
203 * - Increase IW_ENCODING_TOKEN_MAX from 32 to 64
204 * - Add IW_QUAL_ALL_UPDATED and IW_QUAL_ALL_INVALID macros
205 * - Add explicit flag to tell stats are in dBm : IW_QUAL_DBM
206 * - Add IW_IOCTL_IDX() and IW_EVENT_IDX() macros
200 */ 207 */
201 208
202/**************************** CONSTANTS ****************************/ 209/**************************** CONSTANTS ****************************/
@@ -322,6 +329,7 @@
322/* The first and the last (range) */ 329/* The first and the last (range) */
323#define SIOCIWFIRST 0x8B00 330#define SIOCIWFIRST 0x8B00
324#define SIOCIWLAST SIOCIWLASTPRIV /* 0x8BFF */ 331#define SIOCIWLAST SIOCIWLASTPRIV /* 0x8BFF */
332#define IW_IOCTL_IDX(cmd) ((cmd) - SIOCIWFIRST)
325 333
326/* Even : get (world access), odd : set (root access) */ 334/* Even : get (world access), odd : set (root access) */
327#define IW_IS_SET(cmd) (!((cmd) & 0x1)) 335#define IW_IS_SET(cmd) (!((cmd) & 0x1))
@@ -366,6 +374,7 @@
366 * (struct iw_pmkid_cand) */ 374 * (struct iw_pmkid_cand) */
367 375
368#define IWEVFIRST 0x8C00 376#define IWEVFIRST 0x8C00
377#define IW_EVENT_IDX(cmd) ((cmd) - IWEVFIRST)
369 378
370/* ------------------------- PRIVATE INFO ------------------------- */ 379/* ------------------------- PRIVATE INFO ------------------------- */
371/* 380/*
@@ -427,12 +436,15 @@
427#define IW_MODE_MONITOR 6 /* Passive monitor (listen only) */ 436#define IW_MODE_MONITOR 6 /* Passive monitor (listen only) */
428 437
429/* Statistics flags (bitmask in updated) */ 438/* Statistics flags (bitmask in updated) */
430#define IW_QUAL_QUAL_UPDATED 0x1 /* Value was updated since last read */ 439#define IW_QUAL_QUAL_UPDATED 0x01 /* Value was updated since last read */
431#define IW_QUAL_LEVEL_UPDATED 0x2 440#define IW_QUAL_LEVEL_UPDATED 0x02
432#define IW_QUAL_NOISE_UPDATED 0x4 441#define IW_QUAL_NOISE_UPDATED 0x04
442#define IW_QUAL_ALL_UPDATED 0x07
443#define IW_QUAL_DBM 0x08 /* Level + Noise are dBm */
433#define IW_QUAL_QUAL_INVALID 0x10 /* Driver doesn't provide value */ 444#define IW_QUAL_QUAL_INVALID 0x10 /* Driver doesn't provide value */
434#define IW_QUAL_LEVEL_INVALID 0x20 445#define IW_QUAL_LEVEL_INVALID 0x20
435#define IW_QUAL_NOISE_INVALID 0x40 446#define IW_QUAL_NOISE_INVALID 0x40
447#define IW_QUAL_ALL_INVALID 0x70
436 448
437/* Frequency flags */ 449/* Frequency flags */
438#define IW_FREQ_AUTO 0x00 /* Let the driver decides */ 450#define IW_FREQ_AUTO 0x00 /* Let the driver decides */
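Editor's note: besides the IW_QUAL_ALL_* shorthands, IW_QUAL_DBM lets a driver state explicitly that level and noise are dBm rather than arbitrary relative units. A hedged sketch of how a driver might fill struct iw_quality under the new flags (the values and the value+0x100 encoding follow the common user-space convention and are given only as an assumption):

        /* Illustrative only: report -55 dBm signal and -95 dBm noise */
        static void report_quality(struct iw_quality *q)
        {
                q->level   = 0x100 - 55;        /* -55 dBm, 8-bit two's complement */
                q->noise   = 0x100 - 95;        /* -95 dBm */
                q->qual    = 40;                /* driver-specific link quality */
                q->updated = IW_QUAL_ALL_UPDATED | IW_QUAL_DBM;
        }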
@@ -443,7 +455,7 @@
443#define IW_MAX_ENCODING_SIZES 8 455#define IW_MAX_ENCODING_SIZES 8
444 456
445/* Maximum size of the encoding token in bytes */ 457/* Maximum size of the encoding token in bytes */
446#define IW_ENCODING_TOKEN_MAX 32 /* 256 bits (for now) */ 458#define IW_ENCODING_TOKEN_MAX 64 /* 512 bits (for now) */
447 459
448/* Flags for encoding (along with the token) */ 460/* Flags for encoding (along with the token) */
449#define IW_ENCODE_INDEX 0x00FF /* Token index (if needed) */ 461#define IW_ENCODE_INDEX 0x00FF /* Token index (if needed) */
@@ -1039,12 +1051,16 @@ struct iw_event
1039#define IW_EV_CHAR_LEN (IW_EV_LCP_LEN + IFNAMSIZ) 1051#define IW_EV_CHAR_LEN (IW_EV_LCP_LEN + IFNAMSIZ)
1040#define IW_EV_UINT_LEN (IW_EV_LCP_LEN + sizeof(__u32)) 1052#define IW_EV_UINT_LEN (IW_EV_LCP_LEN + sizeof(__u32))
1041#define IW_EV_FREQ_LEN (IW_EV_LCP_LEN + sizeof(struct iw_freq)) 1053#define IW_EV_FREQ_LEN (IW_EV_LCP_LEN + sizeof(struct iw_freq))
1042#define IW_EV_POINT_LEN (IW_EV_LCP_LEN + sizeof(struct iw_point))
1043#define IW_EV_PARAM_LEN (IW_EV_LCP_LEN + sizeof(struct iw_param)) 1054#define IW_EV_PARAM_LEN (IW_EV_LCP_LEN + sizeof(struct iw_param))
1044#define IW_EV_ADDR_LEN (IW_EV_LCP_LEN + sizeof(struct sockaddr)) 1055#define IW_EV_ADDR_LEN (IW_EV_LCP_LEN + sizeof(struct sockaddr))
1045#define IW_EV_QUAL_LEN (IW_EV_LCP_LEN + sizeof(struct iw_quality)) 1056#define IW_EV_QUAL_LEN (IW_EV_LCP_LEN + sizeof(struct iw_quality))
1046 1057
1047/* Note : in the case of iw_point, the extra data will come at the 1058/* iw_point events are special. First, the payload (extra data) comes at
1048 * end of the event */ 1059 * the end of the event, so they are bigger than IW_EV_POINT_LEN. Second,
1060 * we omit the pointer, so start at an offset. */
1061#define IW_EV_POINT_OFF (((char *) &(((struct iw_point *) NULL)->length)) - \
1062 (char *) NULL)
1063#define IW_EV_POINT_LEN (IW_EV_LCP_LEN + sizeof(struct iw_point) - \
1064 IW_EV_POINT_OFF)
1049 1065
1050#endif /* _LINUX_WIRELESS_H */ 1066#endif /* _LINUX_WIRELESS_H */
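Editor's note: because the user-space pointer is no longer transmitted in events, IW_EV_POINT_OFF is the offset of the first field that is still sent (length), and IW_EV_POINT_LEN shrinks by that amount. The NULL-pointer arithmetic above is the classic offsetof() idiom; a sketch of the same definitions written with the standard macro (illustrative, <linux/stddef.h> provides offsetof in the kernel):

        #include <stddef.h>
        #define IW_EV_POINT_OFF offsetof(struct iw_point, length)
        #define IW_EV_POINT_LEN (IW_EV_LCP_LEN + sizeof(struct iw_point) - \
                                 IW_EV_POINT_OFF)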
diff --git a/include/net/ax25.h b/include/net/ax25.h
index 926eed543023..364b046e9f47 100644
--- a/include/net/ax25.h
+++ b/include/net/ax25.h
@@ -257,7 +257,7 @@ extern struct sock *ax25_make_new(struct sock *, struct ax25_dev *);
257 257
258/* ax25_addr.c */ 258/* ax25_addr.c */
259extern ax25_address null_ax25_address; 259extern ax25_address null_ax25_address;
260extern char *ax2asc(ax25_address *); 260extern char *ax2asc(char *buf, ax25_address *);
261extern ax25_address *asc2ax(char *); 261extern ax25_address *asc2ax(char *);
262extern int ax25cmp(ax25_address *, ax25_address *); 262extern int ax25cmp(ax25_address *, ax25_address *);
263extern int ax25digicmp(ax25_digi *, ax25_digi *); 263extern int ax25digicmp(ax25_digi *, ax25_digi *);
diff --git a/include/net/iw_handler.h b/include/net/iw_handler.h
index 44edd48f1234..d67c8393a343 100644
--- a/include/net/iw_handler.h
+++ b/include/net/iw_handler.h
@@ -1,10 +1,10 @@
1/* 1/*
2 * This file define the new driver API for Wireless Extensions 2 * This file define the new driver API for Wireless Extensions
3 * 3 *
4 * Version : 6 21.6.04 4 * Version : 7 18.3.05
5 * 5 *
6 * Authors : Jean Tourrilhes - HPL - <jt@hpl.hp.com> 6 * Authors : Jean Tourrilhes - HPL - <jt@hpl.hp.com>
7 * Copyright (c) 2001-2004 Jean Tourrilhes, All Rights Reserved. 7 * Copyright (c) 2001-2005 Jean Tourrilhes, All Rights Reserved.
8 */ 8 */
9 9
10#ifndef _IW_HANDLER_H 10#ifndef _IW_HANDLER_H
@@ -207,7 +207,7 @@
207 * will be needed... 207 * will be needed...
208 * I just plan to increment with each new version. 208 * I just plan to increment with each new version.
209 */ 209 */
210#define IW_HANDLER_VERSION 6 210#define IW_HANDLER_VERSION 7
211 211
212/* 212/*
213 * Changes : 213 * Changes :
@@ -232,6 +232,13 @@
232 * - Remove spy #ifdef, they are always on -> cleaner code 232 * - Remove spy #ifdef, they are always on -> cleaner code
233 * - Add IW_DESCR_FLAG_NOMAX flag for very large requests 233 * - Add IW_DESCR_FLAG_NOMAX flag for very large requests
234 * - Start migrating get_wireless_stats to struct iw_handler_def 234 * - Start migrating get_wireless_stats to struct iw_handler_def
235 *
236 * V6 to V7
237 * --------
238 * - Add struct ieee80211_device pointer in struct iw_public_data
239 * - Remove (struct iw_point *)->pointer from events and streams
240 * - Remove spy_offset from struct iw_handler_def
241 * - Add "check" version of event macros for ieee802.11 stack
235 */ 242 */
236 243
237/**************************** CONSTANTS ****************************/ 244/**************************** CONSTANTS ****************************/
@@ -334,9 +341,6 @@ struct iw_handler_def
334 * We will automatically export that to user space... */ 341 * We will automatically export that to user space... */
335 const struct iw_priv_args * private_args; 342 const struct iw_priv_args * private_args;
336 343
337 /* This field will be *removed* in the next version of WE */
338 long spy_offset; /* DO NOT USE */
339
340 /* New location of get_wireless_stats, to de-bloat struct net_device. 344 /* New location of get_wireless_stats, to de-bloat struct net_device.
341 * The old pointer in struct net_device will be gradually phased 345 * The old pointer in struct net_device will be gradually phased
342 * out, and drivers are encouraged to use this one... */ 346 * out, and drivers are encouraged to use this one... */
@@ -400,16 +404,21 @@ struct iw_spy_data
400/* --------------------- DEVICE WIRELESS DATA --------------------- */ 404/* --------------------- DEVICE WIRELESS DATA --------------------- */
401/* 405/*
402 * This is all the wireless data specific to a device instance that 406 * This is all the wireless data specific to a device instance that
403 * is managed by the core of Wireless Extensions. 407 * is managed by the core of Wireless Extensions or the 802.11 layer.
404 * We only keep pointer to those structures, so that a driver is free 408 * We only keep pointer to those structures, so that a driver is free
405 * to share them between instances. 409 * to share them between instances.
406 * This structure should be initialised before registering the device. 410 * This structure should be initialised before registering the device.
407 * Access to this data follow the same rules as any other struct net_device 411 * Access to this data follow the same rules as any other struct net_device
408 * data (i.e. valid as long as struct net_device exist, same locking rules). 412 * data (i.e. valid as long as struct net_device exist, same locking rules).
409 */ 413 */
414/* Forward declaration */
415struct ieee80211_device;
416/* The struct */
410struct iw_public_data { 417struct iw_public_data {
411 /* Driver enhanced spy support */ 418 /* Driver enhanced spy support */
412 struct iw_spy_data * spy_data; 419 struct iw_spy_data * spy_data;
420 /* Structure managed by the in-kernel IEEE 802.11 layer */
421 struct ieee80211_device * ieee80211;
413}; 422};
414 423
415/**************************** PROTOTYPES ****************************/ 424/**************************** PROTOTYPES ****************************/
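Editor's note: drivers that use the in-kernel IEEE 802.11 stack can now hang their ieee80211_device off the same per-device structure as the spy data. A hedged initialisation sketch, assuming the net_device of this era still exposes the wireless_data pointer (all names below are illustrative):

        static struct iw_public_data wdata;     /* may be shared between instances */

        static void example_wireless_setup(struct net_device *dev,
                                           struct iw_spy_data *spy,
                                           struct ieee80211_device *ieee)
        {
                wdata.spy_data  = spy;          /* optional */
                wdata.ieee80211 = ieee;         /* optional */
                dev->wireless_data = &wdata;    /* assumed net_device field */
        }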
@@ -424,7 +433,7 @@ struct iw_public_data {
424extern int dev_get_wireless_info(char * buffer, char **start, off_t offset, 433extern int dev_get_wireless_info(char * buffer, char **start, off_t offset,
425 int length); 434 int length);
426 435
427/* Handle IOCTLs, called in net/code/dev.c */ 436/* Handle IOCTLs, called in net/core/dev.c */
428extern int wireless_process_ioctl(struct ifreq *ifr, unsigned int cmd); 437extern int wireless_process_ioctl(struct ifreq *ifr, unsigned int cmd);
429 438
430/* Second : functions that may be called by driver modules */ 439/* Second : functions that may be called by driver modules */
@@ -479,7 +488,7 @@ iwe_stream_add_event(char * stream, /* Stream of events */
479 int event_len) /* Real size of payload */ 488 int event_len) /* Real size of payload */
480{ 489{
481 /* Check if it's possible */ 490 /* Check if it's possible */
482 if((stream + event_len) < ends) { 491 if(likely((stream + event_len) < ends)) {
483 iwe->len = event_len; 492 iwe->len = event_len;
484 memcpy(stream, (char *) iwe, event_len); 493 memcpy(stream, (char *) iwe, event_len);
485 stream += event_len; 494 stream += event_len;
@@ -495,14 +504,17 @@ iwe_stream_add_event(char * stream, /* Stream of events */
495static inline char * 504static inline char *
496iwe_stream_add_point(char * stream, /* Stream of events */ 505iwe_stream_add_point(char * stream, /* Stream of events */
497 char * ends, /* End of stream */ 506 char * ends, /* End of stream */
498 struct iw_event *iwe, /* Payload */ 507 struct iw_event *iwe, /* Payload length + flags */
499 char * extra) 508 char * extra) /* More payload */
500{ 509{
501 int event_len = IW_EV_POINT_LEN + iwe->u.data.length; 510 int event_len = IW_EV_POINT_LEN + iwe->u.data.length;
502 /* Check if it's possible */ 511 /* Check if it's possible */
503 if((stream + event_len) < ends) { 512 if(likely((stream + event_len) < ends)) {
504 iwe->len = event_len; 513 iwe->len = event_len;
505 memcpy(stream, (char *) iwe, IW_EV_POINT_LEN); 514 memcpy(stream, (char *) iwe, IW_EV_LCP_LEN);
515 memcpy(stream + IW_EV_LCP_LEN,
516 ((char *) iwe) + IW_EV_LCP_LEN + IW_EV_POINT_OFF,
517 IW_EV_POINT_LEN - IW_EV_LCP_LEN);
506 memcpy(stream + IW_EV_POINT_LEN, extra, iwe->u.data.length); 518 memcpy(stream + IW_EV_POINT_LEN, extra, iwe->u.data.length);
507 stream += event_len; 519 stream += event_len;
508 } 520 }
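Editor's note: the copy is now done in three parts - the length/cmd header (IW_EV_LCP_LEN), then struct iw_point minus its leading user-space pointer (hence the IW_EV_POINT_OFF source offset), then the extra payload - because the kernel-side and exported layouts differ by exactly one pointer. A hedged usage sketch of the typical scan-result pattern (caller names are illustrative):

        /* Append the ESSID as a point event to a scan-results buffer */
        static char *add_essid(char *current_ev, char *end_buf,
                               const char *essid, int essid_len)
        {
                struct iw_event iwe;

                memset(&iwe, 0, sizeof(iwe));
                iwe.cmd = SIOCGIWESSID;
                iwe.u.data.length = essid_len;
                iwe.u.data.flags = 1;           /* ESSID is active/valid */
                return iwe_stream_add_point(current_ev, end_buf, &iwe,
                                            (char *)essid);
        }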
@@ -526,7 +538,7 @@ iwe_stream_add_value(char * event, /* Event in the stream */
526 event_len -= IW_EV_LCP_LEN; 538 event_len -= IW_EV_LCP_LEN;
527 539
528 /* Check if it's possible */ 540 /* Check if it's possible */
529 if((value + event_len) < ends) { 541 if(likely((value + event_len) < ends)) {
530 /* Add new value */ 542 /* Add new value */
531 memcpy(value, (char *) iwe + IW_EV_LCP_LEN, event_len); 543 memcpy(value, (char *) iwe + IW_EV_LCP_LEN, event_len);
532 value += event_len; 544 value += event_len;
@@ -537,4 +549,85 @@ iwe_stream_add_value(char * event, /* Event in the stream */
537 return value; 549 return value;
538} 550}
539 551
552/*------------------------------------------------------------------*/
553/*
 554 * Wrapper to add a Wireless Event to a stream of events.
555 * Same as above, with explicit error check...
556 */
557static inline char *
558iwe_stream_check_add_event(char * stream, /* Stream of events */
559 char * ends, /* End of stream */
560 struct iw_event *iwe, /* Payload */
561 int event_len, /* Size of payload */
562 int * perr) /* Error report */
563{
564 /* Check if it's possible, set error if not */
565 if(likely((stream + event_len) < ends)) {
566 iwe->len = event_len;
567 memcpy(stream, (char *) iwe, event_len);
568 stream += event_len;
569 } else
570 *perr = -E2BIG;
571 return stream;
572}
573
574/*------------------------------------------------------------------*/
575/*
 576 * Wrapper to add a short Wireless Event containing a pointer to a
577 * stream of events.
578 * Same as above, with explicit error check...
579 */
580static inline char *
581iwe_stream_check_add_point(char * stream, /* Stream of events */
582 char * ends, /* End of stream */
583 struct iw_event *iwe, /* Payload length + flags */
584 char * extra, /* More payload */
585 int * perr) /* Error report */
586{
587 int event_len = IW_EV_POINT_LEN + iwe->u.data.length;
588 /* Check if it's possible */
589 if(likely((stream + event_len) < ends)) {
590 iwe->len = event_len;
591 memcpy(stream, (char *) iwe, IW_EV_LCP_LEN);
592 memcpy(stream + IW_EV_LCP_LEN,
593 ((char *) iwe) + IW_EV_LCP_LEN + IW_EV_POINT_OFF,
594 IW_EV_POINT_LEN - IW_EV_LCP_LEN);
595 memcpy(stream + IW_EV_POINT_LEN, extra, iwe->u.data.length);
596 stream += event_len;
597 } else
598 *perr = -E2BIG;
599 return stream;
600}
601
602/*------------------------------------------------------------------*/
603/*
604 * Wrapper to add a value to a Wireless Event in a stream of events.
605 * Be careful, this one is tricky to use properly :
606 * At the first run, you need to have (value = event + IW_EV_LCP_LEN).
607 * Same as above, with explicit error check...
608 */
609static inline char *
610iwe_stream_check_add_value(char * event, /* Event in the stream */
611 char * value, /* Value in event */
612 char * ends, /* End of stream */
613 struct iw_event *iwe, /* Payload */
614 int event_len, /* Size of payload */
615 int * perr) /* Error report */
616{
617 /* Don't duplicate LCP */
618 event_len -= IW_EV_LCP_LEN;
619
620 /* Check if it's possible */
621 if(likely((value + event_len) < ends)) {
622 /* Add new value */
623 memcpy(value, (char *) iwe + IW_EV_LCP_LEN, event_len);
624 value += event_len;
625 /* Patch LCP */
626 iwe->len = value - event;
627 memcpy(event, (char *) iwe, IW_EV_LCP_LEN);
628 } else
629 *perr = -E2BIG;
630 return value;
631}
632
540#endif /* _IW_HANDLER_H */ 633#endif /* _IW_HANDLER_H */
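Editor's note: the _check_ variants behave like the originals but report overflow through *perr (-E2BIG) instead of silently dropping the event, which lets the ieee80211 stack distinguish a too-small scan buffer from an empty one. A small illustrative caller:

        /* Translate a full buffer into -E2BIG so user space can retry
         * SIOCGIWSCAN with a larger buffer (names are illustrative). */
        static int fill_one_event(char **cur, char *end, struct iw_event *iwe)
        {
                int err = 0;

                *cur = iwe_stream_check_add_event(*cur, end, iwe,
                                                  IW_EV_ADDR_LEN, &err);
                return err;     /* 0 on success, -E2BIG if it did not fit */
        }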
diff --git a/include/sound/core.h b/include/sound/core.h
index f72b3ef515e2..3dc41fd5c54d 100644
--- a/include/sound/core.h
+++ b/include/sound/core.h
@@ -291,12 +291,14 @@ void snd_memory_done(void);
291int snd_memory_info_init(void); 291int snd_memory_info_init(void);
292int snd_memory_info_done(void); 292int snd_memory_info_done(void);
293void *snd_hidden_kmalloc(size_t size, unsigned int __nocast flags); 293void *snd_hidden_kmalloc(size_t size, unsigned int __nocast flags);
294void *snd_hidden_kzalloc(size_t size, unsigned int __nocast flags);
294void *snd_hidden_kcalloc(size_t n, size_t size, unsigned int __nocast flags); 295void *snd_hidden_kcalloc(size_t n, size_t size, unsigned int __nocast flags);
295void snd_hidden_kfree(const void *obj); 296void snd_hidden_kfree(const void *obj);
296void *snd_hidden_vmalloc(unsigned long size); 297void *snd_hidden_vmalloc(unsigned long size);
297void snd_hidden_vfree(void *obj); 298void snd_hidden_vfree(void *obj);
298char *snd_hidden_kstrdup(const char *s, unsigned int __nocast flags); 299char *snd_hidden_kstrdup(const char *s, unsigned int __nocast flags);
299#define kmalloc(size, flags) snd_hidden_kmalloc(size, flags) 300#define kmalloc(size, flags) snd_hidden_kmalloc(size, flags)
301#define kzalloc(size, flags) snd_hidden_kzalloc(size, flags)
300#define kcalloc(n, size, flags) snd_hidden_kcalloc(n, size, flags) 302#define kcalloc(n, size, flags) snd_hidden_kcalloc(n, size, flags)
301#define kfree(obj) snd_hidden_kfree(obj) 303#define kfree(obj) snd_hidden_kfree(obj)
302#define vmalloc(size) snd_hidden_vmalloc(size) 304#define vmalloc(size) snd_hidden_vmalloc(size)
diff --git a/include/video/w100fb.h b/include/video/w100fb.h
index bd548c2b47c4..e6da2d7ded8c 100644
--- a/include/video/w100fb.h
+++ b/include/video/w100fb.h
@@ -1,21 +1,149 @@
1/* 1/*
2 * Support for the w100 frame buffer. 2 * Support for the w100 frame buffer.
3 * 3 *
4 * Copyright (c) 2004 Richard Purdie 4 * Copyright (c) 2004-2005 Richard Purdie
5 * Copyright (c) 2005 Ian Molton
5 * 6 *
6 * This program is free software; you can redistribute it and/or modify 7 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License version 2 as 8 * it under the terms of the GNU General Public License version 2 as
8 * published by the Free Software Foundation. 9 * published by the Free Software Foundation.
9 */ 10 */
10 11
12#define W100_GPIO_PORT_A 0
13#define W100_GPIO_PORT_B 1
14
15#define CLK_SRC_XTAL 0
16#define CLK_SRC_PLL 1
17
18struct w100fb_par;
19
20unsigned long w100fb_gpio_read(int port);
21void w100fb_gpio_write(int port, unsigned long value);
22
23/* LCD Specific Routines and Config */
24struct w100_tg_info {
25 void (*change)(struct w100fb_par*);
26 void (*suspend)(struct w100fb_par*);
27 void (*resume)(struct w100fb_par*);
28};
29
30/* General Platform Specific w100 Register Values */
31struct w100_gen_regs {
32 unsigned long lcd_format;
33 unsigned long lcdd_cntl1;
34 unsigned long lcdd_cntl2;
35 unsigned long genlcd_cntl1;
36 unsigned long genlcd_cntl2;
37 unsigned long genlcd_cntl3;
38};
39
40struct w100_gpio_regs {
41 unsigned long init_data1;
42 unsigned long init_data2;
43 unsigned long gpio_dir1;
44 unsigned long gpio_oe1;
45 unsigned long gpio_dir2;
46 unsigned long gpio_oe2;
47};
48
49/* Optional External Memory Configuration */
50struct w100_mem_info {
51 unsigned long ext_cntl;
52 unsigned long sdram_mode_reg;
53 unsigned long ext_timing_cntl;
54 unsigned long io_cntl;
55 unsigned int size;
56};
57
58struct w100_bm_mem_info {
59 unsigned long ext_mem_bw;
60 unsigned long offset;
61 unsigned long ext_timing_ctl;
62 unsigned long ext_cntl;
63 unsigned long mode_reg;
64 unsigned long io_cntl;
65 unsigned long config;
66};
67
68/* LCD Mode definition */
69struct w100_mode {
70 unsigned int xres;
71 unsigned int yres;
72 unsigned short left_margin;
73 unsigned short right_margin;
74 unsigned short upper_margin;
75 unsigned short lower_margin;
76 unsigned long crtc_ss;
77 unsigned long crtc_ls;
78 unsigned long crtc_gs;
79 unsigned long crtc_vpos_gs;
80 unsigned long crtc_rev;
81 unsigned long crtc_dclk;
82 unsigned long crtc_gclk;
83 unsigned long crtc_goe;
84 unsigned long crtc_ps1_active;
85 char pll_freq;
86 char fast_pll_freq;
87 int sysclk_src;
88 int sysclk_divider;
89 int pixclk_src;
90 int pixclk_divider;
91 int pixclk_divider_rotated;
92};
93
94struct w100_pll_info {
95 uint16_t freq; /* desired Fout for PLL (Mhz) */
96 uint8_t M; /* input divider */
97 uint8_t N_int; /* VCO multiplier */
98 uint8_t N_fac; /* VCO multiplier fractional part */
99 uint8_t tfgoal;
100 uint8_t lock_time;
101};
102
103/* Initial Video mode orientation flags */
104#define INIT_MODE_ROTATED 0x1
105#define INIT_MODE_FLIPPED 0x2
106
11/* 107/*
12 * This structure describes the machine which we are running on. 108 * This structure describes the machine which we are running on.
13 * It is set by machine specific code and used in the probe routine 109 * It is set by machine specific code and used in the probe routine
14 * of drivers/video/w100fb.c 110 * of drivers/video/w100fb.c
15 */ 111 */
16
17struct w100fb_mach_info { 112struct w100fb_mach_info {
18 void (*w100fb_ssp_send)(u8 adrs, u8 data); 113 /* General Platform Specific Registers */
19 int comadj; 114 struct w100_gen_regs *regs;
20 int phadadj; 115 /* Table of modes the LCD is capable of */
116 struct w100_mode *modelist;
117 unsigned int num_modes;
118 /* Hooks for any platform specific tg/lcd code (optional) */
119 struct w100_tg_info *tg;
120 /* External memory definition (if present) */
121 struct w100_mem_info *mem;
122 /* Additional External memory definition (if present) */
123 struct w100_bm_mem_info *bm_mem;
124 /* GPIO definitions (optional) */
125 struct w100_gpio_regs *gpio;
126 /* Initial Mode flags */
127 unsigned int init_mode;
128 /* Xtal Frequency */
129 unsigned int xtal_freq;
130 /* Enable Xtal input doubler (1 == enable) */
131 unsigned int xtal_dbl;
132};
133
134/* General frame buffer data structure */
135struct w100fb_par {
136 unsigned int chip_id;
137 unsigned int xres;
138 unsigned int yres;
139 unsigned int extmem_active;
140 unsigned int flip;
141 unsigned int blanked;
142 unsigned int fastpll_mode;
143 unsigned long hsync_len;
144 struct w100_mode *mode;
145 struct w100_pll_info *pll_table;
146 struct w100fb_mach_info *mach;
147 uint32_t *saved_intmem;
148 uint32_t *saved_extmem;
21}; 149};
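Editor's note: the mach_info no longer carries an SSP hook and a couple of adjustment values; each board now supplies complete register, GPIO and mode tables as platform data. A heavily hedged sketch of what a board file might pass - every value below is invented purely to show the shape of the data, not real settings:

        static struct w100_gen_regs example_regs = {
                .lcd_format   = 0x00008023,
                .lcdd_cntl1   = 0x01CC0000,
                .lcdd_cntl2   = 0x0003FFFF,
                .genlcd_cntl1 = 0x00FFFF0D,
                .genlcd_cntl2 = 0x003F3003,
                .genlcd_cntl3 = 0x000102AA,
        };

        static struct w100_mode example_modes[] = {
                { .xres = 240, .yres = 320, .sysclk_src = CLK_SRC_PLL, },
        };

        static struct w100fb_mach_info example_info = {
                .regs      = &example_regs,
                .modelist  = example_modes,
                .num_modes = 1,
                .xtal_freq = 12500000,          /* Hz, illustrative */
        };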
diff --git a/init/main.c b/init/main.c
index ff410063e4e1..f142d4035341 100644
--- a/init/main.c
+++ b/init/main.c
@@ -123,6 +123,7 @@ extern void softirq_init(void);
123char saved_command_line[COMMAND_LINE_SIZE]; 123char saved_command_line[COMMAND_LINE_SIZE];
124 124
125static char *execute_command; 125static char *execute_command;
126static char *ramdisk_execute_command;
126 127
127/* Setup configured maximum number of CPUs to activate */ 128/* Setup configured maximum number of CPUs to activate */
128static unsigned int max_cpus = NR_CPUS; 129static unsigned int max_cpus = NR_CPUS;
@@ -297,6 +298,18 @@ static int __init init_setup(char *str)
297} 298}
298__setup("init=", init_setup); 299__setup("init=", init_setup);
299 300
301static int __init rdinit_setup(char *str)
302{
303 unsigned int i;
304
305 ramdisk_execute_command = str;
306 /* See "auto" comment in init_setup */
307 for (i = 1; i < MAX_INIT_ARGS; i++)
308 argv_init[i] = NULL;
309 return 1;
310}
311__setup("rdinit=", rdinit_setup);
312
300extern void setup_arch(char **); 313extern void setup_arch(char **);
301 314
302#ifndef CONFIG_SMP 315#ifndef CONFIG_SMP
@@ -614,6 +627,7 @@ static void do_pre_smp_initcalls(void)
614 migration_init(); 627 migration_init();
615#endif 628#endif
616 spawn_ksoftirqd(); 629 spawn_ksoftirqd();
630 spawn_softlockup_task();
617} 631}
618 632
619static void run_init_process(char *init_filename) 633static void run_init_process(char *init_filename)
@@ -680,10 +694,14 @@ static int init(void * unused)
680 * check if there is an early userspace init. If yes, let it do all 694 * check if there is an early userspace init. If yes, let it do all
681 * the work 695 * the work
682 */ 696 */
683 if (sys_access((const char __user *) "/init", 0) == 0) 697
684 execute_command = "/init"; 698 if (!ramdisk_execute_command)
685 else 699 ramdisk_execute_command = "/init";
700
701 if (sys_access((const char __user *) ramdisk_execute_command, 0) != 0) {
702 ramdisk_execute_command = NULL;
686 prepare_namespace(); 703 prepare_namespace();
704 }
687 705
688 /* 706 /*
689 * Ok, we have completed the initial bootup, and 707 * Ok, we have completed the initial bootup, and
@@ -700,17 +718,24 @@ static int init(void * unused)
700 718
701 (void) sys_dup(0); 719 (void) sys_dup(0);
702 (void) sys_dup(0); 720 (void) sys_dup(0);
703 721
722 if (ramdisk_execute_command) {
723 run_init_process(ramdisk_execute_command);
724 printk(KERN_WARNING "Failed to execute %s\n",
725 ramdisk_execute_command);
726 }
727
704 /* 728 /*
705 * We try each of these until one succeeds. 729 * We try each of these until one succeeds.
706 * 730 *
707 * The Bourne shell can be used instead of init if we are 731 * The Bourne shell can be used instead of init if we are
708 * trying to recover a really broken machine. 732 * trying to recover a really broken machine.
709 */ 733 */
710 734 if (execute_command) {
711 if (execute_command)
712 run_init_process(execute_command); 735 run_init_process(execute_command);
713 736 printk(KERN_WARNING "Failed to execute %s. Attempting "
737 "defaults...\n", execute_command);
738 }
714 run_init_process("/sbin/init"); 739 run_init_process("/sbin/init");
715 run_init_process("/etc/init"); 740 run_init_process("/etc/init");
716 run_init_process("/bin/init"); 741 run_init_process("/bin/init");
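Editor's note: with these changes the kernel first tries the early userspace init from the initramfs - /init by default, or whatever path was given with rdinit= on the command line - and only falls back to prepare_namespace() plus the init=//sbin/init chain when that file is absent, warning on each failed exec. A minimal user-space sketch of such an /init (illustrative, not part of the patch):

        /* Compiled into the initramfs as /init; the kernel execs it as PID 1. */
        #include <unistd.h>

        int main(void)
        {
                /* ... mount /proc, load modules, locate the real root ... */
                execl("/sbin/init", "init", (char *)NULL);
                return 1;       /* reaching here means the exec failed */
        }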
diff --git a/ipc/compat.c b/ipc/compat.c
index 3881d564c668..1fe95f6659dd 100644
--- a/ipc/compat.c
+++ b/ipc/compat.c
@@ -42,10 +42,10 @@ struct compat_msgbuf {
42 42
43struct compat_ipc_perm { 43struct compat_ipc_perm {
44 key_t key; 44 key_t key;
45 compat_uid_t uid; 45 __compat_uid_t uid;
46 compat_gid_t gid; 46 __compat_gid_t gid;
47 compat_uid_t cuid; 47 __compat_uid_t cuid;
48 compat_gid_t cgid; 48 __compat_gid_t cgid;
49 compat_mode_t mode; 49 compat_mode_t mode;
50 unsigned short seq; 50 unsigned short seq;
51}; 51};
@@ -174,8 +174,8 @@ static inline int __put_compat_ipc_perm(struct ipc64_perm *p,
174 struct compat_ipc_perm __user *up) 174 struct compat_ipc_perm __user *up)
175{ 175{
176 int err; 176 int err;
177 compat_uid_t u; 177 __compat_uid_t u;
178 compat_gid_t g; 178 __compat_gid_t g;
179 179
180 err = __put_user(p->key, &up->key); 180 err = __put_user(p->key, &up->key);
181 SET_UID(u, p->uid); 181 SET_UID(u, p->uid);
diff --git a/ipc/msg.c b/ipc/msg.c
index 27e516f96cdc..d035bd2aba96 100644
--- a/ipc/msg.c
+++ b/ipc/msg.c
@@ -26,6 +26,7 @@
26#include <linux/sched.h> 26#include <linux/sched.h>
27#include <linux/syscalls.h> 27#include <linux/syscalls.h>
28#include <linux/audit.h> 28#include <linux/audit.h>
29#include <linux/seq_file.h>
29#include <asm/current.h> 30#include <asm/current.h>
30#include <asm/uaccess.h> 31#include <asm/uaccess.h>
31#include "util.h" 32#include "util.h"
@@ -74,16 +75,16 @@ static struct ipc_ids msg_ids;
74static void freeque (struct msg_queue *msq, int id); 75static void freeque (struct msg_queue *msq, int id);
75static int newque (key_t key, int msgflg); 76static int newque (key_t key, int msgflg);
76#ifdef CONFIG_PROC_FS 77#ifdef CONFIG_PROC_FS
77static int sysvipc_msg_read_proc(char *buffer, char **start, off_t offset, int length, int *eof, void *data); 78static int sysvipc_msg_proc_show(struct seq_file *s, void *it);
78#endif 79#endif
79 80
80void __init msg_init (void) 81void __init msg_init (void)
81{ 82{
82 ipc_init_ids(&msg_ids,msg_ctlmni); 83 ipc_init_ids(&msg_ids,msg_ctlmni);
83 84 ipc_init_proc_interface("sysvipc/msg",
84#ifdef CONFIG_PROC_FS 85 " key msqid perms cbytes qnum lspid lrpid uid gid cuid cgid stime rtime ctime\n",
85 create_proc_read_entry("sysvipc/msg", 0, NULL, sysvipc_msg_read_proc, NULL); 86 &msg_ids,
86#endif 87 sysvipc_msg_proc_show);
87} 88}
88 89
89static int newque (key_t key, int msgflg) 90static int newque (key_t key, int msgflg)
@@ -113,6 +114,7 @@ static int newque (key_t key, int msgflg)
113 return -ENOSPC; 114 return -ENOSPC;
114 } 115 }
115 116
117 msq->q_id = msg_buildid(id,msq->q_perm.seq);
116 msq->q_stime = msq->q_rtime = 0; 118 msq->q_stime = msq->q_rtime = 0;
117 msq->q_ctime = get_seconds(); 119 msq->q_ctime = get_seconds();
118 msq->q_cbytes = msq->q_qnum = 0; 120 msq->q_cbytes = msq->q_qnum = 0;
@@ -123,7 +125,7 @@ static int newque (key_t key, int msgflg)
123 INIT_LIST_HEAD(&msq->q_senders); 125 INIT_LIST_HEAD(&msq->q_senders);
124 msg_unlock(msq); 126 msg_unlock(msq);
125 127
126 return msg_buildid(id,msq->q_perm.seq); 128 return msq->q_id;
127} 129}
128 130
129static inline void ss_add(struct msg_queue* msq, struct msg_sender* mss) 131static inline void ss_add(struct msg_queue* msq, struct msg_sender* mss)
@@ -808,55 +810,25 @@ out_unlock:
808} 810}
809 811
810#ifdef CONFIG_PROC_FS 812#ifdef CONFIG_PROC_FS
811static int sysvipc_msg_read_proc(char *buffer, char **start, off_t offset, int length, int *eof, void *data) 813static int sysvipc_msg_proc_show(struct seq_file *s, void *it)
812{ 814{
813 off_t pos = 0; 815 struct msg_queue *msq = it;
814 off_t begin = 0; 816
815 int i, len = 0; 817 return seq_printf(s,
816 818 "%10d %10d %4o %10lu %10lu %5u %5u %5u %5u %5u %5u %10lu %10lu %10lu\n",
817 down(&msg_ids.sem); 819 msq->q_perm.key,
818 len += sprintf(buffer, " key msqid perms cbytes qnum lspid lrpid uid gid cuid cgid stime rtime ctime\n"); 820 msq->q_id,
819 821 msq->q_perm.mode,
820 for(i = 0; i <= msg_ids.max_id; i++) { 822 msq->q_cbytes,
821 struct msg_queue * msq; 823 msq->q_qnum,
822 msq = msg_lock(i); 824 msq->q_lspid,
823 if(msq != NULL) { 825 msq->q_lrpid,
824 len += sprintf(buffer + len, "%10d %10d %4o %10lu %10lu %5u %5u %5u %5u %5u %5u %10lu %10lu %10lu\n", 826 msq->q_perm.uid,
825 msq->q_perm.key, 827 msq->q_perm.gid,
826 msg_buildid(i,msq->q_perm.seq), 828 msq->q_perm.cuid,
827 msq->q_perm.mode, 829 msq->q_perm.cgid,
828 msq->q_cbytes, 830 msq->q_stime,
829 msq->q_qnum, 831 msq->q_rtime,
830 msq->q_lspid, 832 msq->q_ctime);
831 msq->q_lrpid,
832 msq->q_perm.uid,
833 msq->q_perm.gid,
834 msq->q_perm.cuid,
835 msq->q_perm.cgid,
836 msq->q_stime,
837 msq->q_rtime,
838 msq->q_ctime);
839 msg_unlock(msq);
840
841 pos += len;
842 if(pos < offset) {
843 len = 0;
844 begin = pos;
845 }
846 if(pos > offset + length)
847 goto done;
848 }
849
850 }
851 *eof = 1;
852done:
853 up(&msg_ids.sem);
854 *start = buffer + (offset - begin);
855 len -= (offset - begin);
856 if(len > length)
857 len = length;
858 if(len < 0)
859 len = 0;
860 return len;
861} 833}
862#endif 834#endif
diff --git a/ipc/sem.c b/ipc/sem.c
index 70975ce0784a..19af028a3e38 100644
--- a/ipc/sem.c
+++ b/ipc/sem.c
@@ -73,6 +73,7 @@
73#include <linux/security.h> 73#include <linux/security.h>
74#include <linux/syscalls.h> 74#include <linux/syscalls.h>
75#include <linux/audit.h> 75#include <linux/audit.h>
76#include <linux/seq_file.h>
76#include <asm/uaccess.h> 77#include <asm/uaccess.h>
77#include "util.h" 78#include "util.h"
78 79
@@ -89,7 +90,7 @@ static struct ipc_ids sem_ids;
89static int newary (key_t, int, int); 90static int newary (key_t, int, int);
90static void freeary (struct sem_array *sma, int id); 91static void freeary (struct sem_array *sma, int id);
91#ifdef CONFIG_PROC_FS 92#ifdef CONFIG_PROC_FS
92static int sysvipc_sem_read_proc(char *buffer, char **start, off_t offset, int length, int *eof, void *data); 93static int sysvipc_sem_proc_show(struct seq_file *s, void *it);
93#endif 94#endif
94 95
95#define SEMMSL_FAST 256 /* 512 bytes on stack */ 96#define SEMMSL_FAST 256 /* 512 bytes on stack */
@@ -116,10 +117,10 @@ void __init sem_init (void)
116{ 117{
117 used_sems = 0; 118 used_sems = 0;
118 ipc_init_ids(&sem_ids,sc_semmni); 119 ipc_init_ids(&sem_ids,sc_semmni);
119 120 ipc_init_proc_interface("sysvipc/sem",
120#ifdef CONFIG_PROC_FS 121 " key semid perms nsems uid gid cuid cgid otime ctime\n",
121 create_proc_read_entry("sysvipc/sem", 0, NULL, sysvipc_sem_read_proc, NULL); 122 &sem_ids,
122#endif 123 sysvipc_sem_proc_show);
123} 124}
124 125
125/* 126/*
@@ -193,6 +194,7 @@ static int newary (key_t key, int nsems, int semflg)
193 } 194 }
194 used_sems += nsems; 195 used_sems += nsems;
195 196
197 sma->sem_id = sem_buildid(id, sma->sem_perm.seq);
196 sma->sem_base = (struct sem *) &sma[1]; 198 sma->sem_base = (struct sem *) &sma[1];
197 /* sma->sem_pending = NULL; */ 199 /* sma->sem_pending = NULL; */
198 sma->sem_pending_last = &sma->sem_pending; 200 sma->sem_pending_last = &sma->sem_pending;
@@ -201,7 +203,7 @@ static int newary (key_t key, int nsems, int semflg)
201 sma->sem_ctime = get_seconds(); 203 sma->sem_ctime = get_seconds();
202 sem_unlock(sma); 204 sem_unlock(sma);
203 205
204 return sem_buildid(id, sma->sem_perm.seq); 206 return sma->sem_id;
205} 207}
206 208
207asmlinkage long sys_semget (key_t key, int nsems, int semflg) 209asmlinkage long sys_semget (key_t key, int nsems, int semflg)
@@ -1328,50 +1330,21 @@ next_entry:
1328} 1330}
1329 1331
1330#ifdef CONFIG_PROC_FS 1332#ifdef CONFIG_PROC_FS
1331static int sysvipc_sem_read_proc(char *buffer, char **start, off_t offset, int length, int *eof, void *data) 1333static int sysvipc_sem_proc_show(struct seq_file *s, void *it)
1332{ 1334{
1333 off_t pos = 0; 1335 struct sem_array *sma = it;
1334 off_t begin = 0; 1336
1335 int i, len = 0; 1337 return seq_printf(s,
1336 1338 "%10d %10d %4o %10lu %5u %5u %5u %5u %10lu %10lu\n",
1337 len += sprintf(buffer, " key semid perms nsems uid gid cuid cgid otime ctime\n"); 1339 sma->sem_perm.key,
1338 down(&sem_ids.sem); 1340 sma->sem_id,
1339 1341 sma->sem_perm.mode,
1340 for(i = 0; i <= sem_ids.max_id; i++) { 1342 sma->sem_nsems,
1341 struct sem_array *sma; 1343 sma->sem_perm.uid,
1342 sma = sem_lock(i); 1344 sma->sem_perm.gid,
1343 if(sma) { 1345 sma->sem_perm.cuid,
1344 len += sprintf(buffer + len, "%10d %10d %4o %10lu %5u %5u %5u %5u %10lu %10lu\n", 1346 sma->sem_perm.cgid,
1345 sma->sem_perm.key, 1347 sma->sem_otime,
1346 sem_buildid(i,sma->sem_perm.seq), 1348 sma->sem_ctime);
1347 sma->sem_perm.mode,
1348 sma->sem_nsems,
1349 sma->sem_perm.uid,
1350 sma->sem_perm.gid,
1351 sma->sem_perm.cuid,
1352 sma->sem_perm.cgid,
1353 sma->sem_otime,
1354 sma->sem_ctime);
1355 sem_unlock(sma);
1356
1357 pos += len;
1358 if(pos < offset) {
1359 len = 0;
1360 begin = pos;
1361 }
1362 if(pos > offset + length)
1363 goto done;
1364 }
1365 }
1366 *eof = 1;
1367done:
1368 up(&sem_ids.sem);
1369 *start = buffer + (offset - begin);
1370 len -= (offset - begin);
1371 if(len > length)
1372 len = length;
1373 if(len < 0)
1374 len = 0;
1375 return len;
1376} 1349}
1377#endif 1350#endif
diff --git a/ipc/shm.c b/ipc/shm.c
index 1d6cf08d950b..dca90489e3b0 100644
--- a/ipc/shm.c
+++ b/ipc/shm.c
@@ -23,12 +23,12 @@
23#include <linux/init.h> 23#include <linux/init.h>
24#include <linux/file.h> 24#include <linux/file.h>
25#include <linux/mman.h> 25#include <linux/mman.h>
26#include <linux/proc_fs.h>
27#include <linux/shmem_fs.h> 26#include <linux/shmem_fs.h>
28#include <linux/security.h> 27#include <linux/security.h>
29#include <linux/syscalls.h> 28#include <linux/syscalls.h>
30#include <linux/audit.h> 29#include <linux/audit.h>
31#include <linux/ptrace.h> 30#include <linux/ptrace.h>
31#include <linux/seq_file.h>
32 32
33#include <asm/uaccess.h> 33#include <asm/uaccess.h>
34 34
@@ -51,7 +51,7 @@ static int newseg (key_t key, int shmflg, size_t size);
51static void shm_open (struct vm_area_struct *shmd); 51static void shm_open (struct vm_area_struct *shmd);
52static void shm_close (struct vm_area_struct *shmd); 52static void shm_close (struct vm_area_struct *shmd);
53#ifdef CONFIG_PROC_FS 53#ifdef CONFIG_PROC_FS
54static int sysvipc_shm_read_proc(char *buffer, char **start, off_t offset, int length, int *eof, void *data); 54static int sysvipc_shm_proc_show(struct seq_file *s, void *it);
55#endif 55#endif
56 56
57size_t shm_ctlmax = SHMMAX; 57size_t shm_ctlmax = SHMMAX;
@@ -63,9 +63,10 @@ static int shm_tot; /* total number of shared memory pages */
63void __init shm_init (void) 63void __init shm_init (void)
64{ 64{
65 ipc_init_ids(&shm_ids, 1); 65 ipc_init_ids(&shm_ids, 1);
66#ifdef CONFIG_PROC_FS 66 ipc_init_proc_interface("sysvipc/shm",
67 create_proc_read_entry("sysvipc/shm", 0, NULL, sysvipc_shm_read_proc, NULL); 67 " key shmid perms size cpid lpid nattch uid gid cuid cgid atime dtime ctime\n",
68#endif 68 &shm_ids,
69 sysvipc_shm_proc_show);
69} 70}
70 71
71static inline int shm_checkid(struct shmid_kernel *s, int id) 72static inline int shm_checkid(struct shmid_kernel *s, int id)
@@ -869,63 +870,32 @@ asmlinkage long sys_shmdt(char __user *shmaddr)
869} 870}
870 871
871#ifdef CONFIG_PROC_FS 872#ifdef CONFIG_PROC_FS
872static int sysvipc_shm_read_proc(char *buffer, char **start, off_t offset, int length, int *eof, void *data) 873static int sysvipc_shm_proc_show(struct seq_file *s, void *it)
873{ 874{
874 off_t pos = 0; 875 struct shmid_kernel *shp = it;
875 off_t begin = 0; 876 char *format;
876 int i, len = 0;
877
878 down(&shm_ids.sem);
879 len += sprintf(buffer, " key shmid perms size cpid lpid nattch uid gid cuid cgid atime dtime ctime\n");
880 877
881 for(i = 0; i <= shm_ids.max_id; i++) {
882 struct shmid_kernel* shp;
883
884 shp = shm_lock(i);
885 if(shp!=NULL) {
886#define SMALL_STRING "%10d %10d %4o %10u %5u %5u %5d %5u %5u %5u %5u %10lu %10lu %10lu\n" 878#define SMALL_STRING "%10d %10d %4o %10u %5u %5u %5d %5u %5u %5u %5u %10lu %10lu %10lu\n"
887#define BIG_STRING "%10d %10d %4o %21u %5u %5u %5d %5u %5u %5u %5u %10lu %10lu %10lu\n" 879#define BIG_STRING "%10d %10d %4o %21u %5u %5u %5d %5u %5u %5u %5u %10lu %10lu %10lu\n"
888 char *format;
889 880
890 if (sizeof(size_t) <= sizeof(int)) 881 if (sizeof(size_t) <= sizeof(int))
891 format = SMALL_STRING; 882 format = SMALL_STRING;
892 else 883 else
893 format = BIG_STRING; 884 format = BIG_STRING;
894 len += sprintf(buffer + len, format, 885 return seq_printf(s, format,
895 shp->shm_perm.key, 886 shp->shm_perm.key,
896 shm_buildid(i, shp->shm_perm.seq), 887 shp->id,
897 shp->shm_flags, 888 shp->shm_flags,
898 shp->shm_segsz, 889 shp->shm_segsz,
899 shp->shm_cprid, 890 shp->shm_cprid,
900 shp->shm_lprid, 891 shp->shm_lprid,
901 is_file_hugepages(shp->shm_file) ? (file_count(shp->shm_file) - 1) : shp->shm_nattch, 892 is_file_hugepages(shp->shm_file) ? (file_count(shp->shm_file) - 1) : shp->shm_nattch,
902 shp->shm_perm.uid, 893 shp->shm_perm.uid,
903 shp->shm_perm.gid, 894 shp->shm_perm.gid,
904 shp->shm_perm.cuid, 895 shp->shm_perm.cuid,
905 shp->shm_perm.cgid, 896 shp->shm_perm.cgid,
906 shp->shm_atim, 897 shp->shm_atim,
907 shp->shm_dtim, 898 shp->shm_dtim,
908 shp->shm_ctim); 899 shp->shm_ctim);
909 shm_unlock(shp);
910
911 pos += len;
912 if(pos < offset) {
913 len = 0;
914 begin = pos;
915 }
916 if(pos > offset + length)
917 goto done;
918 }
919 }
920 *eof = 1;
921done:
922 up(&shm_ids.sem);
923 *start = buffer + (offset - begin);
924 len -= (offset - begin);
925 if(len > length)
926 len = length;
927 if(len < 0)
928 len = 0;
929 return len;
930} 900}
931#endif 901#endif
diff --git a/ipc/util.c b/ipc/util.c
index e00c35f7b2b8..10e836d0d89e 100644
--- a/ipc/util.c
+++ b/ipc/util.c
@@ -24,11 +24,20 @@
24#include <linux/security.h> 24#include <linux/security.h>
25#include <linux/rcupdate.h> 25#include <linux/rcupdate.h>
26#include <linux/workqueue.h> 26#include <linux/workqueue.h>
27#include <linux/seq_file.h>
28#include <linux/proc_fs.h>
27 29
28#include <asm/unistd.h> 30#include <asm/unistd.h>
29 31
30#include "util.h" 32#include "util.h"
31 33
34struct ipc_proc_iface {
35 const char *path;
36 const char *header;
37 struct ipc_ids *ids;
38 int (*show)(struct seq_file *, void *);
39};
40
32/** 41/**
33 * ipc_init - initialise IPC subsystem 42 * ipc_init - initialise IPC subsystem
34 * 43 *
@@ -86,6 +95,43 @@ void __init ipc_init_ids(struct ipc_ids* ids, int size)
86 ids->entries->p[i] = NULL; 95 ids->entries->p[i] = NULL;
87} 96}
88 97
98#ifdef CONFIG_PROC_FS
99static struct file_operations sysvipc_proc_fops;
100/**
101 * ipc_init_proc_interface - Create a proc interface for sysipc types
102 * using a seq_file interface.
103 * @path: Path in procfs
104 * @header: Banner to be printed at the beginning of the file.
105 * @ids: ipc id table to iterate.
106 * @show: show routine.
107 */
108void __init ipc_init_proc_interface(const char *path, const char *header,
109 struct ipc_ids *ids,
110 int (*show)(struct seq_file *, void *))
111{
112 struct proc_dir_entry *pde;
113 struct ipc_proc_iface *iface;
114
115 iface = kmalloc(sizeof(*iface), GFP_KERNEL);
116 if (!iface)
117 return;
118 iface->path = path;
119 iface->header = header;
120 iface->ids = ids;
121 iface->show = show;
122
123 pde = create_proc_entry(path,
124 S_IRUGO, /* world readable */
125 NULL /* parent dir */);
126 if (pde) {
127 pde->data = iface;
128 pde->proc_fops = &sysvipc_proc_fops;
129 } else {
130 kfree(iface);
131 }
132}
133#endif
134
89/** 135/**
90 * ipc_findkey - find a key in an ipc identifier set 136 * ipc_findkey - find a key in an ipc identifier set
91 * @ids: Identifier set 137 * @ids: Identifier set
@@ -578,3 +624,113 @@ int ipc_parse_version (int *cmd)
578} 624}
579 625
580#endif /* __ARCH_WANT_IPC_PARSE_VERSION */ 626#endif /* __ARCH_WANT_IPC_PARSE_VERSION */
627
628#ifdef CONFIG_PROC_FS
629static void *sysvipc_proc_next(struct seq_file *s, void *it, loff_t *pos)
630{
631 struct ipc_proc_iface *iface = s->private;
632 struct kern_ipc_perm *ipc = it;
633 loff_t p;
634
635 /* If we had an ipc id locked before, unlock it */
636 if (ipc && ipc != SEQ_START_TOKEN)
637 ipc_unlock(ipc);
638
639 /*
640 * p = *pos - 1 (because id 0 starts at position 1)
641 * + 1 (because we increment the position by one)
642 */
643 for (p = *pos; p <= iface->ids->max_id; p++) {
644 if ((ipc = ipc_lock(iface->ids, p)) != NULL) {
645 *pos = p + 1;
646 return ipc;
647 }
648 }
649
650 /* Out of range - return NULL to terminate iteration */
651 return NULL;
652}
653
654/*
655 * File positions: pos 0 -> header, pos n -> ipc id + 1.
 656 * SeqFile iterator: the iterator value is a locked ipc pointer or SEQ_START_TOKEN.
657 */
658static void *sysvipc_proc_start(struct seq_file *s, loff_t *pos)
659{
660 struct ipc_proc_iface *iface = s->private;
661 struct kern_ipc_perm *ipc;
662 loff_t p;
663
664 /*
665 * Take the lock - this will be released by the corresponding
666 * call to stop().
667 */
668 down(&iface->ids->sem);
669
670 /* pos < 0 is invalid */
671 if (*pos < 0)
672 return NULL;
673
674 /* pos == 0 means header */
675 if (*pos == 0)
676 return SEQ_START_TOKEN;
677
678 /* Find the (pos-1)th ipc */
679 for (p = *pos - 1; p <= iface->ids->max_id; p++) {
680 if ((ipc = ipc_lock(iface->ids, p)) != NULL) {
681 *pos = p + 1;
682 return ipc;
683 }
684 }
685 return NULL;
686}
687
688static void sysvipc_proc_stop(struct seq_file *s, void *it)
689{
690 struct kern_ipc_perm *ipc = it;
691 struct ipc_proc_iface *iface = s->private;
692
693 /* If we had a locked segment, release it */
694 if (ipc && ipc != SEQ_START_TOKEN)
695 ipc_unlock(ipc);
696
697 /* Release the lock we took in start() */
698 up(&iface->ids->sem);
699}
700
701static int sysvipc_proc_show(struct seq_file *s, void *it)
702{
703 struct ipc_proc_iface *iface = s->private;
704
705 if (it == SEQ_START_TOKEN)
706 return seq_puts(s, iface->header);
707
708 return iface->show(s, it);
709}
710
711static struct seq_operations sysvipc_proc_seqops = {
712 .start = sysvipc_proc_start,
713 .stop = sysvipc_proc_stop,
714 .next = sysvipc_proc_next,
715 .show = sysvipc_proc_show,
716};
717
718static int sysvipc_proc_open(struct inode *inode, struct file *file) {
719 int ret;
720 struct seq_file *seq;
721
722 ret = seq_open(file, &sysvipc_proc_seqops);
723 if (!ret) {
724 seq = file->private_data;
725 seq->private = PDE(inode)->data;
726 }
727 return ret;
728}
729
730static struct file_operations sysvipc_proc_fops = {
731 .open = sysvipc_proc_open,
732 .read = seq_read,
733 .llseek = seq_lseek,
734 .release = seq_release,
735};
736#endif /* CONFIG_PROC_FS */
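Editor's note: this seq_file machinery replaces the hand-rolled read_proc pagination previously duplicated in msg, sem and shm - each IPC type now just registers a path, a header line and a per-entry show callback, while start/next/stop handle the semaphore, the per-id locking and the file position. A hedged sketch of how a hypothetical additional IPC type ("foo" does not exist in the kernel) would plug in:

        static struct ipc_ids foo_ids;

        static int sysvipc_foo_proc_show(struct seq_file *s, void *it)
        {
                struct kern_ipc_perm *p = it;

                return seq_printf(s, "%10d %5u %4o\n", p->key, p->uid, p->mode);
        }

        void __init foo_init(void)
        {
                ipc_init_ids(&foo_ids, 128);
                ipc_init_proc_interface("sysvipc/foo",
                                        "       key   uid perms\n",
                                        &foo_ids,
                                        sysvipc_foo_proc_show);
        }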
diff --git a/ipc/util.h b/ipc/util.h
index 44348ca5a707..fc9a28be0797 100644
--- a/ipc/util.h
+++ b/ipc/util.h
@@ -30,7 +30,15 @@ struct ipc_ids {
30 struct ipc_id_ary* entries; 30 struct ipc_id_ary* entries;
31}; 31};
32 32
33struct seq_file;
33void __init ipc_init_ids(struct ipc_ids* ids, int size); 34void __init ipc_init_ids(struct ipc_ids* ids, int size);
35#ifdef CONFIG_PROC_FS
36void __init ipc_init_proc_interface(const char *path, const char *header,
37 struct ipc_ids *ids,
38 int (*show)(struct seq_file *, void *));
39#else
40#define ipc_init_proc_interface(path, header, ids, show) do {} while (0)
41#endif
34 42
35/* must be called with ids->sem acquired.*/ 43/* must be called with ids->sem acquired.*/
36int ipc_findkey(struct ipc_ids* ids, key_t key); 44int ipc_findkey(struct ipc_ids* ids, key_t key);
diff --git a/kernel/Makefile b/kernel/Makefile
index cb05cd05d237..8d57a2f1226b 100644
--- a/kernel/Makefile
+++ b/kernel/Makefile
@@ -27,6 +27,7 @@ obj-$(CONFIG_AUDIT) += audit.o
27obj-$(CONFIG_AUDITSYSCALL) += auditsc.o 27obj-$(CONFIG_AUDITSYSCALL) += auditsc.o
28obj-$(CONFIG_KPROBES) += kprobes.o 28obj-$(CONFIG_KPROBES) += kprobes.o
29obj-$(CONFIG_SYSFS) += ksysfs.o 29obj-$(CONFIG_SYSFS) += ksysfs.o
30obj-$(CONFIG_DETECT_SOFTLOCKUP) += softlockup.o
30obj-$(CONFIG_GENERIC_HARDIRQS) += irq/ 31obj-$(CONFIG_GENERIC_HARDIRQS) += irq/
31obj-$(CONFIG_CRASH_DUMP) += crash_dump.o 32obj-$(CONFIG_CRASH_DUMP) += crash_dump.o
32obj-$(CONFIG_SECCOMP) += seccomp.o 33obj-$(CONFIG_SECCOMP) += seccomp.o
diff --git a/kernel/acct.c b/kernel/acct.c
index 4168f631868e..f70e6027cca9 100644
--- a/kernel/acct.c
+++ b/kernel/acct.c
@@ -220,7 +220,7 @@ asmlinkage long sys_acct(const char __user *name)
220 return (PTR_ERR(tmp)); 220 return (PTR_ERR(tmp));
221 } 221 }
222 /* Difference from BSD - they don't do O_APPEND */ 222 /* Difference from BSD - they don't do O_APPEND */
223 file = filp_open(tmp, O_WRONLY|O_APPEND, 0); 223 file = filp_open(tmp, O_WRONLY|O_APPEND|O_LARGEFILE, 0);
224 putname(tmp); 224 putname(tmp);
225 if (IS_ERR(file)) { 225 if (IS_ERR(file)) {
226 return (PTR_ERR(file)); 226 return (PTR_ERR(file));
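Editor's note: adding O_LARGEFILE lets the in-kernel open of the accounting file keep appending after it crosses 2 GB on 32-bit configurations, where a non-largefile descriptor would otherwise fail the write. For context, a hedged user-space snippet showing how accounting is switched on against such a file (the path is illustrative):

        #include <unistd.h>

        int enable_accounting(void)
        {
                return acct("/var/log/pacct");  /* needs CAP_SYS_PACCT / root */
        }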
diff --git a/kernel/cpuset.c b/kernel/cpuset.c
index 8ab1b4e518b8..1f06e7690106 100644
--- a/kernel/cpuset.c
+++ b/kernel/cpuset.c
@@ -628,13 +628,6 @@ static int validate_change(const struct cpuset *cur, const struct cpuset *trial)
628 * lock_cpu_hotplug()/unlock_cpu_hotplug() pair. 628 * lock_cpu_hotplug()/unlock_cpu_hotplug() pair.
629 */ 629 */
630 630
631/*
632 * Hack to avoid 2.6.13 partial node dynamic sched domain bug.
633 * Disable letting 'cpu_exclusive' cpusets define dynamic sched
634 * domains, until the sched domain can handle partial nodes.
635 * Remove this #if hackery when sched domains fixed.
636 */
637#if 0
638static void update_cpu_domains(struct cpuset *cur) 631static void update_cpu_domains(struct cpuset *cur)
639{ 632{
640 struct cpuset *c, *par = cur->parent; 633 struct cpuset *c, *par = cur->parent;
@@ -675,11 +668,6 @@ static void update_cpu_domains(struct cpuset *cur)
675 partition_sched_domains(&pspan, &cspan); 668 partition_sched_domains(&pspan, &cspan);
676 unlock_cpu_hotplug(); 669 unlock_cpu_hotplug();
677} 670}
678#else
679static void update_cpu_domains(struct cpuset *cur)
680{
681}
682#endif
683 671
684static int update_cpumask(struct cpuset *cs, char *buf) 672static int update_cpumask(struct cpuset *cs, char *buf)
685{ 673{
@@ -1611,17 +1599,114 @@ int cpuset_zonelist_valid_mems_allowed(struct zonelist *zl)
1611 return 0; 1599 return 0;
1612} 1600}
1613 1601
1602/*
1603 * nearest_exclusive_ancestor() - Returns the nearest mem_exclusive
1604 * ancestor to the specified cpuset. Call while holding cpuset_sem.
1605 * If no ancestor is mem_exclusive (an unusual configuration), then
1606 * returns the root cpuset.
1607 */
1608static const struct cpuset *nearest_exclusive_ancestor(const struct cpuset *cs)
1609{
1610 while (!is_mem_exclusive(cs) && cs->parent)
1611 cs = cs->parent;
1612 return cs;
1613}
1614
1614/** 1615/**
1615 * cpuset_zone_allowed - is zone z allowed in current->mems_allowed 1616 * cpuset_zone_allowed - Can we allocate memory on zone z's memory node?
1616 * @z: zone in question 1617 * @z: is this zone on an allowed node?
1618 * @gfp_mask: memory allocation flags (we use __GFP_HARDWALL)
1617 * 1619 *
1618 * Is zone z allowed in current->mems_allowed, or is 1620 * If we're in interrupt, yes, we can always allocate. If zone
1619 * the CPU in interrupt context? (zone is always allowed in this case) 1621 * z's node is in our tasks mems_allowed, yes. If it's not a
1620 */ 1622 * __GFP_HARDWALL request and this zone's node is in the nearest
1621int cpuset_zone_allowed(struct zone *z) 1623 * mem_exclusive cpuset ancestor to this tasks cpuset, yes.
1624 * Otherwise, no.
1625 *
1626 * GFP_USER allocations are marked with the __GFP_HARDWALL bit,
1627 * and do not allow allocations outside the current tasks cpuset.
1628 * GFP_KERNEL allocations are not so marked, so can escape to the
1629 * nearest mem_exclusive ancestor cpuset.
1630 *
1631 * Scanning up parent cpusets requires cpuset_sem. The __alloc_pages()
1632 * routine only calls here with __GFP_HARDWALL bit _not_ set if
1633 * it's a GFP_KERNEL allocation, and all nodes in the current tasks
1634 * mems_allowed came up empty on the first pass over the zonelist.
1635 * So only GFP_KERNEL allocations, if all nodes in the cpuset are
1636 * short of memory, might require taking the cpuset_sem semaphore.
1637 *
1638 * The first loop over the zonelist in mm/page_alloc.c:__alloc_pages()
1639 * calls here with __GFP_HARDWALL always set in gfp_mask, enforcing
1640 * hardwall cpusets - no allocation on a node outside the cpuset is
1641 * allowed (unless in interrupt, of course).
1642 *
1643 * The second loop doesn't even call here for GFP_ATOMIC requests
1644 * (if the __alloc_pages() local variable 'wait' is set). That check
1645 * and the checks below have the combined effect in the second loop of
1646 * the __alloc_pages() routine that:
1647 * in_interrupt - any node ok (current task context irrelevant)
1648 * GFP_ATOMIC - any node ok
1649 * GFP_KERNEL - any node in enclosing mem_exclusive cpuset ok
1650 * GFP_USER - only nodes in current tasks mems allowed ok.
1651 **/
1652
1653int cpuset_zone_allowed(struct zone *z, unsigned int __nocast gfp_mask)
1622{ 1654{
1623 return in_interrupt() || 1655 int node; /* node that zone z is on */
1624 node_isset(z->zone_pgdat->node_id, current->mems_allowed); 1656 const struct cpuset *cs; /* current cpuset ancestors */
1657 int allowed = 1; /* is allocation in zone z allowed? */
1658
1659 if (in_interrupt())
1660 return 1;
1661 node = z->zone_pgdat->node_id;
1662 if (node_isset(node, current->mems_allowed))
1663 return 1;
1664 if (gfp_mask & __GFP_HARDWALL) /* If hardwall request, stop here */
1665 return 0;
1666
1667 /* Not hardwall and node outside mems_allowed: scan up cpusets */
1668 down(&cpuset_sem);
1669 cs = current->cpuset;
1670 if (!cs)
1671 goto done; /* current task exiting */
1672 cs = nearest_exclusive_ancestor(cs);
1673 allowed = node_isset(node, cs->mems_allowed);
1674done:
1675 up(&cpuset_sem);
1676 return allowed;
1677}
1678
1679/**
1680 * cpuset_excl_nodes_overlap - Do we overlap @p's mem_exclusive ancestors?
1681 * @p: pointer to task_struct of some other task.
1682 *
1683 * Description: Return true if the nearest mem_exclusive ancestor
1684 * cpusets of tasks @p and current overlap. Used by oom killer to
1685 * determine if task @p's memory usage might impact the memory
1686 * available to the current task.
1687 *
1688 * Acquires cpuset_sem - not suitable for calling from a fast path.
1689 **/
1690
1691int cpuset_excl_nodes_overlap(const struct task_struct *p)
1692{
1693 const struct cpuset *cs1, *cs2; /* my and p's cpuset ancestors */
1694 int overlap = 0; /* do cpusets overlap? */
1695
1696 down(&cpuset_sem);
1697 cs1 = current->cpuset;
1698 if (!cs1)
1699 goto done; /* current task exiting */
1700 cs2 = p->cpuset;
1701 if (!cs2)
1702 goto done; /* task p is exiting */
1703 cs1 = nearest_exclusive_ancestor(cs1);
1704 cs2 = nearest_exclusive_ancestor(cs2);
1705 overlap = nodes_intersects(cs1->mems_allowed, cs2->mems_allowed);
1706done:
1707 up(&cpuset_sem);
1708
1709 return overlap;
1625} 1710}
1626 1711
1627/* 1712/*
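Editor's note: with the #if 0 hack removed, cpu_exclusive cpusets again define sched domains, and cpuset_zone_allowed() now takes the gfp_mask so that __GFP_HARDWALL (GFP_USER) allocations stay confined to the task's own cpuset while GFP_KERNEL may fall back to the nearest mem_exclusive ancestor. An illustrative allocator-side sketch, loosely mirroring how a zonelist scan would use the new signature (not a verbatim copy of mm/page_alloc.c):

        static struct page *scan_zonelist(struct zone **zones,
                                          unsigned int gfp_mask,
                                          unsigned int order)
        {
                int i;

                for (i = 0; zones[i] != NULL; i++) {
                        if (!cpuset_zone_allowed(zones[i], gfp_mask))
                                continue;       /* node outside the cpuset */
                        /* ... try to allocate 1 << order pages from zones[i] ... */
                }
                return NULL;                    /* nothing allowed/available */
        }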
diff --git a/kernel/futex.c b/kernel/futex.c
index c7130f86106c..ca05fe6a70b2 100644
--- a/kernel/futex.c
+++ b/kernel/futex.c
@@ -40,6 +40,7 @@
40#include <linux/pagemap.h> 40#include <linux/pagemap.h>
41#include <linux/syscalls.h> 41#include <linux/syscalls.h>
42#include <linux/signal.h> 42#include <linux/signal.h>
43#include <asm/futex.h>
43 44
44#define FUTEX_HASHBITS (CONFIG_BASE_SMALL ? 4 : 8) 45#define FUTEX_HASHBITS (CONFIG_BASE_SMALL ? 4 : 8)
45 46
@@ -327,6 +328,118 @@ out:
327} 328}
328 329
329/* 330/*
331 * Wake up all waiters hashed on the physical page that is mapped
332 * to this virtual address:
333 */
334static int futex_wake_op(unsigned long uaddr1, unsigned long uaddr2, int nr_wake, int nr_wake2, int op)
335{
336 union futex_key key1, key2;
337 struct futex_hash_bucket *bh1, *bh2;
338 struct list_head *head;
339 struct futex_q *this, *next;
340 int ret, op_ret, attempt = 0;
341
342retryfull:
343 down_read(&current->mm->mmap_sem);
344
345 ret = get_futex_key(uaddr1, &key1);
346 if (unlikely(ret != 0))
347 goto out;
348 ret = get_futex_key(uaddr2, &key2);
349 if (unlikely(ret != 0))
350 goto out;
351
352 bh1 = hash_futex(&key1);
353 bh2 = hash_futex(&key2);
354
355retry:
356 if (bh1 < bh2)
357 spin_lock(&bh1->lock);
358 spin_lock(&bh2->lock);
359 if (bh1 > bh2)
360 spin_lock(&bh1->lock);
361
362 op_ret = futex_atomic_op_inuser(op, (int __user *)uaddr2);
363 if (unlikely(op_ret < 0)) {
364 int dummy;
365
366 spin_unlock(&bh1->lock);
367 if (bh1 != bh2)
368 spin_unlock(&bh2->lock);
369
370 /* futex_atomic_op_inuser needs to both read and write
371 * *(int __user *)uaddr2, but we can't modify it
372 * non-atomically. Therefore, if get_user below is not
373 * enough, we need to handle the fault ourselves, while
374 * still holding the mmap_sem. */
375 if (attempt++) {
376 struct vm_area_struct * vma;
377 struct mm_struct *mm = current->mm;
378
379 ret = -EFAULT;
380 if (attempt >= 2 ||
381 !(vma = find_vma(mm, uaddr2)) ||
382 vma->vm_start > uaddr2 ||
383 !(vma->vm_flags & VM_WRITE))
384 goto out;
385
386 switch (handle_mm_fault(mm, vma, uaddr2, 1)) {
387 case VM_FAULT_MINOR:
388 current->min_flt++;
389 break;
390 case VM_FAULT_MAJOR:
391 current->maj_flt++;
392 break;
393 default:
394 goto out;
395 }
396 goto retry;
397 }
398
399 /* If we would have faulted, release mmap_sem,
400 * fault it in and start all over again. */
401 up_read(&current->mm->mmap_sem);
402
403 ret = get_user(dummy, (int __user *)uaddr2);
404 if (ret)
405 return ret;
406
407 goto retryfull;
408 }
409
410 head = &bh1->chain;
411
412 list_for_each_entry_safe(this, next, head, list) {
413 if (match_futex (&this->key, &key1)) {
414 wake_futex(this);
415 if (++ret >= nr_wake)
416 break;
417 }
418 }
419
420 if (op_ret > 0) {
421 head = &bh2->chain;
422
423 op_ret = 0;
424 list_for_each_entry_safe(this, next, head, list) {
425 if (match_futex (&this->key, &key2)) {
426 wake_futex(this);
427 if (++op_ret >= nr_wake2)
428 break;
429 }
430 }
431 ret += op_ret;
432 }
433
434 spin_unlock(&bh1->lock);
435 if (bh1 != bh2)
436 spin_unlock(&bh2->lock);
437out:
438 up_read(&current->mm->mmap_sem);
439 return ret;
440}
441
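Editor's note: FUTEX_WAKE_OP atomically applies the user-supplied operation to the second futex word, wakes up to nr_wake waiters on the first futex and, if the encoded comparison holds, up to nr_wake2 on the second; it is intended to let user space (for example glibc's condition variables) avoid an extra wake-up round trip. Note also that the two hash buckets are locked in address order to avoid ABBA deadlock; a minimal restatement of that idiom (names are illustrative):

        /* Always take the lower-addressed lock first, and only once when both
         * futexes hash to the same bucket, so concurrent callers cannot deadlock. */
        static void double_lock(spinlock_t *a, spinlock_t *b)
        {
                if (a < b)
                        spin_lock(a);
                spin_lock(b);
                if (a > b)
                        spin_lock(a);
        }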
442/*
330 * Requeue all waiters hashed on one physical page to another 443 * Requeue all waiters hashed on one physical page to another
331 * physical page. 444 * physical page.
332 */ 445 */
@@ -673,23 +786,17 @@ static int futex_fd(unsigned long uaddr, int signal)
673 filp->f_mapping = filp->f_dentry->d_inode->i_mapping; 786 filp->f_mapping = filp->f_dentry->d_inode->i_mapping;
674 787
675 if (signal) { 788 if (signal) {
676 int err;
677 err = f_setown(filp, current->pid, 1); 789 err = f_setown(filp, current->pid, 1);
678 if (err < 0) { 790 if (err < 0) {
679 put_unused_fd(ret); 791 goto error;
680 put_filp(filp);
681 ret = err;
682 goto out;
683 } 792 }
684 filp->f_owner.signum = signal; 793 filp->f_owner.signum = signal;
685 } 794 }
686 795
687 q = kmalloc(sizeof(*q), GFP_KERNEL); 796 q = kmalloc(sizeof(*q), GFP_KERNEL);
688 if (!q) { 797 if (!q) {
689 put_unused_fd(ret); 798 err = -ENOMEM;
690 put_filp(filp); 799 goto error;
691 ret = -ENOMEM;
692 goto out;
693 } 800 }
694 801
695 down_read(&current->mm->mmap_sem); 802 down_read(&current->mm->mmap_sem);
@@ -697,10 +804,8 @@ static int futex_fd(unsigned long uaddr, int signal)
697 804
698 if (unlikely(err != 0)) { 805 if (unlikely(err != 0)) {
699 up_read(&current->mm->mmap_sem); 806 up_read(&current->mm->mmap_sem);
700 put_unused_fd(ret);
701 put_filp(filp);
702 kfree(q); 807 kfree(q);
703 return err; 808 goto error;
704 } 809 }
705 810
706 /* 811 /*
@@ -716,6 +821,11 @@ static int futex_fd(unsigned long uaddr, int signal)
716 fd_install(ret, filp); 821 fd_install(ret, filp);
717out: 822out:
718 return ret; 823 return ret;
824error:
825 put_unused_fd(ret);
826 put_filp(filp);
827 ret = err;
828 goto out;
719} 829}
720 830
721long do_futex(unsigned long uaddr, int op, int val, unsigned long timeout, 831long do_futex(unsigned long uaddr, int op, int val, unsigned long timeout,
@@ -740,6 +850,9 @@ long do_futex(unsigned long uaddr, int op, int val, unsigned long timeout,
740 case FUTEX_CMP_REQUEUE: 850 case FUTEX_CMP_REQUEUE:
741 ret = futex_requeue(uaddr, uaddr2, val, val2, &val3); 851 ret = futex_requeue(uaddr, uaddr2, val, val2, &val3);
742 break; 852 break;
853 case FUTEX_WAKE_OP:
854 ret = futex_wake_op(uaddr, uaddr2, val, val2, val3);
855 break;
743 default: 856 default:
744 ret = -ENOSYS; 857 ret = -ENOSYS;
745 } 858 }
diff --git a/kernel/intermodule.c b/kernel/intermodule.c
index 388977f3e9b7..0cbe633420fb 100644
--- a/kernel/intermodule.c
+++ b/kernel/intermodule.c
@@ -39,7 +39,7 @@ void inter_module_register(const char *im_name, struct module *owner, const void
39 struct list_head *tmp; 39 struct list_head *tmp;
40 struct inter_module_entry *ime, *ime_new; 40 struct inter_module_entry *ime, *ime_new;
41 41
42 if (!(ime_new = kmalloc(sizeof(*ime), GFP_KERNEL))) { 42 if (!(ime_new = kzalloc(sizeof(*ime), GFP_KERNEL))) {
43 /* Overloaded kernel, not fatal */ 43 /* Overloaded kernel, not fatal */
44 printk(KERN_ERR 44 printk(KERN_ERR
45 "Aiee, inter_module_register: cannot kmalloc entry for '%s'\n", 45 "Aiee, inter_module_register: cannot kmalloc entry for '%s'\n",
@@ -47,7 +47,6 @@ void inter_module_register(const char *im_name, struct module *owner, const void
47 kmalloc_failed = 1; 47 kmalloc_failed = 1;
48 return; 48 return;
49 } 49 }
50 memset(ime_new, 0, sizeof(*ime_new));
51 ime_new->im_name = im_name; 50 ime_new->im_name = im_name;
52 ime_new->owner = owner; 51 ime_new->owner = owner;
53 ime_new->userdata = userdata; 52 ime_new->userdata = userdata;
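Editor's note: kzalloc() folds the kmalloc()+memset() pair into a single call; the deleted memset above is exactly what the helper now does internally. For comparison, the open-coded equivalent of the new allocation (illustrative):

        ime_new = kmalloc(sizeof(*ime_new), GFP_KERNEL);
        if (ime_new)
                memset(ime_new, 0, sizeof(*ime_new));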
diff --git a/kernel/irq/handle.c b/kernel/irq/handle.c
index c29f83c16497..3ff7b925c387 100644
--- a/kernel/irq/handle.c
+++ b/kernel/irq/handle.c
@@ -111,7 +111,7 @@ fastcall unsigned int __do_IRQ(unsigned int irq, struct pt_regs *regs)
111 unsigned int status; 111 unsigned int status;
112 112
113 kstat_this_cpu.irqs[irq]++; 113 kstat_this_cpu.irqs[irq]++;
114 if (desc->status & IRQ_PER_CPU) { 114 if (CHECK_IRQ_PER_CPU(desc->status)) {
115 irqreturn_t action_ret; 115 irqreturn_t action_ret;
116 116
117 /* 117 /*
diff --git a/kernel/irq/manage.c b/kernel/irq/manage.c
index ac6700985705..1cfdb08ddf20 100644
--- a/kernel/irq/manage.c
+++ b/kernel/irq/manage.c
@@ -18,6 +18,10 @@
18 18
19cpumask_t irq_affinity[NR_IRQS] = { [0 ... NR_IRQS-1] = CPU_MASK_ALL }; 19cpumask_t irq_affinity[NR_IRQS] = { [0 ... NR_IRQS-1] = CPU_MASK_ALL };
20 20
21#if defined (CONFIG_GENERIC_PENDING_IRQ) || defined (CONFIG_IRQBALANCE)
22cpumask_t __cacheline_aligned pending_irq_cpumask[NR_IRQS];
23#endif
24
21/** 25/**
22 * synchronize_irq - wait for pending IRQ handlers (on other CPUs) 26 * synchronize_irq - wait for pending IRQ handlers (on other CPUs)
23 * 27 *
diff --git a/kernel/irq/proc.c b/kernel/irq/proc.c
index 85d08daa6600..f26e534c6585 100644
--- a/kernel/irq/proc.c
+++ b/kernel/irq/proc.c
@@ -19,12 +19,22 @@ static struct proc_dir_entry *root_irq_dir, *irq_dir[NR_IRQS];
19 */ 19 */
20static struct proc_dir_entry *smp_affinity_entry[NR_IRQS]; 20static struct proc_dir_entry *smp_affinity_entry[NR_IRQS];
21 21
22void __attribute__((weak)) 22#ifdef CONFIG_GENERIC_PENDING_IRQ
23proc_set_irq_affinity(unsigned int irq, cpumask_t mask_val) 23void proc_set_irq_affinity(unsigned int irq, cpumask_t mask_val)
24{
25 /*
 26 * Save these away for later use. Re-program when the
27 * interrupt is pending
28 */
29 set_pending_irq(irq, mask_val);
30}
31#else
32void proc_set_irq_affinity(unsigned int irq, cpumask_t mask_val)
24{ 33{
25 irq_affinity[irq] = mask_val; 34 irq_affinity[irq] = mask_val;
26 irq_desc[irq].handler->set_affinity(irq, mask_val); 35 irq_desc[irq].handler->set_affinity(irq, mask_val);
27} 36}
37#endif
28 38
29static int irq_affinity_read_proc(char *page, char **start, off_t off, 39static int irq_affinity_read_proc(char *page, char **start, off_t off,
30 int count, int *eof, void *data) 40 int count, int *eof, void *data)
diff --git a/kernel/kprobes.c b/kernel/kprobes.c
index b0237122b24e..f3ea492ab44d 100644
--- a/kernel/kprobes.c
+++ b/kernel/kprobes.c
@@ -37,6 +37,7 @@
37#include <linux/init.h> 37#include <linux/init.h>
38#include <linux/module.h> 38#include <linux/module.h>
39#include <linux/moduleloader.h> 39#include <linux/moduleloader.h>
40#include <asm-generic/sections.h>
40#include <asm/cacheflush.h> 41#include <asm/cacheflush.h>
41#include <asm/errno.h> 42#include <asm/errno.h>
42#include <asm/kdebug.h> 43#include <asm/kdebug.h>
@@ -72,7 +73,7 @@ static struct hlist_head kprobe_insn_pages;
72 * get_insn_slot() - Find a slot on an executable page for an instruction. 73 * get_insn_slot() - Find a slot on an executable page for an instruction.
73 * We allocate an executable page if there's no room on existing ones. 74 * We allocate an executable page if there's no room on existing ones.
74 */ 75 */
75kprobe_opcode_t *get_insn_slot(void) 76kprobe_opcode_t __kprobes *get_insn_slot(void)
76{ 77{
77 struct kprobe_insn_page *kip; 78 struct kprobe_insn_page *kip;
78 struct hlist_node *pos; 79 struct hlist_node *pos;
@@ -117,7 +118,7 @@ kprobe_opcode_t *get_insn_slot(void)
117 return kip->insns; 118 return kip->insns;
118} 119}
119 120
120void free_insn_slot(kprobe_opcode_t *slot) 121void __kprobes free_insn_slot(kprobe_opcode_t *slot)
121{ 122{
122 struct kprobe_insn_page *kip; 123 struct kprobe_insn_page *kip;
123 struct hlist_node *pos; 124 struct hlist_node *pos;
@@ -152,20 +153,42 @@ void free_insn_slot(kprobe_opcode_t *slot)
152} 153}
153 154
154/* Locks kprobe: irqs must be disabled */ 155/* Locks kprobe: irqs must be disabled */
155void lock_kprobes(void) 156void __kprobes lock_kprobes(void)
156{ 157{
158 unsigned long flags = 0;
159
 160 /* Keep local interrupts disabled from the moment we take the kprobe_lock
 161 * until kprobe_cpu has been updated; this prevents a deadlock when we
 162 * have a kprobe on an ISR routine and a kprobe on a task
 163 * routine.
 164 */
165 local_irq_save(flags);
166
157 spin_lock(&kprobe_lock); 167 spin_lock(&kprobe_lock);
158 kprobe_cpu = smp_processor_id(); 168 kprobe_cpu = smp_processor_id();
169
170 local_irq_restore(flags);
159} 171}
160 172
161void unlock_kprobes(void) 173void __kprobes unlock_kprobes(void)
162{ 174{
175 unsigned long flags = 0;
176
 177 /* Keep local interrupts disabled from the moment we update
 178 * kprobe_cpu until we get a chance to release kprobe_lock; this
 179 * prevents a deadlock when we have a kprobe on an ISR routine and
 180 * a kprobe on a task routine.
 181 */
182 local_irq_save(flags);
183
163 kprobe_cpu = NR_CPUS; 184 kprobe_cpu = NR_CPUS;
164 spin_unlock(&kprobe_lock); 185 spin_unlock(&kprobe_lock);
186
187 local_irq_restore(flags);
165} 188}
166 189
167/* You have to be holding the kprobe_lock */ 190/* You have to be holding the kprobe_lock */
168struct kprobe *get_kprobe(void *addr) 191struct kprobe __kprobes *get_kprobe(void *addr)
169{ 192{
170 struct hlist_head *head; 193 struct hlist_head *head;
171 struct hlist_node *node; 194 struct hlist_node *node;
@@ -183,7 +206,7 @@ struct kprobe *get_kprobe(void *addr)
183 * Aggregate handlers for multiple kprobes support - these handlers 206 * Aggregate handlers for multiple kprobes support - these handlers
184 * take care of invoking the individual kprobe handlers on p->list 207 * take care of invoking the individual kprobe handlers on p->list
185 */ 208 */
186static int aggr_pre_handler(struct kprobe *p, struct pt_regs *regs) 209static int __kprobes aggr_pre_handler(struct kprobe *p, struct pt_regs *regs)
187{ 210{
188 struct kprobe *kp; 211 struct kprobe *kp;
189 212
@@ -198,8 +221,8 @@ static int aggr_pre_handler(struct kprobe *p, struct pt_regs *regs)
198 return 0; 221 return 0;
199} 222}
200 223
201static void aggr_post_handler(struct kprobe *p, struct pt_regs *regs, 224static void __kprobes aggr_post_handler(struct kprobe *p, struct pt_regs *regs,
202 unsigned long flags) 225 unsigned long flags)
203{ 226{
204 struct kprobe *kp; 227 struct kprobe *kp;
205 228
@@ -213,8 +236,8 @@ static void aggr_post_handler(struct kprobe *p, struct pt_regs *regs,
213 return; 236 return;
214} 237}
215 238
216static int aggr_fault_handler(struct kprobe *p, struct pt_regs *regs, 239static int __kprobes aggr_fault_handler(struct kprobe *p, struct pt_regs *regs,
217 int trapnr) 240 int trapnr)
218{ 241{
219 /* 242 /*
220 * if we faulted "during" the execution of a user specified 243 * if we faulted "during" the execution of a user specified
@@ -227,7 +250,7 @@ static int aggr_fault_handler(struct kprobe *p, struct pt_regs *regs,
227 return 0; 250 return 0;
228} 251}
229 252
230static int aggr_break_handler(struct kprobe *p, struct pt_regs *regs) 253static int __kprobes aggr_break_handler(struct kprobe *p, struct pt_regs *regs)
231{ 254{
232 struct kprobe *kp = curr_kprobe; 255 struct kprobe *kp = curr_kprobe;
233 if (curr_kprobe && kp->break_handler) { 256 if (curr_kprobe && kp->break_handler) {
@@ -240,7 +263,7 @@ static int aggr_break_handler(struct kprobe *p, struct pt_regs *regs)
240 return 0; 263 return 0;
241} 264}
242 265
243struct kretprobe_instance *get_free_rp_inst(struct kretprobe *rp) 266struct kretprobe_instance __kprobes *get_free_rp_inst(struct kretprobe *rp)
244{ 267{
245 struct hlist_node *node; 268 struct hlist_node *node;
246 struct kretprobe_instance *ri; 269 struct kretprobe_instance *ri;
@@ -249,7 +272,8 @@ struct kretprobe_instance *get_free_rp_inst(struct kretprobe *rp)
249 return NULL; 272 return NULL;
250} 273}
251 274
252static struct kretprobe_instance *get_used_rp_inst(struct kretprobe *rp) 275static struct kretprobe_instance __kprobes *get_used_rp_inst(struct kretprobe
276 *rp)
253{ 277{
254 struct hlist_node *node; 278 struct hlist_node *node;
255 struct kretprobe_instance *ri; 279 struct kretprobe_instance *ri;
@@ -258,7 +282,7 @@ static struct kretprobe_instance *get_used_rp_inst(struct kretprobe *rp)
258 return NULL; 282 return NULL;
259} 283}
260 284
261void add_rp_inst(struct kretprobe_instance *ri) 285void __kprobes add_rp_inst(struct kretprobe_instance *ri)
262{ 286{
263 /* 287 /*
264 * Remove rp inst off the free list - 288 * Remove rp inst off the free list -
@@ -276,7 +300,7 @@ void add_rp_inst(struct kretprobe_instance *ri)
276 hlist_add_head(&ri->uflist, &ri->rp->used_instances); 300 hlist_add_head(&ri->uflist, &ri->rp->used_instances);
277} 301}
278 302
279void recycle_rp_inst(struct kretprobe_instance *ri) 303void __kprobes recycle_rp_inst(struct kretprobe_instance *ri)
280{ 304{
281 /* remove rp inst off the rprobe_inst_table */ 305 /* remove rp inst off the rprobe_inst_table */
282 hlist_del(&ri->hlist); 306 hlist_del(&ri->hlist);
@@ -291,7 +315,7 @@ void recycle_rp_inst(struct kretprobe_instance *ri)
291 kfree(ri); 315 kfree(ri);
292} 316}
293 317
294struct hlist_head * kretprobe_inst_table_head(struct task_struct *tsk) 318struct hlist_head __kprobes *kretprobe_inst_table_head(struct task_struct *tsk)
295{ 319{
296 return &kretprobe_inst_table[hash_ptr(tsk, KPROBE_HASH_BITS)]; 320 return &kretprobe_inst_table[hash_ptr(tsk, KPROBE_HASH_BITS)];
297} 321}
@@ -302,7 +326,7 @@ struct hlist_head * kretprobe_inst_table_head(struct task_struct *tsk)
302 * instances associated with this task. These left over instances represent 326 * instances associated with this task. These left over instances represent
303 * probed functions that have been called but will never return. 327 * probed functions that have been called but will never return.
304 */ 328 */
305void kprobe_flush_task(struct task_struct *tk) 329void __kprobes kprobe_flush_task(struct task_struct *tk)
306{ 330{
307 struct kretprobe_instance *ri; 331 struct kretprobe_instance *ri;
308 struct hlist_head *head; 332 struct hlist_head *head;
@@ -322,7 +346,8 @@ void kprobe_flush_task(struct task_struct *tk)
322 * This kprobe pre_handler is registered with every kretprobe. When probe 346 * This kprobe pre_handler is registered with every kretprobe. When probe
323 * hits it will set up the return probe. 347 * hits it will set up the return probe.
324 */ 348 */
325static int pre_handler_kretprobe(struct kprobe *p, struct pt_regs *regs) 349static int __kprobes pre_handler_kretprobe(struct kprobe *p,
350 struct pt_regs *regs)
326{ 351{
327 struct kretprobe *rp = container_of(p, struct kretprobe, kp); 352 struct kretprobe *rp = container_of(p, struct kretprobe, kp);
328 353
@@ -353,7 +378,7 @@ static inline void copy_kprobe(struct kprobe *old_p, struct kprobe *p)
353* Add the new probe to old_p->list. Fail if this is the 378* Add the new probe to old_p->list. Fail if this is the
354* second jprobe at the address - two jprobes can't coexist 379* second jprobe at the address - two jprobes can't coexist
355*/ 380*/
356static int add_new_kprobe(struct kprobe *old_p, struct kprobe *p) 381static int __kprobes add_new_kprobe(struct kprobe *old_p, struct kprobe *p)
357{ 382{
358 struct kprobe *kp; 383 struct kprobe *kp;
359 384
@@ -395,7 +420,8 @@ static inline void add_aggr_kprobe(struct kprobe *ap, struct kprobe *p)
395 * the intricacies 420 * the intricacies
396 * TODO: Move kcalloc outside the spinlock 421 * TODO: Move kcalloc outside the spinlock
397 */ 422 */
398static int register_aggr_kprobe(struct kprobe *old_p, struct kprobe *p) 423static int __kprobes register_aggr_kprobe(struct kprobe *old_p,
424 struct kprobe *p)
399{ 425{
400 int ret = 0; 426 int ret = 0;
401 struct kprobe *ap; 427 struct kprobe *ap;
@@ -434,15 +460,25 @@ static inline void cleanup_aggr_kprobe(struct kprobe *old_p,
434 spin_unlock_irqrestore(&kprobe_lock, flags); 460 spin_unlock_irqrestore(&kprobe_lock, flags);
435} 461}
436 462
437int register_kprobe(struct kprobe *p) 463static int __kprobes in_kprobes_functions(unsigned long addr)
464{
465 if (addr >= (unsigned long)__kprobes_text_start
466 && addr < (unsigned long)__kprobes_text_end)
467 return -EINVAL;
468 return 0;
469}
470
471int __kprobes register_kprobe(struct kprobe *p)
438{ 472{
439 int ret = 0; 473 int ret = 0;
440 unsigned long flags = 0; 474 unsigned long flags = 0;
441 struct kprobe *old_p; 475 struct kprobe *old_p;
442 476
443 if ((ret = arch_prepare_kprobe(p)) != 0) { 477 if ((ret = in_kprobes_functions((unsigned long) p->addr)) != 0)
478 return ret;
479 if ((ret = arch_prepare_kprobe(p)) != 0)
444 goto rm_kprobe; 480 goto rm_kprobe;
445 } 481
446 spin_lock_irqsave(&kprobe_lock, flags); 482 spin_lock_irqsave(&kprobe_lock, flags);
447 old_p = get_kprobe(p->addr); 483 old_p = get_kprobe(p->addr);
448 p->nmissed = 0; 484 p->nmissed = 0;
@@ -466,7 +502,7 @@ rm_kprobe:
466 return ret; 502 return ret;
467} 503}
468 504
469void unregister_kprobe(struct kprobe *p) 505void __kprobes unregister_kprobe(struct kprobe *p)
470{ 506{
471 unsigned long flags; 507 unsigned long flags;
472 struct kprobe *old_p; 508 struct kprobe *old_p;
@@ -487,7 +523,7 @@ static struct notifier_block kprobe_exceptions_nb = {
487 .priority = 0x7fffffff /* we need to be notified first */ 523 .priority = 0x7fffffff /* we need to be notified first */
488}; 524};
489 525
490int register_jprobe(struct jprobe *jp) 526int __kprobes register_jprobe(struct jprobe *jp)
491{ 527{
492 /* Todo: Verify probepoint is a function entry point */ 528 /* Todo: Verify probepoint is a function entry point */
493 jp->kp.pre_handler = setjmp_pre_handler; 529 jp->kp.pre_handler = setjmp_pre_handler;
@@ -496,14 +532,14 @@ int register_jprobe(struct jprobe *jp)
496 return register_kprobe(&jp->kp); 532 return register_kprobe(&jp->kp);
497} 533}
498 534
499void unregister_jprobe(struct jprobe *jp) 535void __kprobes unregister_jprobe(struct jprobe *jp)
500{ 536{
501 unregister_kprobe(&jp->kp); 537 unregister_kprobe(&jp->kp);
502} 538}
503 539
504#ifdef ARCH_SUPPORTS_KRETPROBES 540#ifdef ARCH_SUPPORTS_KRETPROBES
505 541
506int register_kretprobe(struct kretprobe *rp) 542int __kprobes register_kretprobe(struct kretprobe *rp)
507{ 543{
508 int ret = 0; 544 int ret = 0;
509 struct kretprobe_instance *inst; 545 struct kretprobe_instance *inst;
@@ -540,14 +576,14 @@ int register_kretprobe(struct kretprobe *rp)
540 576
541#else /* ARCH_SUPPORTS_KRETPROBES */ 577#else /* ARCH_SUPPORTS_KRETPROBES */
542 578
543int register_kretprobe(struct kretprobe *rp) 579int __kprobes register_kretprobe(struct kretprobe *rp)
544{ 580{
545 return -ENOSYS; 581 return -ENOSYS;
546} 582}
547 583
548#endif /* ARCH_SUPPORTS_KRETPROBES */ 584#endif /* ARCH_SUPPORTS_KRETPROBES */
549 585
550void unregister_kretprobe(struct kretprobe *rp) 586void __kprobes unregister_kretprobe(struct kretprobe *rp)
551{ 587{
552 unsigned long flags; 588 unsigned long flags;
553 struct kretprobe_instance *ri; 589 struct kretprobe_instance *ri;
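
Editor's note: the kprobes.c changes tag the kprobes machinery itself with __kprobes, which moves those functions into a dedicated text section, and the new in_kprobes_functions() makes register_kprobe() refuse any address inside that section, so the infrastructure can never probe itself. A hedged userspace sketch of the same section-bounds check follows; it relies on GNU ld's automatic __start_/__stop_ symbols for a custom section, and the section and function names are invented for the example.

#include <stdio.h>

/* Functions we never want to "probe" go into their own text section,
 * roughly what the __kprobes annotation does in the kernel. */
#define NOPROBE __attribute__((section("noprobe_text"), noinline))

extern char __start_noprobe_text[];     /* auto-defined by GNU ld */
extern char __stop_noprobe_text[];

static NOPROBE int internal_helper(int x)
{
        return x * 2;
}

static int regular_function(int x)
{
        return x + 1;
}

/* Mirror of in_kprobes_functions(): reject addresses inside the section. */
static int in_noprobe_section(void *addr)
{
        return (char *)addr >= __start_noprobe_text &&
               (char *)addr < __stop_noprobe_text;
}

int main(void)
{
        printf("internal_helper protected: %d\n",
               in_noprobe_section((void *)internal_helper));
        printf("regular_function protected: %d\n",
               in_noprobe_section((void *)regular_function));
        return 0;
}
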
diff --git a/kernel/module.c b/kernel/module.c
index c32995fbd8fd..4b39d3793c72 100644
--- a/kernel/module.c
+++ b/kernel/module.c
@@ -1509,6 +1509,7 @@ static struct module *load_module(void __user *umod,
1509 long err = 0; 1509 long err = 0;
1510 void *percpu = NULL, *ptr = NULL; /* Stops spurious gcc warning */ 1510 void *percpu = NULL, *ptr = NULL; /* Stops spurious gcc warning */
1511 struct exception_table_entry *extable; 1511 struct exception_table_entry *extable;
1512 mm_segment_t old_fs;
1512 1513
1513 DEBUGP("load_module: umod=%p, len=%lu, uargs=%p\n", 1514 DEBUGP("load_module: umod=%p, len=%lu, uargs=%p\n",
1514 umod, len, uargs); 1515 umod, len, uargs);
@@ -1779,6 +1780,24 @@ static struct module *load_module(void __user *umod,
1779 if (err < 0) 1780 if (err < 0)
1780 goto cleanup; 1781 goto cleanup;
1781 1782
1783 /* flush the icache in correct context */
1784 old_fs = get_fs();
1785 set_fs(KERNEL_DS);
1786
1787 /*
1788 * Flush the instruction cache, since we've played with text.
1789 * Do it before processing of module parameters, so the module
1790 * can provide parameter accessor functions of its own.
1791 */
1792 if (mod->module_init)
1793 flush_icache_range((unsigned long)mod->module_init,
1794 (unsigned long)mod->module_init
1795 + mod->init_size);
1796 flush_icache_range((unsigned long)mod->module_core,
1797 (unsigned long)mod->module_core + mod->core_size);
1798
1799 set_fs(old_fs);
1800
1782 mod->args = args; 1801 mod->args = args;
1783 if (obsparmindex) { 1802 if (obsparmindex) {
1784 err = obsolete_params(mod->name, mod->args, 1803 err = obsolete_params(mod->name, mod->args,
@@ -1860,7 +1879,6 @@ sys_init_module(void __user *umod,
1860 const char __user *uargs) 1879 const char __user *uargs)
1861{ 1880{
1862 struct module *mod; 1881 struct module *mod;
1863 mm_segment_t old_fs = get_fs();
1864 int ret = 0; 1882 int ret = 0;
1865 1883
1866 /* Must have permission */ 1884 /* Must have permission */
@@ -1878,19 +1896,6 @@ sys_init_module(void __user *umod,
1878 return PTR_ERR(mod); 1896 return PTR_ERR(mod);
1879 } 1897 }
1880 1898
1881 /* flush the icache in correct context */
1882 set_fs(KERNEL_DS);
1883
1884 /* Flush the instruction cache, since we've played with text */
1885 if (mod->module_init)
1886 flush_icache_range((unsigned long)mod->module_init,
1887 (unsigned long)mod->module_init
1888 + mod->init_size);
1889 flush_icache_range((unsigned long)mod->module_core,
1890 (unsigned long)mod->module_core + mod->core_size);
1891
1892 set_fs(old_fs);
1893
1894 /* Now sew it into the lists. They won't access us, since 1899 /* Now sew it into the lists. They won't access us, since
1895 strong_try_module_get() will fail. */ 1900 strong_try_module_get() will fail. */
1896 stop_machine_run(__link_module, mod, NR_CPUS); 1901 stop_machine_run(__link_module, mod, NR_CPUS);
diff --git a/kernel/params.c b/kernel/params.c
index d586c35ef8fc..fbf173215fd2 100644
--- a/kernel/params.c
+++ b/kernel/params.c
@@ -542,8 +542,8 @@ static void __init kernel_param_sysfs_setup(const char *name,
542{ 542{
543 struct module_kobject *mk; 543 struct module_kobject *mk;
544 544
545 mk = kmalloc(sizeof(struct module_kobject), GFP_KERNEL); 545 mk = kzalloc(sizeof(struct module_kobject), GFP_KERNEL);
546 memset(mk, 0, sizeof(struct module_kobject)); 546 BUG_ON(!mk);
547 547
548 mk->mod = THIS_MODULE; 548 mk->mod = THIS_MODULE;
549 kobj_set_kset_s(mk, module_subsys); 549 kobj_set_kset_s(mk, module_subsys);
diff --git a/kernel/posix-timers.c b/kernel/posix-timers.c
index 38798a2ff994..b7b532acd9fc 100644
--- a/kernel/posix-timers.c
+++ b/kernel/posix-timers.c
@@ -427,21 +427,23 @@ int posix_timer_event(struct k_itimer *timr,int si_private)
427 timr->sigq->info.si_code = SI_TIMER; 427 timr->sigq->info.si_code = SI_TIMER;
428 timr->sigq->info.si_tid = timr->it_id; 428 timr->sigq->info.si_tid = timr->it_id;
429 timr->sigq->info.si_value = timr->it_sigev_value; 429 timr->sigq->info.si_value = timr->it_sigev_value;
430
430 if (timr->it_sigev_notify & SIGEV_THREAD_ID) { 431 if (timr->it_sigev_notify & SIGEV_THREAD_ID) {
431 if (unlikely(timr->it_process->flags & PF_EXITING)) { 432 struct task_struct *leader;
432 timr->it_sigev_notify = SIGEV_SIGNAL; 433 int ret = send_sigqueue(timr->it_sigev_signo, timr->sigq,
433 put_task_struct(timr->it_process); 434 timr->it_process);
434 timr->it_process = timr->it_process->group_leader; 435
435 goto group; 436 if (likely(ret >= 0))
436 } 437 return ret;
437 return send_sigqueue(timr->it_sigev_signo, timr->sigq, 438
438 timr->it_process); 439 timr->it_sigev_notify = SIGEV_SIGNAL;
439 } 440 leader = timr->it_process->group_leader;
440 else { 441 put_task_struct(timr->it_process);
441 group: 442 timr->it_process = leader;
442 return send_group_sigqueue(timr->it_sigev_signo, timr->sigq,
443 timr->it_process);
444 } 443 }
444
445 return send_group_sigqueue(timr->it_sigev_signo, timr->sigq,
446 timr->it_process);
445} 447}
446EXPORT_SYMBOL_GPL(posix_timer_event); 448EXPORT_SYMBOL_GPL(posix_timer_event);
447 449
diff --git a/kernel/power/Kconfig b/kernel/power/Kconfig
index 917066a5767c..c14cd9991181 100644
--- a/kernel/power/Kconfig
+++ b/kernel/power/Kconfig
@@ -28,7 +28,7 @@ config PM_DEBUG
28 28
29config SOFTWARE_SUSPEND 29config SOFTWARE_SUSPEND
30 bool "Software Suspend" 30 bool "Software Suspend"
31 depends on EXPERIMENTAL && PM && SWAP && ((X86 && SMP) || ((FVR || PPC32 || X86) && !SMP)) 31 depends on PM && SWAP && (X86 || ((FVR || PPC32) && !SMP))
32 ---help--- 32 ---help---
33 Enable the possibility of suspending the machine. 33 Enable the possibility of suspending the machine.
34 It doesn't need APM. 34 It doesn't need APM.
diff --git a/kernel/power/pm.c b/kernel/power/pm.c
index 61deda04e39e..159149321b3c 100644
--- a/kernel/power/pm.c
+++ b/kernel/power/pm.c
@@ -60,9 +60,8 @@ struct pm_dev *pm_register(pm_dev_t type,
60 unsigned long id, 60 unsigned long id,
61 pm_callback callback) 61 pm_callback callback)
62{ 62{
63 struct pm_dev *dev = kmalloc(sizeof(struct pm_dev), GFP_KERNEL); 63 struct pm_dev *dev = kzalloc(sizeof(struct pm_dev), GFP_KERNEL);
64 if (dev) { 64 if (dev) {
65 memset(dev, 0, sizeof(*dev));
66 dev->type = type; 65 dev->type = type;
67 dev->id = id; 66 dev->id = id;
68 dev->callback = callback; 67 dev->callback = callback;
diff --git a/kernel/power/swsusp.c b/kernel/power/swsusp.c
index eaacd5cb5889..d967e875ee82 100644
--- a/kernel/power/swsusp.c
+++ b/kernel/power/swsusp.c
@@ -1059,6 +1059,7 @@ int swsusp_resume(void)
1059 BUG_ON(!error); 1059 BUG_ON(!error);
1060 restore_processor_state(); 1060 restore_processor_state();
1061 restore_highmem(); 1061 restore_highmem();
1062 touch_softlockup_watchdog();
1062 device_power_up(); 1063 device_power_up();
1063 local_irq_enable(); 1064 local_irq_enable();
1064 return error; 1065 return error;
diff --git a/kernel/printk.c b/kernel/printk.c
index 5092397fac29..a967605bc2e3 100644
--- a/kernel/printk.c
+++ b/kernel/printk.c
@@ -514,6 +514,9 @@ asmlinkage int printk(const char *fmt, ...)
514 return r; 514 return r;
515} 515}
516 516
517/* cpu currently holding logbuf_lock */
518static volatile unsigned int printk_cpu = UINT_MAX;
519
517asmlinkage int vprintk(const char *fmt, va_list args) 520asmlinkage int vprintk(const char *fmt, va_list args)
518{ 521{
519 unsigned long flags; 522 unsigned long flags;
@@ -522,11 +525,15 @@ asmlinkage int vprintk(const char *fmt, va_list args)
522 static char printk_buf[1024]; 525 static char printk_buf[1024];
523 static int log_level_unknown = 1; 526 static int log_level_unknown = 1;
524 527
525 if (unlikely(oops_in_progress)) 528 preempt_disable();
529 if (unlikely(oops_in_progress) && printk_cpu == smp_processor_id())
530 /* If a crash is occurring during printk() on this CPU,
531 * make sure we can't deadlock */
526 zap_locks(); 532 zap_locks();
527 533
528 /* This stops the holder of console_sem just where we want him */ 534 /* This stops the holder of console_sem just where we want him */
529 spin_lock_irqsave(&logbuf_lock, flags); 535 spin_lock_irqsave(&logbuf_lock, flags);
536 printk_cpu = smp_processor_id();
530 537
531 /* Emit the output into the temporary buffer */ 538 /* Emit the output into the temporary buffer */
532 printed_len = vscnprintf(printk_buf, sizeof(printk_buf), fmt, args); 539 printed_len = vscnprintf(printk_buf, sizeof(printk_buf), fmt, args);
@@ -595,6 +602,7 @@ asmlinkage int vprintk(const char *fmt, va_list args)
595 * CPU until it is officially up. We shouldn't be calling into 602 * CPU until it is officially up. We shouldn't be calling into
596 * random console drivers on a CPU which doesn't exist yet.. 603 * random console drivers on a CPU which doesn't exist yet..
597 */ 604 */
605 printk_cpu = UINT_MAX;
598 spin_unlock_irqrestore(&logbuf_lock, flags); 606 spin_unlock_irqrestore(&logbuf_lock, flags);
599 goto out; 607 goto out;
600 } 608 }
@@ -604,6 +612,7 @@ asmlinkage int vprintk(const char *fmt, va_list args)
604 * We own the drivers. We can drop the spinlock and let 612 * We own the drivers. We can drop the spinlock and let
605 * release_console_sem() print the text 613 * release_console_sem() print the text
606 */ 614 */
615 printk_cpu = UINT_MAX;
607 spin_unlock_irqrestore(&logbuf_lock, flags); 616 spin_unlock_irqrestore(&logbuf_lock, flags);
608 console_may_schedule = 0; 617 console_may_schedule = 0;
609 release_console_sem(); 618 release_console_sem();
@@ -613,9 +622,11 @@ asmlinkage int vprintk(const char *fmt, va_list args)
613 * allows the semaphore holder to proceed and to call the 622 * allows the semaphore holder to proceed and to call the
614 * console drivers with the output which we just produced. 623 * console drivers with the output which we just produced.
615 */ 624 */
625 printk_cpu = UINT_MAX;
616 spin_unlock_irqrestore(&logbuf_lock, flags); 626 spin_unlock_irqrestore(&logbuf_lock, flags);
617 } 627 }
618out: 628out:
629 preempt_enable();
619 return printed_len; 630 return printed_len;
620} 631}
621EXPORT_SYMBOL(printk); 632EXPORT_SYMBOL(printk);
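
Editor's note: the vprintk() hunk records in printk_cpu which CPU currently holds logbuf_lock, so zap_locks() is only invoked when an oops happens while that same CPU is already inside printk(), i.e. on genuine recursion rather than on any concurrent crash. Below is a loose userspace sketch of the same owner-tracking guard, using pthreads; every name in it is invented for the illustration and the recursive case itself is not exercised.

#include <stdio.h>
#include <pthread.h>

static pthread_mutex_t log_lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_t log_owner;
static int log_owner_valid;             /* stands in for printk_cpu != UINT_MAX */
static int crash_in_progress;           /* stands in for oops_in_progress */

static void log_message(const char *msg)
{
        /* Only bust the lock if *we* already hold it and a crash is under
         * way; another thread legitimately holding it is left alone. */
        if (crash_in_progress && log_owner_valid &&
            pthread_equal(log_owner, pthread_self()))
                pthread_mutex_unlock(&log_lock);        /* "zap_locks()" */

        pthread_mutex_lock(&log_lock);
        log_owner = pthread_self();
        log_owner_valid = 1;

        fprintf(stderr, "%s\n", msg);

        log_owner_valid = 0;
        pthread_mutex_unlock(&log_lock);
}

int main(void)
{
        log_message("hello from the sketch");
        return 0;
}
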
diff --git a/kernel/ptrace.c b/kernel/ptrace.c
index 8dcb8f6288bc..019e04ec065a 100644
--- a/kernel/ptrace.c
+++ b/kernel/ptrace.c
@@ -118,6 +118,33 @@ int ptrace_check_attach(struct task_struct *child, int kill)
118 return ret; 118 return ret;
119} 119}
120 120
121static int may_attach(struct task_struct *task)
122{
123 if (!task->mm)
124 return -EPERM;
125 if (((current->uid != task->euid) ||
126 (current->uid != task->suid) ||
127 (current->uid != task->uid) ||
128 (current->gid != task->egid) ||
129 (current->gid != task->sgid) ||
130 (current->gid != task->gid)) && !capable(CAP_SYS_PTRACE))
131 return -EPERM;
132 smp_rmb();
133 if (!task->mm->dumpable && !capable(CAP_SYS_PTRACE))
134 return -EPERM;
135
136 return security_ptrace(current, task);
137}
138
139int ptrace_may_attach(struct task_struct *task)
140{
141 int err;
142 task_lock(task);
143 err = may_attach(task);
144 task_unlock(task);
145 return !err;
146}
147
121int ptrace_attach(struct task_struct *task) 148int ptrace_attach(struct task_struct *task)
122{ 149{
123 int retval; 150 int retval;
@@ -127,22 +154,10 @@ int ptrace_attach(struct task_struct *task)
127 goto bad; 154 goto bad;
128 if (task == current) 155 if (task == current)
129 goto bad; 156 goto bad;
130 if (!task->mm)
131 goto bad;
132 if(((current->uid != task->euid) ||
133 (current->uid != task->suid) ||
134 (current->uid != task->uid) ||
135 (current->gid != task->egid) ||
136 (current->gid != task->sgid) ||
137 (current->gid != task->gid)) && !capable(CAP_SYS_PTRACE))
138 goto bad;
139 smp_rmb();
140 if (!task->mm->dumpable && !capable(CAP_SYS_PTRACE))
141 goto bad;
142 /* the same process cannot be attached many times */ 157 /* the same process cannot be attached many times */
143 if (task->ptrace & PT_PTRACED) 158 if (task->ptrace & PT_PTRACED)
144 goto bad; 159 goto bad;
145 retval = security_ptrace(current, task); 160 retval = may_attach(task);
146 if (retval) 161 if (retval)
147 goto bad; 162 goto bad;
148 163
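
Editor's note: ptrace.c now factors the permission checks out of ptrace_attach() into may_attach(), which runs under task_lock(), and adds ptrace_may_attach() as a lock-taking boolean wrapper for other callers. A minimal sketch of that "check under the lock, wrap for convenience" split is shown below with stand-in types; nothing here is the real task_struct or capability API.

#include <stdio.h>
#include <pthread.h>

struct task {
        pthread_mutex_t lock;
        int uid;
        int dumpable;
};

/* Core check: caller must already hold task->lock. Returns 0 or -1. */
static int may_attach_locked(const struct task *tracer, const struct task *task)
{
        if (tracer->uid != task->uid && tracer->uid != 0)
                return -1;
        if (!task->dumpable && tracer->uid != 0)
                return -1;
        return 0;
}

/* Convenience wrapper: takes the lock and converts the result to a
 * boolean, mirroring ptrace_may_attach(). */
static int may_attach(const struct task *tracer, struct task *task)
{
        int err;

        pthread_mutex_lock(&task->lock);
        err = may_attach_locked(tracer, task);
        pthread_mutex_unlock(&task->lock);
        return !err;
}

int main(void)
{
        struct task tracer = { .uid = 1000, .dumpable = 1 };
        struct task target = { .uid = 1000, .dumpable = 1 };

        pthread_mutex_init(&tracer.lock, NULL);
        pthread_mutex_init(&target.lock, NULL);

        printf("may attach: %d\n", may_attach(&tracer, &target));
        return 0;
}
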
diff --git a/kernel/resource.c b/kernel/resource.c
index 26967e042201..92285d822de6 100644
--- a/kernel/resource.c
+++ b/kernel/resource.c
@@ -430,10 +430,9 @@ EXPORT_SYMBOL(adjust_resource);
430 */ 430 */
431struct resource * __request_region(struct resource *parent, unsigned long start, unsigned long n, const char *name) 431struct resource * __request_region(struct resource *parent, unsigned long start, unsigned long n, const char *name)
432{ 432{
433 struct resource *res = kmalloc(sizeof(*res), GFP_KERNEL); 433 struct resource *res = kzalloc(sizeof(*res), GFP_KERNEL);
434 434
435 if (res) { 435 if (res) {
436 memset(res, 0, sizeof(*res));
437 res->name = name; 436 res->name = name;
438 res->start = start; 437 res->start = start;
439 res->end = start + n - 1; 438 res->end = start + n - 1;
diff --git a/kernel/sched.c b/kernel/sched.c
index 5f889d0cbfcc..9508527845df 100644
--- a/kernel/sched.c
+++ b/kernel/sched.c
@@ -4779,7 +4779,7 @@ static int sd_parent_degenerate(struct sched_domain *sd,
4779 * Attach the domain 'sd' to 'cpu' as its base domain. Callers must 4779 * Attach the domain 'sd' to 'cpu' as its base domain. Callers must
4780 * hold the hotplug lock. 4780 * hold the hotplug lock.
4781 */ 4781 */
4782void cpu_attach_domain(struct sched_domain *sd, int cpu) 4782static void cpu_attach_domain(struct sched_domain *sd, int cpu)
4783{ 4783{
4784 runqueue_t *rq = cpu_rq(cpu); 4784 runqueue_t *rq = cpu_rq(cpu);
4785 struct sched_domain *tmp; 4785 struct sched_domain *tmp;
@@ -4802,7 +4802,7 @@ void cpu_attach_domain(struct sched_domain *sd, int cpu)
4802} 4802}
4803 4803
4804/* cpus with isolated domains */ 4804/* cpus with isolated domains */
4805cpumask_t __devinitdata cpu_isolated_map = CPU_MASK_NONE; 4805static cpumask_t __devinitdata cpu_isolated_map = CPU_MASK_NONE;
4806 4806
4807/* Setup the mask of cpus configured for isolated domains */ 4807/* Setup the mask of cpus configured for isolated domains */
4808static int __init isolated_cpu_setup(char *str) 4808static int __init isolated_cpu_setup(char *str)
@@ -4830,8 +4830,8 @@ __setup ("isolcpus=", isolated_cpu_setup);
4830 * covered by the given span, and will set each group's ->cpumask correctly, 4830 * covered by the given span, and will set each group's ->cpumask correctly,
4831 * and ->cpu_power to 0. 4831 * and ->cpu_power to 0.
4832 */ 4832 */
4833void init_sched_build_groups(struct sched_group groups[], 4833static void init_sched_build_groups(struct sched_group groups[], cpumask_t span,
4834 cpumask_t span, int (*group_fn)(int cpu)) 4834 int (*group_fn)(int cpu))
4835{ 4835{
4836 struct sched_group *first = NULL, *last = NULL; 4836 struct sched_group *first = NULL, *last = NULL;
4837 cpumask_t covered = CPU_MASK_NONE; 4837 cpumask_t covered = CPU_MASK_NONE;
@@ -4864,12 +4864,85 @@ void init_sched_build_groups(struct sched_group groups[],
4864 last->next = first; 4864 last->next = first;
4865} 4865}
4866 4866
4867#define SD_NODES_PER_DOMAIN 16
4867 4868
4868#ifdef ARCH_HAS_SCHED_DOMAIN 4869#ifdef CONFIG_NUMA
4869extern void build_sched_domains(const cpumask_t *cpu_map); 4870/**
4870extern void arch_init_sched_domains(const cpumask_t *cpu_map); 4871 * find_next_best_node - find the next node to include in a sched_domain
4871extern void arch_destroy_sched_domains(const cpumask_t *cpu_map); 4872 * @node: node whose sched_domain we're building
4872#else 4873 * @used_nodes: nodes already in the sched_domain
4874 *
4875 * Find the next node to include in a given scheduling domain. Simply
4876 * finds the closest node not already in the @used_nodes map.
4877 *
4878 * Should use nodemask_t.
4879 */
4880static int find_next_best_node(int node, unsigned long *used_nodes)
4881{
4882 int i, n, val, min_val, best_node = 0;
4883
4884 min_val = INT_MAX;
4885
4886 for (i = 0; i < MAX_NUMNODES; i++) {
4887 /* Start at @node */
4888 n = (node + i) % MAX_NUMNODES;
4889
4890 if (!nr_cpus_node(n))
4891 continue;
4892
4893 /* Skip already used nodes */
4894 if (test_bit(n, used_nodes))
4895 continue;
4896
4897 /* Simple min distance search */
4898 val = node_distance(node, n);
4899
4900 if (val < min_val) {
4901 min_val = val;
4902 best_node = n;
4903 }
4904 }
4905
4906 set_bit(best_node, used_nodes);
4907 return best_node;
4908}
4909
4910/**
4911 * sched_domain_node_span - get a cpumask for a node's sched_domain
4912 * @node: node whose cpumask we're constructing
4913 * @size: number of nodes to include in this span
4914 *
4915 * Given a node, construct a good cpumask for its sched_domain to span. It
4916 * should be one that prevents unnecessary balancing, but also spreads tasks
4917 * out optimally.
4918 */
4919static cpumask_t sched_domain_node_span(int node)
4920{
4921 int i;
4922 cpumask_t span, nodemask;
4923 DECLARE_BITMAP(used_nodes, MAX_NUMNODES);
4924
4925 cpus_clear(span);
4926 bitmap_zero(used_nodes, MAX_NUMNODES);
4927
4928 nodemask = node_to_cpumask(node);
4929 cpus_or(span, span, nodemask);
4930 set_bit(node, used_nodes);
4931
4932 for (i = 1; i < SD_NODES_PER_DOMAIN; i++) {
4933 int next_node = find_next_best_node(node, used_nodes);
4934 nodemask = node_to_cpumask(next_node);
4935 cpus_or(span, span, nodemask);
4936 }
4937
4938 return span;
4939}
4940#endif
4941
4942/*
4943 * At the moment, CONFIG_SCHED_SMT is never defined, but leave it in so we
4944 * can switch it on easily if needed.
4945 */
4873#ifdef CONFIG_SCHED_SMT 4946#ifdef CONFIG_SCHED_SMT
4874static DEFINE_PER_CPU(struct sched_domain, cpu_domains); 4947static DEFINE_PER_CPU(struct sched_domain, cpu_domains);
4875static struct sched_group sched_group_cpus[NR_CPUS]; 4948static struct sched_group sched_group_cpus[NR_CPUS];
@@ -4891,36 +4964,20 @@ static int cpu_to_phys_group(int cpu)
4891} 4964}
4892 4965
4893#ifdef CONFIG_NUMA 4966#ifdef CONFIG_NUMA
4894
4895static DEFINE_PER_CPU(struct sched_domain, node_domains);
4896static struct sched_group sched_group_nodes[MAX_NUMNODES];
4897static int cpu_to_node_group(int cpu)
4898{
4899 return cpu_to_node(cpu);
4900}
4901#endif
4902
4903#if defined(CONFIG_SCHED_SMT) && defined(CONFIG_NUMA)
4904/* 4967/*
4905 * The domains setup code relies on siblings not spanning 4968 * The init_sched_build_groups can't handle what we want to do with node
4906 * multiple nodes. Make sure the architecture has a proper 4969 * groups, so roll our own. Now each node has its own list of groups which
4907 * siblings map: 4970 * gets dynamically allocated.
4908 */ 4971 */
4909static void check_sibling_maps(void) 4972static DEFINE_PER_CPU(struct sched_domain, node_domains);
4910{ 4973static struct sched_group **sched_group_nodes_bycpu[NR_CPUS];
4911 int i, j;
4912 4974
4913 for_each_online_cpu(i) { 4975static DEFINE_PER_CPU(struct sched_domain, allnodes_domains);
4914 for_each_cpu_mask(j, cpu_sibling_map[i]) { 4976static struct sched_group *sched_group_allnodes_bycpu[NR_CPUS];
4915 if (cpu_to_node(i) != cpu_to_node(j)) { 4977
4916 printk(KERN_INFO "warning: CPU %d siblings map " 4978static int cpu_to_allnodes_group(int cpu)
4917 "to different node - isolating " 4979{
4918 "them.\n", i); 4980 return cpu_to_node(cpu);
4919 cpu_sibling_map[i] = cpumask_of_cpu(i);
4920 break;
4921 }
4922 }
4923 }
4924} 4981}
4925#endif 4982#endif
4926 4983
@@ -4928,9 +4985,24 @@ static void check_sibling_maps(void)
4928 * Build sched domains for a given set of cpus and attach the sched domains 4985 * Build sched domains for a given set of cpus and attach the sched domains
4929 * to the individual cpus 4986 * to the individual cpus
4930 */ 4987 */
4931static void build_sched_domains(const cpumask_t *cpu_map) 4988void build_sched_domains(const cpumask_t *cpu_map)
4932{ 4989{
4933 int i; 4990 int i;
4991#ifdef CONFIG_NUMA
4992 struct sched_group **sched_group_nodes = NULL;
4993 struct sched_group *sched_group_allnodes = NULL;
4994
4995 /*
4996 * Allocate the per-node list of sched groups
4997 */
4998 sched_group_nodes = kmalloc(sizeof(struct sched_group*)*MAX_NUMNODES,
4999 GFP_ATOMIC);
5000 if (!sched_group_nodes) {
5001 printk(KERN_WARNING "Can not alloc sched group node list\n");
5002 return;
5003 }
5004 sched_group_nodes_bycpu[first_cpu(*cpu_map)] = sched_group_nodes;
5005#endif
4934 5006
4935 /* 5007 /*
4936 * Set up domains for cpus specified by the cpu_map. 5008 * Set up domains for cpus specified by the cpu_map.
@@ -4943,11 +5015,35 @@ static void build_sched_domains(const cpumask_t *cpu_map)
4943 cpus_and(nodemask, nodemask, *cpu_map); 5015 cpus_and(nodemask, nodemask, *cpu_map);
4944 5016
4945#ifdef CONFIG_NUMA 5017#ifdef CONFIG_NUMA
5018 if (cpus_weight(*cpu_map)
5019 > SD_NODES_PER_DOMAIN*cpus_weight(nodemask)) {
5020 if (!sched_group_allnodes) {
5021 sched_group_allnodes
5022 = kmalloc(sizeof(struct sched_group)
5023 * MAX_NUMNODES,
5024 GFP_KERNEL);
5025 if (!sched_group_allnodes) {
5026 printk(KERN_WARNING
5027 "Can not alloc allnodes sched group\n");
5028 break;
5029 }
5030 sched_group_allnodes_bycpu[i]
5031 = sched_group_allnodes;
5032 }
5033 sd = &per_cpu(allnodes_domains, i);
5034 *sd = SD_ALLNODES_INIT;
5035 sd->span = *cpu_map;
5036 group = cpu_to_allnodes_group(i);
5037 sd->groups = &sched_group_allnodes[group];
5038 p = sd;
5039 } else
5040 p = NULL;
5041
4946 sd = &per_cpu(node_domains, i); 5042 sd = &per_cpu(node_domains, i);
4947 group = cpu_to_node_group(i);
4948 *sd = SD_NODE_INIT; 5043 *sd = SD_NODE_INIT;
4949 sd->span = *cpu_map; 5044 sd->span = sched_domain_node_span(cpu_to_node(i));
4950 sd->groups = &sched_group_nodes[group]; 5045 sd->parent = p;
5046 cpus_and(sd->span, sd->span, *cpu_map);
4951#endif 5047#endif
4952 5048
4953 p = sd; 5049 p = sd;
@@ -4972,7 +5068,7 @@ static void build_sched_domains(const cpumask_t *cpu_map)
4972 5068
4973#ifdef CONFIG_SCHED_SMT 5069#ifdef CONFIG_SCHED_SMT
4974 /* Set up CPU (sibling) groups */ 5070 /* Set up CPU (sibling) groups */
4975 for_each_online_cpu(i) { 5071 for_each_cpu_mask(i, *cpu_map) {
4976 cpumask_t this_sibling_map = cpu_sibling_map[i]; 5072 cpumask_t this_sibling_map = cpu_sibling_map[i];
4977 cpus_and(this_sibling_map, this_sibling_map, *cpu_map); 5073 cpus_and(this_sibling_map, this_sibling_map, *cpu_map);
4978 if (i != first_cpu(this_sibling_map)) 5074 if (i != first_cpu(this_sibling_map))
@@ -4997,8 +5093,77 @@ static void build_sched_domains(const cpumask_t *cpu_map)
4997 5093
4998#ifdef CONFIG_NUMA 5094#ifdef CONFIG_NUMA
4999 /* Set up node groups */ 5095 /* Set up node groups */
5000 init_sched_build_groups(sched_group_nodes, *cpu_map, 5096 if (sched_group_allnodes)
5001 &cpu_to_node_group); 5097 init_sched_build_groups(sched_group_allnodes, *cpu_map,
5098 &cpu_to_allnodes_group);
5099
5100 for (i = 0; i < MAX_NUMNODES; i++) {
5101 /* Set up node groups */
5102 struct sched_group *sg, *prev;
5103 cpumask_t nodemask = node_to_cpumask(i);
5104 cpumask_t domainspan;
5105 cpumask_t covered = CPU_MASK_NONE;
5106 int j;
5107
5108 cpus_and(nodemask, nodemask, *cpu_map);
5109 if (cpus_empty(nodemask)) {
5110 sched_group_nodes[i] = NULL;
5111 continue;
5112 }
5113
5114 domainspan = sched_domain_node_span(i);
5115 cpus_and(domainspan, domainspan, *cpu_map);
5116
5117 sg = kmalloc(sizeof(struct sched_group), GFP_KERNEL);
5118 sched_group_nodes[i] = sg;
5119 for_each_cpu_mask(j, nodemask) {
5120 struct sched_domain *sd;
5121 sd = &per_cpu(node_domains, j);
5122 sd->groups = sg;
5123 if (sd->groups == NULL) {
5124 /* Turn off balancing if we have no groups */
5125 sd->flags = 0;
5126 }
5127 }
5128 if (!sg) {
5129 printk(KERN_WARNING
5130 "Can not alloc domain group for node %d\n", i);
5131 continue;
5132 }
5133 sg->cpu_power = 0;
5134 sg->cpumask = nodemask;
5135 cpus_or(covered, covered, nodemask);
5136 prev = sg;
5137
5138 for (j = 0; j < MAX_NUMNODES; j++) {
5139 cpumask_t tmp, notcovered;
5140 int n = (i + j) % MAX_NUMNODES;
5141
5142 cpus_complement(notcovered, covered);
5143 cpus_and(tmp, notcovered, *cpu_map);
5144 cpus_and(tmp, tmp, domainspan);
5145 if (cpus_empty(tmp))
5146 break;
5147
5148 nodemask = node_to_cpumask(n);
5149 cpus_and(tmp, tmp, nodemask);
5150 if (cpus_empty(tmp))
5151 continue;
5152
5153 sg = kmalloc(sizeof(struct sched_group), GFP_KERNEL);
5154 if (!sg) {
5155 printk(KERN_WARNING
5156 "Can not alloc domain group for node %d\n", j);
5157 break;
5158 }
5159 sg->cpu_power = 0;
5160 sg->cpumask = tmp;
5161 cpus_or(covered, covered, tmp);
5162 prev->next = sg;
5163 prev = sg;
5164 }
5165 prev->next = sched_group_nodes[i];
5166 }
5002#endif 5167#endif
5003 5168
5004 /* Calculate CPU power for physical packages and nodes */ 5169 /* Calculate CPU power for physical packages and nodes */
@@ -5017,14 +5182,46 @@ static void build_sched_domains(const cpumask_t *cpu_map)
5017 sd->groups->cpu_power = power; 5182 sd->groups->cpu_power = power;
5018 5183
5019#ifdef CONFIG_NUMA 5184#ifdef CONFIG_NUMA
5020 if (i == first_cpu(sd->groups->cpumask)) { 5185 sd = &per_cpu(allnodes_domains, i);
5021 /* Only add "power" once for each physical package. */ 5186 if (sd->groups) {
5022 sd = &per_cpu(node_domains, i); 5187 power = SCHED_LOAD_SCALE + SCHED_LOAD_SCALE *
5023 sd->groups->cpu_power += power; 5188 (cpus_weight(sd->groups->cpumask)-1) / 10;
5189 sd->groups->cpu_power = power;
5024 } 5190 }
5025#endif 5191#endif
5026 } 5192 }
5027 5193
5194#ifdef CONFIG_NUMA
5195 for (i = 0; i < MAX_NUMNODES; i++) {
5196 struct sched_group *sg = sched_group_nodes[i];
5197 int j;
5198
5199 if (sg == NULL)
5200 continue;
5201next_sg:
5202 for_each_cpu_mask(j, sg->cpumask) {
5203 struct sched_domain *sd;
5204 int power;
5205
5206 sd = &per_cpu(phys_domains, j);
5207 if (j != first_cpu(sd->groups->cpumask)) {
5208 /*
5209 * Only add "power" once for each
5210 * physical package.
5211 */
5212 continue;
5213 }
5214 power = SCHED_LOAD_SCALE + SCHED_LOAD_SCALE *
5215 (cpus_weight(sd->groups->cpumask)-1) / 10;
5216
5217 sg->cpu_power += power;
5218 }
5219 sg = sg->next;
5220 if (sg != sched_group_nodes[i])
5221 goto next_sg;
5222 }
5223#endif
5224
5028 /* Attach the domains */ 5225 /* Attach the domains */
5029 for_each_cpu_mask(i, *cpu_map) { 5226 for_each_cpu_mask(i, *cpu_map) {
5030 struct sched_domain *sd; 5227 struct sched_domain *sd;
@@ -5039,13 +5236,10 @@ static void build_sched_domains(const cpumask_t *cpu_map)
5039/* 5236/*
5040 * Set up scheduler domains and groups. Callers must hold the hotplug lock. 5237 * Set up scheduler domains and groups. Callers must hold the hotplug lock.
5041 */ 5238 */
5042static void arch_init_sched_domains(cpumask_t *cpu_map) 5239static void arch_init_sched_domains(const cpumask_t *cpu_map)
5043{ 5240{
5044 cpumask_t cpu_default_map; 5241 cpumask_t cpu_default_map;
5045 5242
5046#if defined(CONFIG_SCHED_SMT) && defined(CONFIG_NUMA)
5047 check_sibling_maps();
5048#endif
5049 /* 5243 /*
5050 * Setup mask for cpus without special case scheduling requirements. 5244 * Setup mask for cpus without special case scheduling requirements.
5051 * For now this just excludes isolated cpus, but could be used to 5245 * For now this just excludes isolated cpus, but could be used to
@@ -5058,10 +5252,47 @@ static void arch_init_sched_domains(cpumask_t *cpu_map)
5058 5252
5059static void arch_destroy_sched_domains(const cpumask_t *cpu_map) 5253static void arch_destroy_sched_domains(const cpumask_t *cpu_map)
5060{ 5254{
5061 /* Do nothing: everything is statically allocated. */ 5255#ifdef CONFIG_NUMA
5062} 5256 int i;
5257 int cpu;
5258
5259 for_each_cpu_mask(cpu, *cpu_map) {
5260 struct sched_group *sched_group_allnodes
5261 = sched_group_allnodes_bycpu[cpu];
5262 struct sched_group **sched_group_nodes
5263 = sched_group_nodes_bycpu[cpu];
5264
5265 if (sched_group_allnodes) {
5266 kfree(sched_group_allnodes);
5267 sched_group_allnodes_bycpu[cpu] = NULL;
5268 }
5269
5270 if (!sched_group_nodes)
5271 continue;
5272
5273 for (i = 0; i < MAX_NUMNODES; i++) {
5274 cpumask_t nodemask = node_to_cpumask(i);
5275 struct sched_group *oldsg, *sg = sched_group_nodes[i];
5063 5276
5064#endif /* ARCH_HAS_SCHED_DOMAIN */ 5277 cpus_and(nodemask, nodemask, *cpu_map);
5278 if (cpus_empty(nodemask))
5279 continue;
5280
5281 if (sg == NULL)
5282 continue;
5283 sg = sg->next;
5284next_sg:
5285 oldsg = sg;
5286 sg = sg->next;
5287 kfree(oldsg);
5288 if (oldsg != sched_group_nodes[i])
5289 goto next_sg;
5290 }
5291 kfree(sched_group_nodes);
5292 sched_group_nodes_bycpu[cpu] = NULL;
5293 }
5294#endif
5295}
5065 5296
5066/* 5297/*
5067 * Detach sched domains from a group of cpus specified in cpu_map 5298 * Detach sched domains from a group of cpus specified in cpu_map
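
Editor's note: the NUMA scheduler rework above builds each node's sched_domain span by repeatedly calling find_next_best_node(), a greedy pass that picks the closest node (smallest node_distance()) not yet in the used-nodes map. A self-contained sketch of that selection loop follows, using a made-up 4-node distance table in place of the firmware-provided distances.

#include <stdio.h>
#include <limits.h>

#define MAX_NODES 4

/* Toy distance table (node_distance() analogue); lower is closer. */
static const int distance[MAX_NODES][MAX_NODES] = {
        { 10, 20, 40, 40 },
        { 20, 10, 40, 40 },
        { 40, 40, 10, 20 },
        { 40, 40, 20, 10 },
};

/* Greedy step: closest node not already in used_mask, starting at @node. */
static int find_next_best_node(int node, unsigned *used_mask)
{
        int i, best_node = 0, min_val = INT_MAX;

        for (i = 0; i < MAX_NODES; i++) {
                int n = (node + i) % MAX_NODES;

                if (*used_mask & (1u << n))
                        continue;
                if (distance[node][n] < min_val) {
                        min_val = distance[node][n];
                        best_node = n;
                }
        }
        *used_mask |= 1u << best_node;
        return best_node;
}

int main(void)
{
        unsigned used = 1u << 0;        /* the domain already contains node 0 */
        int i;

        printf("span for node 0: 0");
        for (i = 1; i < MAX_NODES; i++)
                printf(" %d", find_next_best_node(0, &used));
        printf("\n");                   /* expected with this table: 1 2 3 */
        return 0;
}
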
diff --git a/kernel/signal.c b/kernel/signal.c
index d282fea81138..4980a073237f 100644
--- a/kernel/signal.c
+++ b/kernel/signal.c
@@ -678,7 +678,7 @@ static int check_kill_permission(int sig, struct siginfo *info,
678 678
679/* forward decl */ 679/* forward decl */
680static void do_notify_parent_cldstop(struct task_struct *tsk, 680static void do_notify_parent_cldstop(struct task_struct *tsk,
681 struct task_struct *parent, 681 int to_self,
682 int why); 682 int why);
683 683
684/* 684/*
@@ -729,14 +729,7 @@ static void handle_stop_signal(int sig, struct task_struct *p)
729 p->signal->group_stop_count = 0; 729 p->signal->group_stop_count = 0;
730 p->signal->flags = SIGNAL_STOP_CONTINUED; 730 p->signal->flags = SIGNAL_STOP_CONTINUED;
731 spin_unlock(&p->sighand->siglock); 731 spin_unlock(&p->sighand->siglock);
732 if (p->ptrace & PT_PTRACED) 732 do_notify_parent_cldstop(p, (p->ptrace & PT_PTRACED), CLD_STOPPED);
733 do_notify_parent_cldstop(p, p->parent,
734 CLD_STOPPED);
735 else
736 do_notify_parent_cldstop(
737 p->group_leader,
738 p->group_leader->real_parent,
739 CLD_STOPPED);
740 spin_lock(&p->sighand->siglock); 733 spin_lock(&p->sighand->siglock);
741 } 734 }
742 rm_from_queue(SIG_KERNEL_STOP_MASK, &p->signal->shared_pending); 735 rm_from_queue(SIG_KERNEL_STOP_MASK, &p->signal->shared_pending);
@@ -777,14 +770,7 @@ static void handle_stop_signal(int sig, struct task_struct *p)
777 p->signal->flags = SIGNAL_STOP_CONTINUED; 770 p->signal->flags = SIGNAL_STOP_CONTINUED;
778 p->signal->group_exit_code = 0; 771 p->signal->group_exit_code = 0;
779 spin_unlock(&p->sighand->siglock); 772 spin_unlock(&p->sighand->siglock);
780 if (p->ptrace & PT_PTRACED) 773 do_notify_parent_cldstop(p, (p->ptrace & PT_PTRACED), CLD_CONTINUED);
781 do_notify_parent_cldstop(p, p->parent,
782 CLD_CONTINUED);
783 else
784 do_notify_parent_cldstop(
785 p->group_leader,
786 p->group_leader->real_parent,
787 CLD_CONTINUED);
788 spin_lock(&p->sighand->siglock); 774 spin_lock(&p->sighand->siglock);
789 } else { 775 } else {
790 /* 776 /*
@@ -1380,16 +1366,16 @@ send_sigqueue(int sig, struct sigqueue *q, struct task_struct *p)
1380 unsigned long flags; 1366 unsigned long flags;
1381 int ret = 0; 1367 int ret = 0;
1382 1368
1383 /*
1384 * We need the tasklist lock even for the specific
1385 * thread case (when we don't need to follow the group
1386 * lists) in order to avoid races with "p->sighand"
1387 * going away or changing from under us.
1388 */
1389 BUG_ON(!(q->flags & SIGQUEUE_PREALLOC)); 1369 BUG_ON(!(q->flags & SIGQUEUE_PREALLOC));
1390 read_lock(&tasklist_lock); 1370 read_lock(&tasklist_lock);
1371
1372 if (unlikely(p->flags & PF_EXITING)) {
1373 ret = -1;
1374 goto out_err;
1375 }
1376
1391 spin_lock_irqsave(&p->sighand->siglock, flags); 1377 spin_lock_irqsave(&p->sighand->siglock, flags);
1392 1378
1393 if (unlikely(!list_empty(&q->list))) { 1379 if (unlikely(!list_empty(&q->list))) {
1394 /* 1380 /*
1395 * If an SI_TIMER entry is already queue just increment 1381 * If an SI_TIMER entry is already queue just increment
@@ -1399,7 +1385,7 @@ send_sigqueue(int sig, struct sigqueue *q, struct task_struct *p)
1399 BUG(); 1385 BUG();
1400 q->info.si_overrun++; 1386 q->info.si_overrun++;
1401 goto out; 1387 goto out;
1402 } 1388 }
1403 /* Short-circuit ignored signals. */ 1389 /* Short-circuit ignored signals. */
1404 if (sig_ignored(p, sig)) { 1390 if (sig_ignored(p, sig)) {
1405 ret = 1; 1391 ret = 1;
@@ -1414,8 +1400,10 @@ send_sigqueue(int sig, struct sigqueue *q, struct task_struct *p)
1414 1400
1415out: 1401out:
1416 spin_unlock_irqrestore(&p->sighand->siglock, flags); 1402 spin_unlock_irqrestore(&p->sighand->siglock, flags);
1403out_err:
1417 read_unlock(&tasklist_lock); 1404 read_unlock(&tasklist_lock);
1418 return(ret); 1405
1406 return ret;
1419} 1407}
1420 1408
1421int 1409int
@@ -1542,14 +1530,20 @@ void do_notify_parent(struct task_struct *tsk, int sig)
1542 spin_unlock_irqrestore(&psig->siglock, flags); 1530 spin_unlock_irqrestore(&psig->siglock, flags);
1543} 1531}
1544 1532
1545static void 1533static void do_notify_parent_cldstop(struct task_struct *tsk, int to_self, int why)
1546do_notify_parent_cldstop(struct task_struct *tsk, struct task_struct *parent,
1547 int why)
1548{ 1534{
1549 struct siginfo info; 1535 struct siginfo info;
1550 unsigned long flags; 1536 unsigned long flags;
1537 struct task_struct *parent;
1551 struct sighand_struct *sighand; 1538 struct sighand_struct *sighand;
1552 1539
1540 if (to_self)
1541 parent = tsk->parent;
1542 else {
1543 tsk = tsk->group_leader;
1544 parent = tsk->real_parent;
1545 }
1546
1553 info.si_signo = SIGCHLD; 1547 info.si_signo = SIGCHLD;
1554 info.si_errno = 0; 1548 info.si_errno = 0;
1555 info.si_pid = tsk->pid; 1549 info.si_pid = tsk->pid;
@@ -1618,8 +1612,7 @@ static void ptrace_stop(int exit_code, int nostop_code, siginfo_t *info)
1618 !(current->ptrace & PT_ATTACHED)) && 1612 !(current->ptrace & PT_ATTACHED)) &&
1619 (likely(current->parent->signal != current->signal) || 1613 (likely(current->parent->signal != current->signal) ||
1620 !unlikely(current->signal->flags & SIGNAL_GROUP_EXIT))) { 1614 !unlikely(current->signal->flags & SIGNAL_GROUP_EXIT))) {
1621 do_notify_parent_cldstop(current, current->parent, 1615 do_notify_parent_cldstop(current, 1, CLD_TRAPPED);
1622 CLD_TRAPPED);
1623 read_unlock(&tasklist_lock); 1616 read_unlock(&tasklist_lock);
1624 schedule(); 1617 schedule();
1625 } else { 1618 } else {
@@ -1668,25 +1661,25 @@ void ptrace_notify(int exit_code)
1668static void 1661static void
1669finish_stop(int stop_count) 1662finish_stop(int stop_count)
1670{ 1663{
1664 int to_self;
1665
1671 /* 1666 /*
1672 * If there are no other threads in the group, or if there is 1667 * If there are no other threads in the group, or if there is
1673 * a group stop in progress and we are the last to stop, 1668 * a group stop in progress and we are the last to stop,
1674 * report to the parent. When ptraced, every thread reports itself. 1669 * report to the parent. When ptraced, every thread reports itself.
1675 */ 1670 */
1676 if (stop_count < 0 || (current->ptrace & PT_PTRACED)) { 1671 if (stop_count < 0 || (current->ptrace & PT_PTRACED))
1677 read_lock(&tasklist_lock); 1672 to_self = 1;
1678 do_notify_parent_cldstop(current, current->parent, 1673 else if (stop_count == 0)
1679 CLD_STOPPED); 1674 to_self = 0;
1680 read_unlock(&tasklist_lock); 1675 else
1681 } 1676 goto out;
1682 else if (stop_count == 0) {
1683 read_lock(&tasklist_lock);
1684 do_notify_parent_cldstop(current->group_leader,
1685 current->group_leader->real_parent,
1686 CLD_STOPPED);
1687 read_unlock(&tasklist_lock);
1688 }
1689 1677
1678 read_lock(&tasklist_lock);
1679 do_notify_parent_cldstop(current, to_self, CLD_STOPPED);
1680 read_unlock(&tasklist_lock);
1681
1682out:
1690 schedule(); 1683 schedule();
1691 /* 1684 /*
1692 * Now we don't run again until continued. 1685 * Now we don't run again until continued.
diff --git a/kernel/softlockup.c b/kernel/softlockup.c
new file mode 100644
index 000000000000..75976209cea7
--- /dev/null
+++ b/kernel/softlockup.c
@@ -0,0 +1,151 @@
1/*
2 * Detect Soft Lockups
3 *
4 * started by Ingo Molnar, (C) 2005, Red Hat
5 *
 6 * this code detects soft lockups: incidents where, on a CPU,
7 * the kernel does not reschedule for 10 seconds or more.
8 */
9
10#include <linux/mm.h>
11#include <linux/cpu.h>
12#include <linux/init.h>
13#include <linux/delay.h>
14#include <linux/kthread.h>
15#include <linux/notifier.h>
16#include <linux/module.h>
17
18static DEFINE_SPINLOCK(print_lock);
19
20static DEFINE_PER_CPU(unsigned long, timestamp) = 0;
21static DEFINE_PER_CPU(unsigned long, print_timestamp) = 0;
22static DEFINE_PER_CPU(struct task_struct *, watchdog_task);
23
24static int did_panic = 0;
25static int softlock_panic(struct notifier_block *this, unsigned long event,
26 void *ptr)
27{
28 did_panic = 1;
29
30 return NOTIFY_DONE;
31}
32
33static struct notifier_block panic_block = {
34 .notifier_call = softlock_panic,
35};
36
37void touch_softlockup_watchdog(void)
38{
39 per_cpu(timestamp, raw_smp_processor_id()) = jiffies;
40}
41EXPORT_SYMBOL(touch_softlockup_watchdog);
42
43/*
44 * This callback runs from the timer interrupt, and checks
45 * whether the watchdog thread has hung or not:
46 */
47void softlockup_tick(struct pt_regs *regs)
48{
49 int this_cpu = smp_processor_id();
50 unsigned long timestamp = per_cpu(timestamp, this_cpu);
51
52 if (per_cpu(print_timestamp, this_cpu) == timestamp)
53 return;
54
55 /* Do not cause a second panic when there already was one */
56 if (did_panic)
57 return;
58
59 if (time_after(jiffies, timestamp + 10*HZ)) {
60 per_cpu(print_timestamp, this_cpu) = timestamp;
61
62 spin_lock(&print_lock);
63 printk(KERN_ERR "BUG: soft lockup detected on CPU#%d!\n",
64 this_cpu);
65 show_regs(regs);
66 spin_unlock(&print_lock);
67 }
68}
69
70/*
71 * The watchdog thread - runs every second and touches the timestamp.
72 */
73static int watchdog(void * __bind_cpu)
74{
75 struct sched_param param = { .sched_priority = 99 };
76 int this_cpu = (long) __bind_cpu;
77
78 printk("softlockup thread %d started up.\n", this_cpu);
79
80 sched_setscheduler(current, SCHED_FIFO, &param);
81 current->flags |= PF_NOFREEZE;
82
83 set_current_state(TASK_INTERRUPTIBLE);
84
85 /*
86 * Run briefly once per second - if this gets delayed for
87 * more than 10 seconds then the debug-printout triggers
88 * in softlockup_tick():
89 */
90 while (!kthread_should_stop()) {
91 msleep_interruptible(1000);
92 touch_softlockup_watchdog();
93 }
94 __set_current_state(TASK_RUNNING);
95
96 return 0;
97}
98
99/*
100 * Create/destroy watchdog threads as CPUs come and go:
101 */
102static int __devinit
103cpu_callback(struct notifier_block *nfb, unsigned long action, void *hcpu)
104{
105 int hotcpu = (unsigned long)hcpu;
106 struct task_struct *p;
107
108 switch (action) {
109 case CPU_UP_PREPARE:
110 BUG_ON(per_cpu(watchdog_task, hotcpu));
111 p = kthread_create(watchdog, hcpu, "watchdog/%d", hotcpu);
112 if (IS_ERR(p)) {
113 printk("watchdog for %i failed\n", hotcpu);
114 return NOTIFY_BAD;
115 }
116 per_cpu(watchdog_task, hotcpu) = p;
117 kthread_bind(p, hotcpu);
118 break;
119 case CPU_ONLINE:
120
121 wake_up_process(per_cpu(watchdog_task, hotcpu));
122 break;
123#ifdef CONFIG_HOTPLUG_CPU
124 case CPU_UP_CANCELED:
125 /* Unbind so it can run. Fall thru. */
126 kthread_bind(per_cpu(watchdog_task, hotcpu), smp_processor_id());
127 case CPU_DEAD:
128 p = per_cpu(watchdog_task, hotcpu);
129 per_cpu(watchdog_task, hotcpu) = NULL;
130 kthread_stop(p);
131 break;
132#endif /* CONFIG_HOTPLUG_CPU */
133 }
134 return NOTIFY_OK;
135}
136
137static struct notifier_block __devinitdata cpu_nfb = {
138 .notifier_call = cpu_callback
139};
140
141__init void spawn_softlockup_task(void)
142{
143 void *cpu = (void *)(long)smp_processor_id();
144
145 cpu_callback(&cpu_nfb, CPU_UP_PREPARE, cpu);
146 cpu_callback(&cpu_nfb, CPU_ONLINE, cpu);
147 register_cpu_notifier(&cpu_nfb);
148
149 notifier_chain_register(&panic_notifier_list, &panic_block);
150}
151
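
Editor's note: the new softlockup.c pairs a per-CPU watchdog thread, which refreshes a timestamp once a second, with softlockup_tick() run from the timer interrupt, which complains when that timestamp is more than 10 seconds stale. A userspace approximation is sketched below, with two threads and wall-clock time standing in for per-CPU data and jiffies.

#include <stdio.h>
#include <time.h>
#include <unistd.h>
#include <pthread.h>

static volatile time_t last_touch;

/* Analogue of touch_softlockup_watchdog(). */
static void touch_watchdog(void)
{
        last_touch = time(NULL);
}

/* Analogue of the per-CPU watchdog kthread: touch once per second. */
static void *watchdog(void *unused)
{
        (void)unused;
        for (;;) {
                touch_watchdog();
                sleep(1);
        }
        return NULL;
}

/* Analogue of softlockup_tick(): would normally run from a timer. */
static void check_tick(void)
{
        if (time(NULL) - last_touch > 10)
                fprintf(stderr, "BUG: soft lockup suspected\n");
}

int main(void)
{
        pthread_t tid;
        int i;

        touch_watchdog();
        pthread_create(&tid, NULL, watchdog, NULL);

        /* Pretend to be the timer interrupt for a few seconds. */
        for (i = 0; i < 3; i++) {
                sleep(1);
                check_tick();
        }
        return 0;
}
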
diff --git a/kernel/sys.c b/kernel/sys.c
index 0bcaed6560ac..c80412be2302 100644
--- a/kernel/sys.c
+++ b/kernel/sys.c
@@ -1711,7 +1711,6 @@ asmlinkage long sys_prctl(int option, unsigned long arg2, unsigned long arg3,
1711 unsigned long arg4, unsigned long arg5) 1711 unsigned long arg4, unsigned long arg5)
1712{ 1712{
1713 long error; 1713 long error;
1714 int sig;
1715 1714
1716 error = security_task_prctl(option, arg2, arg3, arg4, arg5); 1715 error = security_task_prctl(option, arg2, arg3, arg4, arg5);
1717 if (error) 1716 if (error)
@@ -1719,12 +1718,11 @@ asmlinkage long sys_prctl(int option, unsigned long arg2, unsigned long arg3,
1719 1718
1720 switch (option) { 1719 switch (option) {
1721 case PR_SET_PDEATHSIG: 1720 case PR_SET_PDEATHSIG:
1722 sig = arg2; 1721 if (!valid_signal(arg2)) {
1723 if (!valid_signal(sig)) {
1724 error = -EINVAL; 1722 error = -EINVAL;
1725 break; 1723 break;
1726 } 1724 }
1727 current->pdeath_signal = sig; 1725 current->pdeath_signal = arg2;
1728 break; 1726 break;
1729 case PR_GET_PDEATHSIG: 1727 case PR_GET_PDEATHSIG:
1730 error = put_user(current->pdeath_signal, (int __user *)arg2); 1728 error = put_user(current->pdeath_signal, (int __user *)arg2);
diff --git a/kernel/timer.c b/kernel/timer.c
index 5377f40723ff..13e2b513be01 100644
--- a/kernel/timer.c
+++ b/kernel/timer.c
@@ -950,6 +950,7 @@ void do_timer(struct pt_regs *regs)
950{ 950{
951 jiffies_64++; 951 jiffies_64++;
952 update_times(); 952 update_times();
953 softlockup_tick(regs);
953} 954}
954 955
955#ifdef __ARCH_WANT_SYS_ALARM 956#ifdef __ARCH_WANT_SYS_ALARM
@@ -1428,7 +1429,7 @@ static inline u64 time_interpolator_get_cycles(unsigned int src)
1428 } 1429 }
1429} 1430}
1430 1431
1431static inline u64 time_interpolator_get_counter(void) 1432static inline u64 time_interpolator_get_counter(int writelock)
1432{ 1433{
1433 unsigned int src = time_interpolator->source; 1434 unsigned int src = time_interpolator->source;
1434 1435
@@ -1442,6 +1443,15 @@ static inline u64 time_interpolator_get_counter(void)
1442 now = time_interpolator_get_cycles(src); 1443 now = time_interpolator_get_cycles(src);
1443 if (lcycle && time_after(lcycle, now)) 1444 if (lcycle && time_after(lcycle, now))
1444 return lcycle; 1445 return lcycle;
1446
1447 /* When holding the xtime write lock, there's no need
1448 * to add the overhead of the cmpxchg. Readers are
 1449 * forced to retry until the write lock is released.
1450 */
1451 if (writelock) {
1452 time_interpolator->last_cycle = now;
1453 return now;
1454 }
1445 /* Keep track of the last timer value returned. The use of cmpxchg here 1455 /* Keep track of the last timer value returned. The use of cmpxchg here
1446 * will cause contention in an SMP environment. 1456 * will cause contention in an SMP environment.
1447 */ 1457 */
@@ -1455,7 +1465,7 @@ static inline u64 time_interpolator_get_counter(void)
1455void time_interpolator_reset(void) 1465void time_interpolator_reset(void)
1456{ 1466{
1457 time_interpolator->offset = 0; 1467 time_interpolator->offset = 0;
1458 time_interpolator->last_counter = time_interpolator_get_counter(); 1468 time_interpolator->last_counter = time_interpolator_get_counter(1);
1459} 1469}
1460 1470
1461#define GET_TI_NSECS(count,i) (((((count) - i->last_counter) & (i)->mask) * (i)->nsec_per_cyc) >> (i)->shift) 1471#define GET_TI_NSECS(count,i) (((((count) - i->last_counter) & (i)->mask) * (i)->nsec_per_cyc) >> (i)->shift)
@@ -1467,7 +1477,7 @@ unsigned long time_interpolator_get_offset(void)
1467 return 0; 1477 return 0;
1468 1478
1469 return time_interpolator->offset + 1479 return time_interpolator->offset +
1470 GET_TI_NSECS(time_interpolator_get_counter(), time_interpolator); 1480 GET_TI_NSECS(time_interpolator_get_counter(0), time_interpolator);
1471} 1481}
1472 1482
1473#define INTERPOLATOR_ADJUST 65536 1483#define INTERPOLATOR_ADJUST 65536
@@ -1490,7 +1500,7 @@ static void time_interpolator_update(long delta_nsec)
1490 * and the tuning logic insures that. 1500 * and the tuning logic insures that.
1491 */ 1501 */
1492 1502
1493 counter = time_interpolator_get_counter(); 1503 counter = time_interpolator_get_counter(1);
1494 offset = time_interpolator->offset + GET_TI_NSECS(counter, time_interpolator); 1504 offset = time_interpolator->offset + GET_TI_NSECS(counter, time_interpolator);
1495 1505
1496 if (delta_nsec < 0 || (unsigned long) delta_nsec < offset) 1506 if (delta_nsec < 0 || (unsigned long) delta_nsec < offset)
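
The writelock parameter added above lets the xtime writer bypass the cmpxchg that readers use to keep the returned counter monotonic. A rough user-space model of that reader/writer split, written with C11 atomics purely for illustration (the kernel version also handles counter wrap via time_after(), which is omitted here):

#include <stdatomic.h>
#include <stdint.h>

static _Atomic uint64_t last_cycle;

/* Reader path: never report a value older than one already published,
 * and race benignly with other readers via compare-and-swap. */
static uint64_t counter_read(uint64_t raw)
{
	uint64_t prev = atomic_load(&last_cycle);

	if (prev > raw)
		return prev;
	atomic_compare_exchange_strong(&last_cycle, &prev, raw);
	return raw;
}

/* Writer path (holding the time write lock): readers are forced to
 * retry anyway, so a plain store suffices and the CAS cost is saved. */
static uint64_t counter_read_writelocked(uint64_t raw)
{
	atomic_store(&last_cycle, raw);
	return raw;
}
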
diff --git a/kernel/workqueue.c b/kernel/workqueue.c
index c7e36d4a70ca..91bacb13a7e2 100644
--- a/kernel/workqueue.c
+++ b/kernel/workqueue.c
@@ -308,10 +308,9 @@ struct workqueue_struct *__create_workqueue(const char *name,
308 struct workqueue_struct *wq; 308 struct workqueue_struct *wq;
309 struct task_struct *p; 309 struct task_struct *p;
310 310
311 wq = kmalloc(sizeof(*wq), GFP_KERNEL); 311 wq = kzalloc(sizeof(*wq), GFP_KERNEL);
312 if (!wq) 312 if (!wq)
313 return NULL; 313 return NULL;
314 memset(wq, 0, sizeof(*wq));
315 314
316 wq->name = name; 315 wq->name = name;
317 /* We don't need the distraction of CPUs appearing and vanishing. */ 316 /* We don't need the distraction of CPUs appearing and vanishing. */
@@ -499,7 +498,7 @@ static int __devinit workqueue_cpu_callback(struct notifier_block *nfb,
499 case CPU_UP_PREPARE: 498 case CPU_UP_PREPARE:
500 /* Create a new workqueue thread for it. */ 499 /* Create a new workqueue thread for it. */
501 list_for_each_entry(wq, &workqueues, list) { 500 list_for_each_entry(wq, &workqueues, list) {
502 if (create_workqueue_thread(wq, hotcpu) < 0) { 501 if (!create_workqueue_thread(wq, hotcpu)) {
503 printk("workqueue for %i failed\n", hotcpu); 502 printk("workqueue for %i failed\n", hotcpu);
504 return NOTIFY_BAD; 503 return NOTIFY_BAD;
505 } 504 }
diff --git a/lib/Kconfig.debug b/lib/Kconfig.debug
index 299f7f3b5b08..3754c9a8f5c8 100644
--- a/lib/Kconfig.debug
+++ b/lib/Kconfig.debug
@@ -46,6 +46,25 @@ config LOG_BUF_SHIFT
46 13 => 8 KB 46 13 => 8 KB
47 12 => 4 KB 47 12 => 4 KB
48 48
49config DETECT_SOFTLOCKUP
50 bool "Detect Soft Lockups"
51 depends on DEBUG_KERNEL
52 default y
53 help
54 Say Y here to enable the kernel to detect "soft lockups",
55 which are bugs that cause the kernel to loop in kernel
56 mode for more than 10 seconds, without giving other tasks a
57 chance to run.
58
59 When a soft-lockup is detected, the kernel will print the
60 current stack trace (which you should report), but the
61 system will stay locked up. This feature has negligible
62 overhead.
63
 64 (Note that "hard lockups" are a separate type of bug that
65 can be detected via the NMI-watchdog, on platforms that
66 support it.)
67
49config SCHEDSTATS 68config SCHEDSTATS
50 bool "Collect scheduler statistics" 69 bool "Collect scheduler statistics"
51 depends on DEBUG_KERNEL && PROC_FS 70 depends on DEBUG_KERNEL && PROC_FS
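
The DETECT_SOFTLOCKUP help text above summarises the mechanism: a per-CPU watchdog thread keeps touching a timestamp, and the timer tick (the new softlockup_tick() call in do_timer() earlier in this patch) complains when that timestamp goes stale for more than 10 seconds. A crude single-CPU user-space analogue, with made-up names, just to show the shape of the check:

#include <pthread.h>
#include <stdio.h>
#include <time.h>
#include <unistd.h>

static volatile time_t touch_ts;	/* last time the watchdog ran */

static void *watchdog(void *unused)
{
	(void)unused;
	for (;;) {
		touch_ts = time(NULL);	/* "touch" the watchdog */
		sleep(1);
	}
	return NULL;
}

/* Stand-in for softlockup_tick(): called periodically from elsewhere. */
static void tick(void)
{
	if (touch_ts && time(NULL) - touch_ts > 10)
		fprintf(stderr, "BUG: soft lockup detected\n");
}

int main(void)
{
	pthread_t tid;

	pthread_create(&tid, NULL, watchdog, NULL);
	for (;;) {
		tick();
		sleep(1);
	}
}
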
diff --git a/lib/radix-tree.c b/lib/radix-tree.c
index 10bed1c8c3c3..b972dd29289d 100644
--- a/lib/radix-tree.c
+++ b/lib/radix-tree.c
@@ -1,6 +1,7 @@
1/* 1/*
2 * Copyright (C) 2001 Momchil Velikov 2 * Copyright (C) 2001 Momchil Velikov
3 * Portions Copyright (C) 2001 Christoph Hellwig 3 * Portions Copyright (C) 2001 Christoph Hellwig
4 * Copyright (C) 2005 SGI, Christoph Lameter <clameter@sgi.com>
4 * 5 *
5 * This program is free software; you can redistribute it and/or 6 * This program is free software; you can redistribute it and/or
6 * modify it under the terms of the GNU General Public License as 7 * modify it under the terms of the GNU General Public License as
@@ -51,7 +52,7 @@ struct radix_tree_node {
51}; 52};
52 53
53struct radix_tree_path { 54struct radix_tree_path {
54 struct radix_tree_node *node, **slot; 55 struct radix_tree_node *node;
55 int offset; 56 int offset;
56}; 57};
57 58
@@ -227,7 +228,7 @@ out:
227int radix_tree_insert(struct radix_tree_root *root, 228int radix_tree_insert(struct radix_tree_root *root,
228 unsigned long index, void *item) 229 unsigned long index, void *item)
229{ 230{
230 struct radix_tree_node *node = NULL, *tmp, **slot; 231 struct radix_tree_node *node = NULL, *slot;
231 unsigned int height, shift; 232 unsigned int height, shift;
232 int offset; 233 int offset;
233 int error; 234 int error;
@@ -240,38 +241,42 @@ int radix_tree_insert(struct radix_tree_root *root,
240 return error; 241 return error;
241 } 242 }
242 243
243 slot = &root->rnode; 244 slot = root->rnode;
244 height = root->height; 245 height = root->height;
245 shift = (height-1) * RADIX_TREE_MAP_SHIFT; 246 shift = (height-1) * RADIX_TREE_MAP_SHIFT;
246 247
247 offset = 0; /* uninitialised var warning */ 248 offset = 0; /* uninitialised var warning */
248 while (height > 0) { 249 while (height > 0) {
249 if (*slot == NULL) { 250 if (slot == NULL) {
250 /* Have to add a child node. */ 251 /* Have to add a child node. */
251 if (!(tmp = radix_tree_node_alloc(root))) 252 if (!(slot = radix_tree_node_alloc(root)))
252 return -ENOMEM; 253 return -ENOMEM;
253 *slot = tmp; 254 if (node) {
254 if (node) 255 node->slots[offset] = slot;
255 node->count++; 256 node->count++;
257 } else
258 root->rnode = slot;
256 } 259 }
257 260
258 /* Go a level down */ 261 /* Go a level down */
259 offset = (index >> shift) & RADIX_TREE_MAP_MASK; 262 offset = (index >> shift) & RADIX_TREE_MAP_MASK;
260 node = *slot; 263 node = slot;
261 slot = (struct radix_tree_node **)(node->slots + offset); 264 slot = node->slots[offset];
262 shift -= RADIX_TREE_MAP_SHIFT; 265 shift -= RADIX_TREE_MAP_SHIFT;
263 height--; 266 height--;
264 } 267 }
265 268
266 if (*slot != NULL) 269 if (slot != NULL)
267 return -EEXIST; 270 return -EEXIST;
271
268 if (node) { 272 if (node) {
269 node->count++; 273 node->count++;
274 node->slots[offset] = item;
270 BUG_ON(tag_get(node, 0, offset)); 275 BUG_ON(tag_get(node, 0, offset));
271 BUG_ON(tag_get(node, 1, offset)); 276 BUG_ON(tag_get(node, 1, offset));
272 } 277 } else
278 root->rnode = item;
273 279
274 *slot = item;
275 return 0; 280 return 0;
276} 281}
277EXPORT_SYMBOL(radix_tree_insert); 282EXPORT_SYMBOL(radix_tree_insert);
@@ -286,27 +291,25 @@ EXPORT_SYMBOL(radix_tree_insert);
286void *radix_tree_lookup(struct radix_tree_root *root, unsigned long index) 291void *radix_tree_lookup(struct radix_tree_root *root, unsigned long index)
287{ 292{
288 unsigned int height, shift; 293 unsigned int height, shift;
289 struct radix_tree_node **slot; 294 struct radix_tree_node *slot;
290 295
291 height = root->height; 296 height = root->height;
292 if (index > radix_tree_maxindex(height)) 297 if (index > radix_tree_maxindex(height))
293 return NULL; 298 return NULL;
294 299
295 shift = (height-1) * RADIX_TREE_MAP_SHIFT; 300 shift = (height-1) * RADIX_TREE_MAP_SHIFT;
296 slot = &root->rnode; 301 slot = root->rnode;
297 302
298 while (height > 0) { 303 while (height > 0) {
299 if (*slot == NULL) 304 if (slot == NULL)
300 return NULL; 305 return NULL;
301 306
302 slot = (struct radix_tree_node **) 307 slot = slot->slots[(index >> shift) & RADIX_TREE_MAP_MASK];
303 ((*slot)->slots +
304 ((index >> shift) & RADIX_TREE_MAP_MASK));
305 shift -= RADIX_TREE_MAP_SHIFT; 308 shift -= RADIX_TREE_MAP_SHIFT;
306 height--; 309 height--;
307 } 310 }
308 311
309 return *slot; 312 return slot;
310} 313}
311EXPORT_SYMBOL(radix_tree_lookup); 314EXPORT_SYMBOL(radix_tree_lookup);
312 315
@@ -326,27 +329,27 @@ void *radix_tree_tag_set(struct radix_tree_root *root,
326 unsigned long index, int tag) 329 unsigned long index, int tag)
327{ 330{
328 unsigned int height, shift; 331 unsigned int height, shift;
329 struct radix_tree_node **slot; 332 struct radix_tree_node *slot;
330 333
331 height = root->height; 334 height = root->height;
332 if (index > radix_tree_maxindex(height)) 335 if (index > radix_tree_maxindex(height))
333 return NULL; 336 return NULL;
334 337
335 shift = (height - 1) * RADIX_TREE_MAP_SHIFT; 338 shift = (height - 1) * RADIX_TREE_MAP_SHIFT;
336 slot = &root->rnode; 339 slot = root->rnode;
337 340
338 while (height > 0) { 341 while (height > 0) {
339 int offset; 342 int offset;
340 343
341 offset = (index >> shift) & RADIX_TREE_MAP_MASK; 344 offset = (index >> shift) & RADIX_TREE_MAP_MASK;
342 tag_set(*slot, tag, offset); 345 tag_set(slot, tag, offset);
343 slot = (struct radix_tree_node **)((*slot)->slots + offset); 346 slot = slot->slots[offset];
344 BUG_ON(*slot == NULL); 347 BUG_ON(slot == NULL);
345 shift -= RADIX_TREE_MAP_SHIFT; 348 shift -= RADIX_TREE_MAP_SHIFT;
346 height--; 349 height--;
347 } 350 }
348 351
349 return *slot; 352 return slot;
350} 353}
351EXPORT_SYMBOL(radix_tree_tag_set); 354EXPORT_SYMBOL(radix_tree_tag_set);
352 355
@@ -367,6 +370,7 @@ void *radix_tree_tag_clear(struct radix_tree_root *root,
367 unsigned long index, int tag) 370 unsigned long index, int tag)
368{ 371{
369 struct radix_tree_path path[RADIX_TREE_MAX_PATH], *pathp = path; 372 struct radix_tree_path path[RADIX_TREE_MAX_PATH], *pathp = path;
373 struct radix_tree_node *slot;
370 unsigned int height, shift; 374 unsigned int height, shift;
371 void *ret = NULL; 375 void *ret = NULL;
372 376
@@ -376,38 +380,37 @@ void *radix_tree_tag_clear(struct radix_tree_root *root,
376 380
377 shift = (height - 1) * RADIX_TREE_MAP_SHIFT; 381 shift = (height - 1) * RADIX_TREE_MAP_SHIFT;
378 pathp->node = NULL; 382 pathp->node = NULL;
379 pathp->slot = &root->rnode; 383 slot = root->rnode;
380 384
381 while (height > 0) { 385 while (height > 0) {
382 int offset; 386 int offset;
383 387
384 if (*pathp->slot == NULL) 388 if (slot == NULL)
385 goto out; 389 goto out;
386 390
387 offset = (index >> shift) & RADIX_TREE_MAP_MASK; 391 offset = (index >> shift) & RADIX_TREE_MAP_MASK;
388 pathp[1].offset = offset; 392 pathp[1].offset = offset;
389 pathp[1].node = *pathp[0].slot; 393 pathp[1].node = slot;
390 pathp[1].slot = (struct radix_tree_node **) 394 slot = slot->slots[offset];
391 (pathp[1].node->slots + offset);
392 pathp++; 395 pathp++;
393 shift -= RADIX_TREE_MAP_SHIFT; 396 shift -= RADIX_TREE_MAP_SHIFT;
394 height--; 397 height--;
395 } 398 }
396 399
397 ret = *pathp[0].slot; 400 ret = slot;
398 if (ret == NULL) 401 if (ret == NULL)
399 goto out; 402 goto out;
400 403
401 do { 404 do {
402 int idx; 405 int idx;
403 406
404 tag_clear(pathp[0].node, tag, pathp[0].offset); 407 tag_clear(pathp->node, tag, pathp->offset);
405 for (idx = 0; idx < RADIX_TREE_TAG_LONGS; idx++) { 408 for (idx = 0; idx < RADIX_TREE_TAG_LONGS; idx++) {
406 if (pathp[0].node->tags[tag][idx]) 409 if (pathp->node->tags[tag][idx])
407 goto out; 410 goto out;
408 } 411 }
409 pathp--; 412 pathp--;
410 } while (pathp[0].node); 413 } while (pathp->node);
411out: 414out:
412 return ret; 415 return ret;
413} 416}
@@ -415,21 +418,22 @@ EXPORT_SYMBOL(radix_tree_tag_clear);
415 418
416#ifndef __KERNEL__ /* Only the test harness uses this at present */ 419#ifndef __KERNEL__ /* Only the test harness uses this at present */
417/** 420/**
418 * radix_tree_tag_get - get a tag on a radix tree node 421 * radix_tree_tag_get - get a tag on a radix tree node
419 * @root: radix tree root 422 * @root: radix tree root
420 * @index: index key 423 * @index: index key
421 * @tag: tag index 424 * @tag: tag index
422 * 425 *
423 * Return the search tag corresponging to @index in the radix tree. 426 * Return values:
424 * 427 *
425 * Returns zero if the tag is unset, or if there is no corresponding item 428 * 0: tag not present
426 * in the tree. 429 * 1: tag present, set
430 * -1: tag present, unset
427 */ 431 */
428int radix_tree_tag_get(struct radix_tree_root *root, 432int radix_tree_tag_get(struct radix_tree_root *root,
429 unsigned long index, int tag) 433 unsigned long index, int tag)
430{ 434{
431 unsigned int height, shift; 435 unsigned int height, shift;
432 struct radix_tree_node **slot; 436 struct radix_tree_node *slot;
433 int saw_unset_tag = 0; 437 int saw_unset_tag = 0;
434 438
435 height = root->height; 439 height = root->height;
@@ -437,12 +441,12 @@ int radix_tree_tag_get(struct radix_tree_root *root,
437 return 0; 441 return 0;
438 442
439 shift = (height - 1) * RADIX_TREE_MAP_SHIFT; 443 shift = (height - 1) * RADIX_TREE_MAP_SHIFT;
440 slot = &root->rnode; 444 slot = root->rnode;
441 445
442 for ( ; ; ) { 446 for ( ; ; ) {
443 int offset; 447 int offset;
444 448
445 if (*slot == NULL) 449 if (slot == NULL)
446 return 0; 450 return 0;
447 451
448 offset = (index >> shift) & RADIX_TREE_MAP_MASK; 452 offset = (index >> shift) & RADIX_TREE_MAP_MASK;
@@ -451,15 +455,15 @@ int radix_tree_tag_get(struct radix_tree_root *root,
451 * This is just a debug check. Later, we can bale as soon as 455 * This is just a debug check. Later, we can bale as soon as
452 * we see an unset tag. 456 * we see an unset tag.
453 */ 457 */
454 if (!tag_get(*slot, tag, offset)) 458 if (!tag_get(slot, tag, offset))
455 saw_unset_tag = 1; 459 saw_unset_tag = 1;
456 if (height == 1) { 460 if (height == 1) {
457 int ret = tag_get(*slot, tag, offset); 461 int ret = tag_get(slot, tag, offset);
458 462
459 BUG_ON(ret && saw_unset_tag); 463 BUG_ON(ret && saw_unset_tag);
460 return ret; 464 return ret ? 1 : -1;
461 } 465 }
462 slot = (struct radix_tree_node **)((*slot)->slots + offset); 466 slot = slot->slots[offset];
463 shift -= RADIX_TREE_MAP_SHIFT; 467 shift -= RADIX_TREE_MAP_SHIFT;
464 height--; 468 height--;
465 } 469 }
@@ -472,17 +476,21 @@ __lookup(struct radix_tree_root *root, void **results, unsigned long index,
472 unsigned int max_items, unsigned long *next_index) 476 unsigned int max_items, unsigned long *next_index)
473{ 477{
474 unsigned int nr_found = 0; 478 unsigned int nr_found = 0;
475 unsigned int shift; 479 unsigned int shift, height;
476 unsigned int height = root->height;
477 struct radix_tree_node *slot; 480 struct radix_tree_node *slot;
481 unsigned long i;
482
483 height = root->height;
484 if (height == 0)
485 goto out;
478 486
479 shift = (height-1) * RADIX_TREE_MAP_SHIFT; 487 shift = (height-1) * RADIX_TREE_MAP_SHIFT;
480 slot = root->rnode; 488 slot = root->rnode;
481 489
482 while (height > 0) { 490 for ( ; height > 1; height--) {
483 unsigned long i = (index >> shift) & RADIX_TREE_MAP_MASK;
484 491
485 for ( ; i < RADIX_TREE_MAP_SIZE; i++) { 492 for (i = (index >> shift) & RADIX_TREE_MAP_MASK ;
493 i < RADIX_TREE_MAP_SIZE; i++) {
486 if (slot->slots[i] != NULL) 494 if (slot->slots[i] != NULL)
487 break; 495 break;
488 index &= ~((1UL << shift) - 1); 496 index &= ~((1UL << shift) - 1);
@@ -492,22 +500,20 @@ __lookup(struct radix_tree_root *root, void **results, unsigned long index,
492 } 500 }
493 if (i == RADIX_TREE_MAP_SIZE) 501 if (i == RADIX_TREE_MAP_SIZE)
494 goto out; 502 goto out;
495 height--;
496 if (height == 0) { /* Bottom level: grab some items */
497 unsigned long j = index & RADIX_TREE_MAP_MASK;
498 503
499 for ( ; j < RADIX_TREE_MAP_SIZE; j++) {
500 index++;
501 if (slot->slots[j]) {
502 results[nr_found++] = slot->slots[j];
503 if (nr_found == max_items)
504 goto out;
505 }
506 }
507 }
508 shift -= RADIX_TREE_MAP_SHIFT; 504 shift -= RADIX_TREE_MAP_SHIFT;
509 slot = slot->slots[i]; 505 slot = slot->slots[i];
510 } 506 }
507
508 /* Bottom level: grab some items */
509 for (i = index & RADIX_TREE_MAP_MASK; i < RADIX_TREE_MAP_SIZE; i++) {
510 index++;
511 if (slot->slots[i]) {
512 results[nr_found++] = slot->slots[i];
513 if (nr_found == max_items)
514 goto out;
515 }
516 }
511out: 517out:
512 *next_index = index; 518 *next_index = index;
513 return nr_found; 519 return nr_found;
@@ -655,6 +661,7 @@ void *radix_tree_delete(struct radix_tree_root *root, unsigned long index)
655{ 661{
656 struct radix_tree_path path[RADIX_TREE_MAX_PATH], *pathp = path; 662 struct radix_tree_path path[RADIX_TREE_MAX_PATH], *pathp = path;
657 struct radix_tree_path *orig_pathp; 663 struct radix_tree_path *orig_pathp;
664 struct radix_tree_node *slot;
658 unsigned int height, shift; 665 unsigned int height, shift;
659 void *ret = NULL; 666 void *ret = NULL;
660 char tags[RADIX_TREE_TAGS]; 667 char tags[RADIX_TREE_TAGS];
@@ -666,25 +673,23 @@ void *radix_tree_delete(struct radix_tree_root *root, unsigned long index)
666 673
667 shift = (height - 1) * RADIX_TREE_MAP_SHIFT; 674 shift = (height - 1) * RADIX_TREE_MAP_SHIFT;
668 pathp->node = NULL; 675 pathp->node = NULL;
669 pathp->slot = &root->rnode; 676 slot = root->rnode;
670 677
671 while (height > 0) { 678 for ( ; height > 0; height--) {
672 int offset; 679 int offset;
673 680
674 if (*pathp->slot == NULL) 681 if (slot == NULL)
675 goto out; 682 goto out;
676 683
677 offset = (index >> shift) & RADIX_TREE_MAP_MASK; 684 offset = (index >> shift) & RADIX_TREE_MAP_MASK;
678 pathp[1].offset = offset; 685 pathp[1].offset = offset;
679 pathp[1].node = *pathp[0].slot; 686 pathp[1].node = slot;
680 pathp[1].slot = (struct radix_tree_node **) 687 slot = slot->slots[offset];
681 (pathp[1].node->slots + offset);
682 pathp++; 688 pathp++;
683 shift -= RADIX_TREE_MAP_SHIFT; 689 shift -= RADIX_TREE_MAP_SHIFT;
684 height--;
685 } 690 }
686 691
687 ret = *pathp[0].slot; 692 ret = slot;
688 if (ret == NULL) 693 if (ret == NULL)
689 goto out; 694 goto out;
690 695
@@ -704,10 +709,10 @@ void *radix_tree_delete(struct radix_tree_root *root, unsigned long index)
704 if (tags[tag]) 709 if (tags[tag])
705 continue; 710 continue;
706 711
707 tag_clear(pathp[0].node, tag, pathp[0].offset); 712 tag_clear(pathp->node, tag, pathp->offset);
708 713
709 for (idx = 0; idx < RADIX_TREE_TAG_LONGS; idx++) { 714 for (idx = 0; idx < RADIX_TREE_TAG_LONGS; idx++) {
710 if (pathp[0].node->tags[tag][idx]) { 715 if (pathp->node->tags[tag][idx]) {
711 tags[tag] = 1; 716 tags[tag] = 1;
712 nr_cleared_tags--; 717 nr_cleared_tags--;
713 break; 718 break;
@@ -715,18 +720,19 @@ void *radix_tree_delete(struct radix_tree_root *root, unsigned long index)
715 } 720 }
716 } 721 }
717 pathp--; 722 pathp--;
718 } while (pathp[0].node && nr_cleared_tags); 723 } while (pathp->node && nr_cleared_tags);
719 724
720 pathp = orig_pathp; 725 /* Now free the nodes we do not need anymore */
721 *pathp[0].slot = NULL; 726 for (pathp = orig_pathp; pathp->node; pathp--) {
722 while (pathp[0].node && --pathp[0].node->count == 0) { 727 pathp->node->slots[pathp->offset] = NULL;
723 pathp--; 728 if (--pathp->node->count)
724 BUG_ON(*pathp[0].slot == NULL); 729 goto out;
725 *pathp[0].slot = NULL; 730
726 radix_tree_node_free(pathp[1].node); 731 /* Node with zero slots in use so free it */
732 radix_tree_node_free(pathp->node);
727 } 733 }
728 if (root->rnode == NULL) 734 root->rnode = NULL;
729 root->height = 0; 735 root->height = 0;
730out: 736out:
731 return ret; 737 return ret;
732} 738}
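
The radix-tree rewrite above replaces indirect slot pointers (struct radix_tree_node **) with direct node pointers throughout the walkers. Stripped of tags and locking, the lookup walk now has this shape; the sketch below is a stand-alone user-space model with illustrative constants (the kernel's RADIX_TREE_MAP_SHIFT and friends vary by configuration):

#include <assert.h>
#include <stdio.h>

#define MAP_SHIFT	6
#define MAP_SIZE	(1UL << MAP_SHIFT)
#define MAP_MASK	(MAP_SIZE - 1)

struct node {
	void *slots[MAP_SIZE];
};

/* Walk 'height' levels of internal nodes by direct pointer, then return
 * whatever hangs off the final slot (the stored item, or NULL). */
static void *lookup(struct node *root, unsigned int height, unsigned long index)
{
	struct node *slot = root;
	unsigned int shift;

	if (height == 0)
		return NULL;

	shift = (height - 1) * MAP_SHIFT;
	while (height > 0) {
		if (slot == NULL)
			return NULL;
		slot = slot->slots[(index >> shift) & MAP_MASK];
		shift -= MAP_SHIFT;
		height--;
	}
	return slot;
}

int main(void)
{
	static struct node root, child;
	int item = 42;

	/* Height 2, index 65: root slot 1 -> child slot 1 -> item. */
	root.slots[1] = &child;
	child.slots[1] = &item;

	assert(*(int *)lookup(&root, 2, 65) == 42);
	printf("ok\n");
	return 0;
}
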
diff --git a/mm/mmap.c b/mm/mmap.c
index 404319477e71..12334aecf8ad 100644
--- a/mm/mmap.c
+++ b/mm/mmap.c
@@ -61,7 +61,7 @@ pgprot_t protection_map[16] = {
61 61
62int sysctl_overcommit_memory = OVERCOMMIT_GUESS; /* heuristic overcommit */ 62int sysctl_overcommit_memory = OVERCOMMIT_GUESS; /* heuristic overcommit */
63int sysctl_overcommit_ratio = 50; /* default is 50% */ 63int sysctl_overcommit_ratio = 50; /* default is 50% */
64int sysctl_max_map_count = DEFAULT_MAX_MAP_COUNT; 64int sysctl_max_map_count __read_mostly = DEFAULT_MAX_MAP_COUNT;
65atomic_t vm_committed_space = ATOMIC_INIT(0); 65atomic_t vm_committed_space = ATOMIC_INIT(0);
66 66
67/* 67/*
@@ -203,13 +203,6 @@ static void remove_vm_struct(struct vm_area_struct *vma)
203 kmem_cache_free(vm_area_cachep, vma); 203 kmem_cache_free(vm_area_cachep, vma);
204} 204}
205 205
206/*
207 * sys_brk() for the most part doesn't need the global kernel
208 * lock, except when an application is doing something nasty
209 * like trying to un-brk an area that has already been mapped
210 * to a regular file. in this case, the unmapping will need
211 * to invoke file system routines that need the global lock.
212 */
213asmlinkage unsigned long sys_brk(unsigned long brk) 206asmlinkage unsigned long sys_brk(unsigned long brk)
214{ 207{
215 unsigned long rlim, retval; 208 unsigned long rlim, retval;
diff --git a/mm/oom_kill.c b/mm/oom_kill.c
index 1e56076672f5..5ec8da12cfd9 100644
--- a/mm/oom_kill.c
+++ b/mm/oom_kill.c
@@ -6,8 +6,8 @@
6 * for goading me into coding this file... 6 * for goading me into coding this file...
7 * 7 *
8 * The routines in this file are used to kill a process when 8 * The routines in this file are used to kill a process when
9 * we're seriously out of memory. This gets called from kswapd() 9 * we're seriously out of memory. This gets called from __alloc_pages()
10 * in linux/mm/vmscan.c when we really run out of memory. 10 * in mm/page_alloc.c when we really run out of memory.
11 * 11 *
12 * Since we won't call these routines often (on a well-configured 12 * Since we won't call these routines often (on a well-configured
13 * machine) this file will double as a 'coding guide' and a signpost 13 * machine) this file will double as a 'coding guide' and a signpost
@@ -20,13 +20,14 @@
20#include <linux/swap.h> 20#include <linux/swap.h>
21#include <linux/timex.h> 21#include <linux/timex.h>
22#include <linux/jiffies.h> 22#include <linux/jiffies.h>
23#include <linux/cpuset.h>
23 24
24/* #define DEBUG */ 25/* #define DEBUG */
25 26
26/** 27/**
27 * oom_badness - calculate a numeric value for how bad this task has been 28 * oom_badness - calculate a numeric value for how bad this task has been
28 * @p: task struct of which task we should calculate 29 * @p: task struct of which task we should calculate
29 * @p: current uptime in seconds 30 * @uptime: current uptime in seconds
30 * 31 *
31 * The formula used is relatively simple and documented inline in the 32 * The formula used is relatively simple and documented inline in the
32 * function. The main rationale is that we want to select a good task 33 * function. The main rationale is that we want to select a good task
@@ -57,9 +58,9 @@ unsigned long badness(struct task_struct *p, unsigned long uptime)
57 58
58 /* 59 /*
59 * Processes which fork a lot of child processes are likely 60 * Processes which fork a lot of child processes are likely
60 * a good choice. We add the vmsize of the childs if they 61 * a good choice. We add the vmsize of the children if they
61 * have an own mm. This prevents forking servers to flood the 62 * have an own mm. This prevents forking servers to flood the
62 * machine with an endless amount of childs 63 * machine with an endless amount of children
63 */ 64 */
64 list_for_each(tsk, &p->children) { 65 list_for_each(tsk, &p->children) {
65 struct task_struct *chld; 66 struct task_struct *chld;
@@ -143,28 +144,36 @@ static struct task_struct * select_bad_process(void)
143 struct timespec uptime; 144 struct timespec uptime;
144 145
145 do_posix_clock_monotonic_gettime(&uptime); 146 do_posix_clock_monotonic_gettime(&uptime);
146 do_each_thread(g, p) 147 do_each_thread(g, p) {
148 unsigned long points;
149 int releasing;
150
147 /* skip the init task with pid == 1 */ 151 /* skip the init task with pid == 1 */
148 if (p->pid > 1 && p->oomkilladj != OOM_DISABLE) { 152 if (p->pid == 1)
149 unsigned long points; 153 continue;
150 154 if (p->oomkilladj == OOM_DISABLE)
151 /* 155 continue;
152 * This is in the process of releasing memory so wait it 156 /* If p's nodes don't overlap ours, it won't help to kill p. */
153 * to finish before killing some other task by mistake. 157 if (!cpuset_excl_nodes_overlap(p))
154 */ 158 continue;
155 if ((unlikely(test_tsk_thread_flag(p, TIF_MEMDIE)) || (p->flags & PF_EXITING)) && 159
156 !(p->flags & PF_DEAD)) 160 /*
157 return ERR_PTR(-1UL); 161 * This is in the process of releasing memory so for wait it
158 if (p->flags & PF_SWAPOFF) 162 * to finish before killing some other task by mistake.
159 return p; 163 */
160 164 releasing = test_tsk_thread_flag(p, TIF_MEMDIE) ||
161 points = badness(p, uptime.tv_sec); 165 p->flags & PF_EXITING;
162 if (points > maxpoints || !chosen) { 166 if (releasing && !(p->flags & PF_DEAD))
163 chosen = p; 167 return ERR_PTR(-1UL);
164 maxpoints = points; 168 if (p->flags & PF_SWAPOFF)
165 } 169 return p;
170
171 points = badness(p, uptime.tv_sec);
172 if (points > maxpoints || !chosen) {
173 chosen = p;
174 maxpoints = points;
166 } 175 }
167 while_each_thread(g, p); 176 } while_each_thread(g, p);
168 return chosen; 177 return chosen;
169} 178}
170 179
@@ -189,7 +198,8 @@ static void __oom_kill_task(task_t *p)
189 return; 198 return;
190 } 199 }
191 task_unlock(p); 200 task_unlock(p);
192 printk(KERN_ERR "Out of Memory: Killed process %d (%s).\n", p->pid, p->comm); 201 printk(KERN_ERR "Out of Memory: Killed process %d (%s).\n",
202 p->pid, p->comm);
193 203
194 /* 204 /*
195 * We give our sacrificial lamb high priority and access to 205 * We give our sacrificial lamb high priority and access to
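
Rewritten as a flat series of skip tests, the victim selection above is easier to audit. A stripped-down model of the same shape, for illustration only (the struct and fields are invented stand-ins for task_struct; the early-exit cases for tasks already releasing memory or running swapoff are left out):

#include <stddef.h>

struct candidate {
	int pid;
	int oom_disabled;		/* p->oomkilladj == OOM_DISABLE */
	int nodes_overlap;		/* cpuset_excl_nodes_overlap(p) */
	unsigned long points;		/* badness(p, uptime) */
};

static struct candidate *select_victim(struct candidate *tasks, size_t n)
{
	struct candidate *chosen = NULL;
	unsigned long maxpoints = 0;
	size_t i;

	for (i = 0; i < n; i++) {
		struct candidate *p = &tasks[i];

		if (p->pid == 1)		/* never pick init */
			continue;
		if (p->oom_disabled)
			continue;
		if (!p->nodes_overlap)		/* killing it frees nothing useful */
			continue;

		if (p->points > maxpoints || !chosen) {
			chosen = p;
			maxpoints = p->points;
		}
	}
	return chosen;
}
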
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index b06a9636d971..3974fd81d27c 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -42,13 +42,13 @@
42 * MCD - HACK: Find somewhere to initialize this EARLY, or make this 42 * MCD - HACK: Find somewhere to initialize this EARLY, or make this
43 * initializer cleaner 43 * initializer cleaner
44 */ 44 */
45nodemask_t node_online_map = { { [0] = 1UL } }; 45nodemask_t node_online_map __read_mostly = { { [0] = 1UL } };
46EXPORT_SYMBOL(node_online_map); 46EXPORT_SYMBOL(node_online_map);
47nodemask_t node_possible_map = NODE_MASK_ALL; 47nodemask_t node_possible_map __read_mostly = NODE_MASK_ALL;
48EXPORT_SYMBOL(node_possible_map); 48EXPORT_SYMBOL(node_possible_map);
49struct pglist_data *pgdat_list; 49struct pglist_data *pgdat_list __read_mostly;
50unsigned long totalram_pages; 50unsigned long totalram_pages __read_mostly;
51unsigned long totalhigh_pages; 51unsigned long totalhigh_pages __read_mostly;
52long nr_swap_pages; 52long nr_swap_pages;
53 53
54/* 54/*
@@ -68,7 +68,7 @@ EXPORT_SYMBOL(nr_swap_pages);
68 * Used by page_zone() to look up the address of the struct zone whose 68 * Used by page_zone() to look up the address of the struct zone whose
69 * id is encoded in the upper bits of page->flags 69 * id is encoded in the upper bits of page->flags
70 */ 70 */
71struct zone *zone_table[1 << ZONETABLE_SHIFT]; 71struct zone *zone_table[1 << ZONETABLE_SHIFT] __read_mostly;
72EXPORT_SYMBOL(zone_table); 72EXPORT_SYMBOL(zone_table);
73 73
74static char *zone_names[MAX_NR_ZONES] = { "DMA", "Normal", "HighMem" }; 74static char *zone_names[MAX_NR_ZONES] = { "DMA", "Normal", "HighMem" };
@@ -806,11 +806,14 @@ __alloc_pages(unsigned int __nocast gfp_mask, unsigned int order,
806 classzone_idx = zone_idx(zones[0]); 806 classzone_idx = zone_idx(zones[0]);
807 807
808restart: 808restart:
809 /* Go through the zonelist once, looking for a zone with enough free */ 809 /*
810 * Go through the zonelist once, looking for a zone with enough free.
811 * See also cpuset_zone_allowed() comment in kernel/cpuset.c.
812 */
810 for (i = 0; (z = zones[i]) != NULL; i++) { 813 for (i = 0; (z = zones[i]) != NULL; i++) {
811 int do_reclaim = should_reclaim_zone(z, gfp_mask); 814 int do_reclaim = should_reclaim_zone(z, gfp_mask);
812 815
813 if (!cpuset_zone_allowed(z)) 816 if (!cpuset_zone_allowed(z, __GFP_HARDWALL))
814 continue; 817 continue;
815 818
816 /* 819 /*
@@ -845,6 +848,7 @@ zone_reclaim_retry:
845 * 848 *
846 * This is the last chance, in general, before the goto nopage. 849 * This is the last chance, in general, before the goto nopage.
847 * Ignore cpuset if GFP_ATOMIC (!wait) rather than fail alloc. 850 * Ignore cpuset if GFP_ATOMIC (!wait) rather than fail alloc.
851 * See also cpuset_zone_allowed() comment in kernel/cpuset.c.
848 */ 852 */
849 for (i = 0; (z = zones[i]) != NULL; i++) { 853 for (i = 0; (z = zones[i]) != NULL; i++) {
850 if (!zone_watermark_ok(z, order, z->pages_min, 854 if (!zone_watermark_ok(z, order, z->pages_min,
@@ -852,7 +856,7 @@ zone_reclaim_retry:
852 gfp_mask & __GFP_HIGH)) 856 gfp_mask & __GFP_HIGH))
853 continue; 857 continue;
854 858
855 if (wait && !cpuset_zone_allowed(z)) 859 if (wait && !cpuset_zone_allowed(z, gfp_mask))
856 continue; 860 continue;
857 861
858 page = buffered_rmqueue(z, order, gfp_mask); 862 page = buffered_rmqueue(z, order, gfp_mask);
@@ -867,7 +871,7 @@ zone_reclaim_retry:
867 if (!(gfp_mask & __GFP_NOMEMALLOC)) { 871 if (!(gfp_mask & __GFP_NOMEMALLOC)) {
868 /* go through the zonelist yet again, ignoring mins */ 872 /* go through the zonelist yet again, ignoring mins */
869 for (i = 0; (z = zones[i]) != NULL; i++) { 873 for (i = 0; (z = zones[i]) != NULL; i++) {
870 if (!cpuset_zone_allowed(z)) 874 if (!cpuset_zone_allowed(z, gfp_mask))
871 continue; 875 continue;
872 page = buffered_rmqueue(z, order, gfp_mask); 876 page = buffered_rmqueue(z, order, gfp_mask);
873 if (page) 877 if (page)
@@ -903,7 +907,7 @@ rebalance:
903 gfp_mask & __GFP_HIGH)) 907 gfp_mask & __GFP_HIGH))
904 continue; 908 continue;
905 909
906 if (!cpuset_zone_allowed(z)) 910 if (!cpuset_zone_allowed(z, gfp_mask))
907 continue; 911 continue;
908 912
909 page = buffered_rmqueue(z, order, gfp_mask); 913 page = buffered_rmqueue(z, order, gfp_mask);
@@ -922,7 +926,7 @@ rebalance:
922 classzone_idx, 0, 0)) 926 classzone_idx, 0, 0))
923 continue; 927 continue;
924 928
925 if (!cpuset_zone_allowed(z)) 929 if (!cpuset_zone_allowed(z, __GFP_HARDWALL))
926 continue; 930 continue;
927 931
928 page = buffered_rmqueue(z, order, gfp_mask); 932 page = buffered_rmqueue(z, order, gfp_mask);
diff --git a/mm/readahead.c b/mm/readahead.c
index b840e7c6ea74..d0b50034e245 100644
--- a/mm/readahead.c
+++ b/mm/readahead.c
@@ -540,6 +540,7 @@ void handle_ra_miss(struct address_space *mapping,
540{ 540{
541 ra->flags |= RA_FLAG_MISS; 541 ra->flags |= RA_FLAG_MISS;
542 ra->flags &= ~RA_FLAG_INCACHE; 542 ra->flags &= ~RA_FLAG_INCACHE;
543 ra->cache_hit = 0;
543} 544}
544 545
545/* 546/*
diff --git a/mm/shmem.c b/mm/shmem.c
index bdc4bbb6ddbb..db2c9e8d9909 100644
--- a/mm/shmem.c
+++ b/mm/shmem.c
@@ -180,7 +180,7 @@ static struct inode_operations shmem_inode_operations;
180static struct inode_operations shmem_dir_inode_operations; 180static struct inode_operations shmem_dir_inode_operations;
181static struct vm_operations_struct shmem_vm_ops; 181static struct vm_operations_struct shmem_vm_ops;
182 182
183static struct backing_dev_info shmem_backing_dev_info = { 183static struct backing_dev_info shmem_backing_dev_info __read_mostly = {
184 .ra_pages = 0, /* No readahead */ 184 .ra_pages = 0, /* No readahead */
185 .capabilities = BDI_CAP_NO_ACCT_DIRTY | BDI_CAP_NO_WRITEBACK, 185 .capabilities = BDI_CAP_NO_ACCT_DIRTY | BDI_CAP_NO_WRITEBACK,
186 .unplug_io_fn = default_unplug_io_fn, 186 .unplug_io_fn = default_unplug_io_fn,
diff --git a/mm/slab.c b/mm/slab.c
index a9ff4f7f9860..d7c4443991fe 100644
--- a/mm/slab.c
+++ b/mm/slab.c
@@ -2558,24 +2558,18 @@ void kmem_cache_free(kmem_cache_t *cachep, void *objp)
2558EXPORT_SYMBOL(kmem_cache_free); 2558EXPORT_SYMBOL(kmem_cache_free);
2559 2559
2560/** 2560/**
2561 * kcalloc - allocate memory for an array. The memory is set to zero. 2561 * kzalloc - allocate memory. The memory is set to zero.
2562 * @n: number of elements. 2562 * @size: how many bytes of memory are required.
2563 * @size: element size.
2564 * @flags: the type of memory to allocate. 2563 * @flags: the type of memory to allocate.
2565 */ 2564 */
2566void *kcalloc(size_t n, size_t size, unsigned int __nocast flags) 2565void *kzalloc(size_t size, unsigned int __nocast flags)
2567{ 2566{
2568 void *ret = NULL; 2567 void *ret = kmalloc(size, flags);
2569
2570 if (n != 0 && size > INT_MAX / n)
2571 return ret;
2572
2573 ret = kmalloc(n * size, flags);
2574 if (ret) 2568 if (ret)
2575 memset(ret, 0, n * size); 2569 memset(ret, 0, size);
2576 return ret; 2570 return ret;
2577} 2571}
2578EXPORT_SYMBOL(kcalloc); 2572EXPORT_SYMBOL(kzalloc);
2579 2573
2580/** 2574/**
2581 * kfree - free previously allocated memory 2575 * kfree - free previously allocated memory
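
kzalloc() above is the zeroing allocation that lets callers such as the workqueue.c hunk earlier drop their explicit memset(). Note that, unlike kcalloc(), it takes a single size, so array allocations still need their own n * size overflow guard. A user-space analogue of the helper, for illustration only:

#include <stdlib.h>
#include <string.h>

/* Analogue of kzalloc(): one allocation, returned zeroed (or NULL). */
static void *zalloc(size_t size)
{
	void *p = malloc(size);

	if (p)
		memset(p, 0, size);
	return p;
}

With it, the common kmalloc(sizeof(*wq), GFP_KERNEL) followed by memset(wq, 0, sizeof(*wq)) collapses into a single kzalloc(sizeof(*wq), GFP_KERNEL) call, exactly as the workqueue hunk shows.
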
diff --git a/mm/vmscan.c b/mm/vmscan.c
index 0095533cdde9..a740778f688d 100644
--- a/mm/vmscan.c
+++ b/mm/vmscan.c
@@ -894,7 +894,7 @@ shrink_caches(struct zone **zones, struct scan_control *sc)
894 if (zone->present_pages == 0) 894 if (zone->present_pages == 0)
895 continue; 895 continue;
896 896
897 if (!cpuset_zone_allowed(zone)) 897 if (!cpuset_zone_allowed(zone, __GFP_HARDWALL))
898 continue; 898 continue;
899 899
900 zone->temp_priority = sc->priority; 900 zone->temp_priority = sc->priority;
@@ -940,7 +940,7 @@ int try_to_free_pages(struct zone **zones, unsigned int gfp_mask)
940 for (i = 0; zones[i] != NULL; i++) { 940 for (i = 0; zones[i] != NULL; i++) {
941 struct zone *zone = zones[i]; 941 struct zone *zone = zones[i];
942 942
943 if (!cpuset_zone_allowed(zone)) 943 if (!cpuset_zone_allowed(zone, __GFP_HARDWALL))
944 continue; 944 continue;
945 945
946 zone->temp_priority = DEF_PRIORITY; 946 zone->temp_priority = DEF_PRIORITY;
@@ -986,7 +986,7 @@ out:
986 for (i = 0; zones[i] != 0; i++) { 986 for (i = 0; zones[i] != 0; i++) {
987 struct zone *zone = zones[i]; 987 struct zone *zone = zones[i];
988 988
989 if (!cpuset_zone_allowed(zone)) 989 if (!cpuset_zone_allowed(zone, __GFP_HARDWALL))
990 continue; 990 continue;
991 991
992 zone->prev_priority = zone->temp_priority; 992 zone->prev_priority = zone->temp_priority;
@@ -1256,7 +1256,7 @@ void wakeup_kswapd(struct zone *zone, int order)
1256 return; 1256 return;
1257 if (pgdat->kswapd_max_order < order) 1257 if (pgdat->kswapd_max_order < order)
1258 pgdat->kswapd_max_order = order; 1258 pgdat->kswapd_max_order = order;
1259 if (!cpuset_zone_allowed(zone)) 1259 if (!cpuset_zone_allowed(zone, __GFP_HARDWALL))
1260 return; 1260 return;
1261 if (!waitqueue_active(&zone->zone_pgdat->kswapd_wait)) 1261 if (!waitqueue_active(&zone->zone_pgdat->kswapd_wait))
1262 return; 1262 return;
diff --git a/net/ax25/af_ax25.c b/net/ax25/af_ax25.c
index ea43dfb774e2..ed705ddad56b 100644
--- a/net/ax25/af_ax25.c
+++ b/net/ax25/af_ax25.c
@@ -1874,6 +1874,7 @@ static void ax25_info_stop(struct seq_file *seq, void *v)
1874static int ax25_info_show(struct seq_file *seq, void *v) 1874static int ax25_info_show(struct seq_file *seq, void *v)
1875{ 1875{
1876 ax25_cb *ax25 = v; 1876 ax25_cb *ax25 = v;
1877 char buf[11];
1877 int k; 1878 int k;
1878 1879
1879 1880
@@ -1885,13 +1886,13 @@ static int ax25_info_show(struct seq_file *seq, void *v)
1885 seq_printf(seq, "%8.8lx %s %s%s ", 1886 seq_printf(seq, "%8.8lx %s %s%s ",
1886 (long) ax25, 1887 (long) ax25,
1887 ax25->ax25_dev == NULL? "???" : ax25->ax25_dev->dev->name, 1888 ax25->ax25_dev == NULL? "???" : ax25->ax25_dev->dev->name,
1888 ax2asc(&ax25->source_addr), 1889 ax2asc(buf, &ax25->source_addr),
1889 ax25->iamdigi? "*":""); 1890 ax25->iamdigi? "*":"");
1890 seq_printf(seq, "%s", ax2asc(&ax25->dest_addr)); 1891 seq_printf(seq, "%s", ax2asc(buf, &ax25->dest_addr));
1891 1892
1892 for (k=0; (ax25->digipeat != NULL) && (k < ax25->digipeat->ndigi); k++) { 1893 for (k=0; (ax25->digipeat != NULL) && (k < ax25->digipeat->ndigi); k++) {
1893 seq_printf(seq, ",%s%s", 1894 seq_printf(seq, ",%s%s",
1894 ax2asc(&ax25->digipeat->calls[k]), 1895 ax2asc(buf, &ax25->digipeat->calls[k]),
1895 ax25->digipeat->repeated[k]? "*":""); 1896 ax25->digipeat->repeated[k]? "*":"");
1896 } 1897 }
1897 1898
diff --git a/net/ax25/ax25_addr.c b/net/ax25/ax25_addr.c
index f4fa6dfb846e..dca179daf415 100644
--- a/net/ax25/ax25_addr.c
+++ b/net/ax25/ax25_addr.c
@@ -36,9 +36,8 @@ ax25_address null_ax25_address = {{0x40, 0x40, 0x40, 0x40, 0x40, 0x40, 0x00}};
36/* 36/*
37 * ax25 -> ascii conversion 37 * ax25 -> ascii conversion
38 */ 38 */
39char *ax2asc(ax25_address *a) 39char *ax2asc(char *buf, ax25_address *a)
40{ 40{
41 static char buf[11];
42 char c, *s; 41 char c, *s;
43 int n; 42 int n;
44 43
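
The ax2asc() change above is the classic fix for a non-reentrant formatter: the function-local static buffer becomes a buffer passed in by each caller (the 11-byte arrays added to the seq_file show functions), so concurrent /proc readers can no longer corrupt each other's output. The same conversion in generic form, with invented names:

#include <stdio.h>

/* Before: every caller shares one static buffer; not reentrant. */
static char *fmt_id_static(unsigned int id)
{
	static char buf[16];

	snprintf(buf, sizeof(buf), "id-%u", id);
	return buf;
}

/* After: the caller owns the storage, so nested or concurrent
 * callers each format into their own buffer. */
static char *fmt_id(char *buf, size_t len, unsigned int id)
{
	snprintf(buf, len, "id-%u", id);
	return buf;
}
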
diff --git a/net/ax25/ax25_route.c b/net/ax25/ax25_route.c
index c288526da4ce..26b77d972220 100644
--- a/net/ax25/ax25_route.c
+++ b/net/ax25/ax25_route.c
@@ -298,6 +298,8 @@ static void ax25_rt_seq_stop(struct seq_file *seq, void *v)
298 298
299static int ax25_rt_seq_show(struct seq_file *seq, void *v) 299static int ax25_rt_seq_show(struct seq_file *seq, void *v)
300{ 300{
301 char buf[11];
302
301 if (v == SEQ_START_TOKEN) 303 if (v == SEQ_START_TOKEN)
302 seq_puts(seq, "callsign dev mode digipeaters\n"); 304 seq_puts(seq, "callsign dev mode digipeaters\n");
303 else { 305 else {
@@ -308,7 +310,7 @@ static int ax25_rt_seq_show(struct seq_file *seq, void *v)
308 if (ax25cmp(&ax25_rt->callsign, &null_ax25_address) == 0) 310 if (ax25cmp(&ax25_rt->callsign, &null_ax25_address) == 0)
309 callsign = "default"; 311 callsign = "default";
310 else 312 else
311 callsign = ax2asc(&ax25_rt->callsign); 313 callsign = ax2asc(buf, &ax25_rt->callsign);
312 314
313 seq_printf(seq, "%-9s %-4s", 315 seq_printf(seq, "%-9s %-4s",
314 callsign, 316 callsign,
@@ -328,7 +330,8 @@ static int ax25_rt_seq_show(struct seq_file *seq, void *v)
328 330
329 if (ax25_rt->digipeat != NULL) 331 if (ax25_rt->digipeat != NULL)
330 for (i = 0; i < ax25_rt->digipeat->ndigi; i++) 332 for (i = 0; i < ax25_rt->digipeat->ndigi; i++)
331 seq_printf(seq, " %s", ax2asc(&ax25_rt->digipeat->calls[i])); 333 seq_printf(seq, " %s",
334 ax2asc(buf, &ax25_rt->digipeat->calls[i]));
332 335
333 seq_puts(seq, "\n"); 336 seq_puts(seq, "\n");
334 } 337 }
diff --git a/net/ax25/ax25_uid.c b/net/ax25/ax25_uid.c
index a8b3822f3ee4..d53cc8615865 100644
--- a/net/ax25/ax25_uid.c
+++ b/net/ax25/ax25_uid.c
@@ -168,12 +168,14 @@ static void ax25_uid_seq_stop(struct seq_file *seq, void *v)
168 168
169static int ax25_uid_seq_show(struct seq_file *seq, void *v) 169static int ax25_uid_seq_show(struct seq_file *seq, void *v)
170{ 170{
171 char buf[11];
172
171 if (v == SEQ_START_TOKEN) 173 if (v == SEQ_START_TOKEN)
172 seq_printf(seq, "Policy: %d\n", ax25_uid_policy); 174 seq_printf(seq, "Policy: %d\n", ax25_uid_policy);
173 else { 175 else {
174 struct ax25_uid_assoc *pt = v; 176 struct ax25_uid_assoc *pt = v;
175 177
176 seq_printf(seq, "%6d %s\n", pt->uid, ax2asc(&pt->call)); 178 seq_printf(seq, "%6d %s\n", pt->uid, ax2asc(buf, &pt->call));
177 } 179 }
178 return 0; 180 return 0;
179} 181}
diff --git a/net/core/sock.c b/net/core/sock.c
index c13594579bfb..ac63b56e23b2 100644
--- a/net/core/sock.c
+++ b/net/core/sock.c
@@ -341,11 +341,11 @@ set_rcvbuf:
341 sock_reset_flag(sk, SOCK_LINGER); 341 sock_reset_flag(sk, SOCK_LINGER);
342 else { 342 else {
343#if (BITS_PER_LONG == 32) 343#if (BITS_PER_LONG == 32)
344 if (ling.l_linger >= MAX_SCHEDULE_TIMEOUT/HZ) 344 if ((unsigned int)ling.l_linger >= MAX_SCHEDULE_TIMEOUT/HZ)
345 sk->sk_lingertime = MAX_SCHEDULE_TIMEOUT; 345 sk->sk_lingertime = MAX_SCHEDULE_TIMEOUT;
346 else 346 else
347#endif 347#endif
348 sk->sk_lingertime = ling.l_linger * HZ; 348 sk->sk_lingertime = (unsigned int)ling.l_linger * HZ;
349 sock_set_flag(sk, SOCK_LINGER); 349 sock_set_flag(sk, SOCK_LINGER);
350 } 350 }
351 break; 351 break;
@@ -1529,6 +1529,8 @@ EXPORT_SYMBOL(proto_register);
1529void proto_unregister(struct proto *prot) 1529void proto_unregister(struct proto *prot)
1530{ 1530{
1531 write_lock(&proto_list_lock); 1531 write_lock(&proto_list_lock);
1532 list_del(&prot->node);
1533 write_unlock(&proto_list_lock);
1532 1534
1533 if (prot->slab != NULL) { 1535 if (prot->slab != NULL) {
1534 kmem_cache_destroy(prot->slab); 1536 kmem_cache_destroy(prot->slab);
@@ -1550,9 +1552,6 @@ void proto_unregister(struct proto *prot)
1550 kfree(name); 1552 kfree(name);
1551 prot->twsk_slab = NULL; 1553 prot->twsk_slab = NULL;
1552 } 1554 }
1553
1554 list_del(&prot->node);
1555 write_unlock(&proto_list_lock);
1556} 1555}
1557 1556
1558EXPORT_SYMBOL(proto_unregister); 1557EXPORT_SYMBOL(proto_unregister);
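
The linger hunk near the top of the sock.c diff casts l_linger to unsigned int before the comparison and the multiply by HZ, so an absurdly large (or negative) value from user space clamps to MAX_SCHEDULE_TIMEOUT rather than overflowing a signed multiplication. For reference, the option is set from user space with the standard sockets API:

#include <stdio.h>
#include <string.h>
#include <sys/socket.h>

int main(void)
{
	struct linger lg;
	int fd = socket(AF_INET, SOCK_STREAM, 0);

	if (fd < 0) {
		perror("socket");
		return 1;
	}

	memset(&lg, 0, sizeof(lg));
	lg.l_onoff = 1;		/* block close() until data is sent... */
	lg.l_linger = 5;	/* ...for at most 5 seconds */

	if (setsockopt(fd, SOL_SOCKET, SO_LINGER, &lg, sizeof(lg)) != 0)
		perror("setsockopt(SO_LINGER)");
	return 0;
}
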
diff --git a/net/core/wireless.c b/net/core/wireless.c
index 5caae2399f3a..d17f1583ea3e 100644
--- a/net/core/wireless.c
+++ b/net/core/wireless.c
@@ -58,6 +58,13 @@
58 * o Add wmb() in iw_handler_set_spy() for non-coherent archs/cpus 58 * o Add wmb() in iw_handler_set_spy() for non-coherent archs/cpus
59 * Based on patch from Pavel Roskin <proski@gnu.org> : 59 * Based on patch from Pavel Roskin <proski@gnu.org> :
60 * o Fix kernel data leak to user space in private handler handling 60 * o Fix kernel data leak to user space in private handler handling
61 *
62 * v7 - 18.3.05 - Jean II
63 * o Remove (struct iw_point *)->pointer from events and streams
64 * o Remove spy_offset from struct iw_handler_def
65 * o Start deprecating dev->get_wireless_stats, output a warning
66 * o If IW_QUAL_DBM is set, show dBm values in /proc/net/wireless
 67 * o Don't lose INVALID/DBM flags when clearing UPDATED flags (iwstats)
61 */ 68 */
62 69
63/***************************** INCLUDES *****************************/ 70/***************************** INCLUDES *****************************/
@@ -446,10 +453,14 @@ static inline struct iw_statistics *get_wireless_stats(struct net_device *dev)
446 (dev->wireless_handlers->get_wireless_stats != NULL)) 453 (dev->wireless_handlers->get_wireless_stats != NULL))
447 return dev->wireless_handlers->get_wireless_stats(dev); 454 return dev->wireless_handlers->get_wireless_stats(dev);
448 455
449 /* Old location, will be phased out in next WE */ 456 /* Old location, field to be removed in next WE */
450 return (dev->get_wireless_stats ? 457 if(dev->get_wireless_stats) {
451 dev->get_wireless_stats(dev) : 458 printk(KERN_DEBUG "%s (WE) : Driver using old /proc/net/wireless support, please fix driver !\n",
452 (struct iw_statistics *) NULL); 459 dev->name);
460 return dev->get_wireless_stats(dev);
461 }
462 /* Not found */
463 return (struct iw_statistics *) NULL;
453} 464}
454 465
455/* ---------------------------------------------------------------- */ 466/* ---------------------------------------------------------------- */
@@ -541,16 +552,18 @@ static __inline__ void wireless_seq_printf_stats(struct seq_file *seq,
541 dev->name, stats->status, stats->qual.qual, 552 dev->name, stats->status, stats->qual.qual,
542 stats->qual.updated & IW_QUAL_QUAL_UPDATED 553 stats->qual.updated & IW_QUAL_QUAL_UPDATED
543 ? '.' : ' ', 554 ? '.' : ' ',
544 ((__u8) stats->qual.level), 555 ((__s32) stats->qual.level) -
556 ((stats->qual.updated & IW_QUAL_DBM) ? 0x100 : 0),
545 stats->qual.updated & IW_QUAL_LEVEL_UPDATED 557 stats->qual.updated & IW_QUAL_LEVEL_UPDATED
546 ? '.' : ' ', 558 ? '.' : ' ',
547 ((__u8) stats->qual.noise), 559 ((__s32) stats->qual.noise) -
560 ((stats->qual.updated & IW_QUAL_DBM) ? 0x100 : 0),
548 stats->qual.updated & IW_QUAL_NOISE_UPDATED 561 stats->qual.updated & IW_QUAL_NOISE_UPDATED
549 ? '.' : ' ', 562 ? '.' : ' ',
550 stats->discard.nwid, stats->discard.code, 563 stats->discard.nwid, stats->discard.code,
551 stats->discard.fragment, stats->discard.retries, 564 stats->discard.fragment, stats->discard.retries,
552 stats->discard.misc, stats->miss.beacon); 565 stats->discard.misc, stats->miss.beacon);
553 stats->qual.updated = 0; 566 stats->qual.updated &= ~IW_QUAL_ALL_UPDATED;
554 } 567 }
555} 568}
556 569
@@ -593,6 +606,7 @@ static struct file_operations wireless_seq_fops = {
593 606
594int __init wireless_proc_init(void) 607int __init wireless_proc_init(void)
595{ 608{
609 /* Create /proc/net/wireless entry */
596 if (!proc_net_fops_create("wireless", S_IRUGO, &wireless_seq_fops)) 610 if (!proc_net_fops_create("wireless", S_IRUGO, &wireless_seq_fops))
597 return -ENOMEM; 611 return -ENOMEM;
598 612
@@ -627,9 +641,9 @@ static inline int dev_iwstats(struct net_device *dev, struct ifreq *ifr)
627 sizeof(struct iw_statistics))) 641 sizeof(struct iw_statistics)))
628 return -EFAULT; 642 return -EFAULT;
629 643
630 /* Check if we need to clear the update flag */ 644 /* Check if we need to clear the updated flag */
631 if(wrq->u.data.flags != 0) 645 if(wrq->u.data.flags != 0)
632 stats->qual.updated = 0; 646 stats->qual.updated &= ~IW_QUAL_ALL_UPDATED;
633 return 0; 647 return 0;
634 } else 648 } else
635 return -EOPNOTSUPP; 649 return -EOPNOTSUPP;
@@ -1161,10 +1175,11 @@ void wireless_send_event(struct net_device * dev,
1161 struct iw_event *event; /* Mallocated whole event */ 1175 struct iw_event *event; /* Mallocated whole event */
1162 int event_len; /* Its size */ 1176 int event_len; /* Its size */
1163 int hdr_len; /* Size of the event header */ 1177 int hdr_len; /* Size of the event header */
1178 int wrqu_off = 0; /* Offset in wrqu */
1164 /* Don't "optimise" the following variable, it will crash */ 1179 /* Don't "optimise" the following variable, it will crash */
1165 unsigned cmd_index; /* *MUST* be unsigned */ 1180 unsigned cmd_index; /* *MUST* be unsigned */
1166 1181
1167 /* Get the description of the IOCTL */ 1182 /* Get the description of the Event */
1168 if(cmd <= SIOCIWLAST) { 1183 if(cmd <= SIOCIWLAST) {
1169 cmd_index = cmd - SIOCIWFIRST; 1184 cmd_index = cmd - SIOCIWFIRST;
1170 if(cmd_index < standard_ioctl_num) 1185 if(cmd_index < standard_ioctl_num)
@@ -1207,6 +1222,8 @@ void wireless_send_event(struct net_device * dev,
1207 /* Calculate extra_len - extra is NULL for restricted events */ 1222 /* Calculate extra_len - extra is NULL for restricted events */
1208 if(extra != NULL) 1223 if(extra != NULL)
1209 extra_len = wrqu->data.length * descr->token_size; 1224 extra_len = wrqu->data.length * descr->token_size;
1225 /* Always at an offset in wrqu */
1226 wrqu_off = IW_EV_POINT_OFF;
1210#ifdef WE_EVENT_DEBUG 1227#ifdef WE_EVENT_DEBUG
1211 printk(KERN_DEBUG "%s (WE) : Event 0x%04X, tokens %d, extra_len %d\n", dev->name, cmd, wrqu->data.length, extra_len); 1228 printk(KERN_DEBUG "%s (WE) : Event 0x%04X, tokens %d, extra_len %d\n", dev->name, cmd, wrqu->data.length, extra_len);
1212#endif /* WE_EVENT_DEBUG */ 1229#endif /* WE_EVENT_DEBUG */
@@ -1217,7 +1234,7 @@ void wireless_send_event(struct net_device * dev,
1217 event_len = hdr_len + extra_len; 1234 event_len = hdr_len + extra_len;
1218 1235
1219#ifdef WE_EVENT_DEBUG 1236#ifdef WE_EVENT_DEBUG
1220 printk(KERN_DEBUG "%s (WE) : Event 0x%04X, hdr_len %d, event_len %d\n", dev->name, cmd, hdr_len, event_len); 1237 printk(KERN_DEBUG "%s (WE) : Event 0x%04X, hdr_len %d, wrqu_off %d, event_len %d\n", dev->name, cmd, hdr_len, wrqu_off, event_len);
1221#endif /* WE_EVENT_DEBUG */ 1238#endif /* WE_EVENT_DEBUG */
1222 1239
1223 /* Create temporary buffer to hold the event */ 1240 /* Create temporary buffer to hold the event */
@@ -1228,7 +1245,7 @@ void wireless_send_event(struct net_device * dev,
1228 /* Fill event */ 1245 /* Fill event */
1229 event->len = event_len; 1246 event->len = event_len;
1230 event->cmd = cmd; 1247 event->cmd = cmd;
1231 memcpy(&event->u, wrqu, hdr_len - IW_EV_LCP_LEN); 1248 memcpy(&event->u, ((char *) wrqu) + wrqu_off, hdr_len - IW_EV_LCP_LEN);
1232 if(extra != NULL) 1249 if(extra != NULL)
1233 memcpy(((char *) event) + hdr_len, extra, extra_len); 1250 memcpy(((char *) event) + hdr_len, extra, extra_len);
1234 1251
@@ -1249,7 +1266,7 @@ void wireless_send_event(struct net_device * dev,
1249 * Now, the driver can delegate this task to Wireless Extensions. 1266 * Now, the driver can delegate this task to Wireless Extensions.
1250 * It needs to use those standard spy iw_handler in struct iw_handler_def, 1267 * It needs to use those standard spy iw_handler in struct iw_handler_def,
1251 * push data to us via wireless_spy_update() and include struct iw_spy_data 1268 * push data to us via wireless_spy_update() and include struct iw_spy_data
1252 * in its private part (and advertise it in iw_handler_def->spy_offset). 1269 * in its private part (and export it in net_device->wireless_data->spy_data).
1253 * One of the main advantage of centralising spy support here is that 1270 * One of the main advantage of centralising spy support here is that
1254 * it becomes much easier to improve and extend it without having to touch 1271 * it becomes much easier to improve and extend it without having to touch
1255 * the drivers. One example is the addition of the Spy-Threshold events. 1272 * the drivers. One example is the addition of the Spy-Threshold events.
@@ -1266,10 +1283,7 @@ static inline struct iw_spy_data * get_spydata(struct net_device *dev)
1266 /* This is the new way */ 1283 /* This is the new way */
1267 if(dev->wireless_data) 1284 if(dev->wireless_data)
1268 return(dev->wireless_data->spy_data); 1285 return(dev->wireless_data->spy_data);
1269 1286 return NULL;
1270 /* This is the old way. Doesn't work for multi-headed drivers.
1271 * It will be removed in the next version of WE. */
1272 return (dev->priv + dev->wireless_handlers->spy_offset);
1273} 1287}
1274 1288
1275/*------------------------------------------------------------------*/ 1289/*------------------------------------------------------------------*/
@@ -1284,10 +1298,6 @@ int iw_handler_set_spy(struct net_device * dev,
1284 struct iw_spy_data * spydata = get_spydata(dev); 1298 struct iw_spy_data * spydata = get_spydata(dev);
1285 struct sockaddr * address = (struct sockaddr *) extra; 1299 struct sockaddr * address = (struct sockaddr *) extra;
1286 1300
1287 if(!dev->wireless_data)
1288 /* Help user know that driver needs updating */
1289 printk(KERN_DEBUG "%s (WE) : Driver using old/buggy spy support, please fix driver !\n",
1290 dev->name);
1291 /* Make sure driver is not buggy or using the old API */ 1301 /* Make sure driver is not buggy or using the old API */
1292 if(!spydata) 1302 if(!spydata)
1293 return -EOPNOTSUPP; 1303 return -EOPNOTSUPP;
@@ -1318,7 +1328,7 @@ int iw_handler_set_spy(struct net_device * dev,
1318 sizeof(struct iw_quality) * IW_MAX_SPY); 1328 sizeof(struct iw_quality) * IW_MAX_SPY);
1319 1329
1320#ifdef WE_SPY_DEBUG 1330#ifdef WE_SPY_DEBUG
1321 printk(KERN_DEBUG "iw_handler_set_spy() : offset %ld, spydata %p, num %d\n", dev->wireless_handlers->spy_offset, spydata, wrqu->data.length); 1331 printk(KERN_DEBUG "iw_handler_set_spy() : wireless_data %p, spydata %p, num %d\n", dev->wireless_data, spydata, wrqu->data.length);
1322 for (i = 0; i < wrqu->data.length; i++) 1332 for (i = 0; i < wrqu->data.length; i++)
1323 printk(KERN_DEBUG 1333 printk(KERN_DEBUG
1324 "%02X:%02X:%02X:%02X:%02X:%02X \n", 1334 "%02X:%02X:%02X:%02X:%02X:%02X \n",
@@ -1371,7 +1381,7 @@ int iw_handler_get_spy(struct net_device * dev,
1371 sizeof(struct iw_quality) * spydata->spy_number); 1381 sizeof(struct iw_quality) * spydata->spy_number);
1372 /* Reset updated flags. */ 1382 /* Reset updated flags. */
1373 for(i = 0; i < spydata->spy_number; i++) 1383 for(i = 0; i < spydata->spy_number; i++)
1374 spydata->spy_stat[i].updated = 0; 1384 spydata->spy_stat[i].updated &= ~IW_QUAL_ALL_UPDATED;
1375 return 0; 1385 return 0;
1376} 1386}
1377 1387
@@ -1486,7 +1496,7 @@ void wireless_spy_update(struct net_device * dev,
1486 return; 1496 return;
1487 1497
1488#ifdef WE_SPY_DEBUG 1498#ifdef WE_SPY_DEBUG
1489 printk(KERN_DEBUG "wireless_spy_update() : offset %ld, spydata %p, address %02X:%02X:%02X:%02X:%02X:%02X\n", dev->wireless_handlers->spy_offset, spydata, address[0], address[1], address[2], address[3], address[4], address[5]); 1499 printk(KERN_DEBUG "wireless_spy_update() : wireless_data %p, spydata %p, address %02X:%02X:%02X:%02X:%02X:%02X\n", dev->wireless_data, spydata, address[0], address[1], address[2], address[3], address[4], address[5]);
1490#endif /* WE_SPY_DEBUG */ 1500#endif /* WE_SPY_DEBUG */
1491 1501
1492 /* Update all records that match */ 1502 /* Update all records that match */
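
The /proc/net/wireless change earlier in this file prints signal and noise levels in dBm when the driver sets IW_QUAL_DBM: the unsigned 8-bit quality field is re-biased by subtracting 0x100. The arithmetic in isolation; the helper name and the flag's numeric value below are assumed purely for this sketch:

#include <stdio.h>

#define IW_QUAL_DBM	0x08	/* flag value assumed for this sketch */

static int qual_level(unsigned char level, unsigned char updated)
{
	/* 0..255 maps to -256..-1 dBm when the driver reports dBm. */
	return (int)level - ((updated & IW_QUAL_DBM) ? 0x100 : 0);
}

int main(void)
{
	printf("%d dBm\n", qual_level(0xc6, IW_QUAL_DBM));	/* -58 */
	printf("%d raw\n", qual_level(0xc6, 0));		/* 198 */
	return 0;
}
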
diff --git a/net/ieee80211/ieee80211_crypt.c b/net/ieee80211/ieee80211_crypt.c
index 05a6f2f298db..61a9d92e455b 100644
--- a/net/ieee80211/ieee80211_crypt.c
+++ b/net/ieee80211/ieee80211_crypt.c
@@ -30,7 +30,6 @@ struct ieee80211_crypto_alg {
30 struct ieee80211_crypto_ops *ops; 30 struct ieee80211_crypto_ops *ops;
31}; 31};
32 32
33
34struct ieee80211_crypto { 33struct ieee80211_crypto {
35 struct list_head algs; 34 struct list_head algs;
36 spinlock_t lock; 35 spinlock_t lock;
@@ -38,8 +37,7 @@ struct ieee80211_crypto {
38 37
39static struct ieee80211_crypto *hcrypt; 38static struct ieee80211_crypto *hcrypt;
40 39
41void ieee80211_crypt_deinit_entries(struct ieee80211_device *ieee, 40void ieee80211_crypt_deinit_entries(struct ieee80211_device *ieee, int force)
42 int force)
43{ 41{
44 struct list_head *ptr, *n; 42 struct list_head *ptr, *n;
45 struct ieee80211_crypt_data *entry; 43 struct ieee80211_crypt_data *entry;
@@ -140,7 +138,7 @@ int ieee80211_unregister_crypto_ops(struct ieee80211_crypto_ops *ops)
140 spin_lock_irqsave(&hcrypt->lock, flags); 138 spin_lock_irqsave(&hcrypt->lock, flags);
141 for (ptr = hcrypt->algs.next; ptr != &hcrypt->algs; ptr = ptr->next) { 139 for (ptr = hcrypt->algs.next; ptr != &hcrypt->algs; ptr = ptr->next) {
142 struct ieee80211_crypto_alg *alg = 140 struct ieee80211_crypto_alg *alg =
143 (struct ieee80211_crypto_alg *) ptr; 141 (struct ieee80211_crypto_alg *)ptr;
144 if (alg->ops == ops) { 142 if (alg->ops == ops) {
145 list_del(&alg->list); 143 list_del(&alg->list);
146 del_alg = alg; 144 del_alg = alg;
@@ -158,8 +156,7 @@ int ieee80211_unregister_crypto_ops(struct ieee80211_crypto_ops *ops)
158 return del_alg ? 0 : -1; 156 return del_alg ? 0 : -1;
159} 157}
160 158
161 159struct ieee80211_crypto_ops *ieee80211_get_crypto_ops(const char *name)
162struct ieee80211_crypto_ops * ieee80211_get_crypto_ops(const char *name)
163{ 160{
164 unsigned long flags; 161 unsigned long flags;
165 struct list_head *ptr; 162 struct list_head *ptr;
@@ -171,7 +168,7 @@ struct ieee80211_crypto_ops * ieee80211_get_crypto_ops(const char *name)
171 spin_lock_irqsave(&hcrypt->lock, flags); 168 spin_lock_irqsave(&hcrypt->lock, flags);
172 for (ptr = hcrypt->algs.next; ptr != &hcrypt->algs; ptr = ptr->next) { 169 for (ptr = hcrypt->algs.next; ptr != &hcrypt->algs; ptr = ptr->next) {
173 struct ieee80211_crypto_alg *alg = 170 struct ieee80211_crypto_alg *alg =
174 (struct ieee80211_crypto_alg *) ptr; 171 (struct ieee80211_crypto_alg *)ptr;
175 if (strcmp(alg->ops->name, name) == 0) { 172 if (strcmp(alg->ops->name, name) == 0) {
176 found_alg = alg; 173 found_alg = alg;
177 break; 174 break;
@@ -185,9 +182,13 @@ struct ieee80211_crypto_ops * ieee80211_get_crypto_ops(const char *name)
185 return NULL; 182 return NULL;
186} 183}
187 184
188 185static void *ieee80211_crypt_null_init(int keyidx)
189static void * ieee80211_crypt_null_init(int keyidx) { return (void *) 1; } 186{
190static void ieee80211_crypt_null_deinit(void *priv) {} 187 return (void *)1;
188}
189static void ieee80211_crypt_null_deinit(void *priv)
190{
191}
191 192
192static struct ieee80211_crypto_ops ieee80211_crypt_null = { 193static struct ieee80211_crypto_ops ieee80211_crypt_null = {
193 .name = "NULL", 194 .name = "NULL",
@@ -204,7 +205,6 @@ static struct ieee80211_crypto_ops ieee80211_crypt_null = {
204 .owner = THIS_MODULE, 205 .owner = THIS_MODULE,
205}; 206};
206 207
207
208static int __init ieee80211_crypto_init(void) 208static int __init ieee80211_crypto_init(void)
209{ 209{
210 int ret = -ENOMEM; 210 int ret = -ENOMEM;
@@ -222,11 +222,10 @@ static int __init ieee80211_crypto_init(void)
222 kfree(hcrypt); 222 kfree(hcrypt);
223 hcrypt = NULL; 223 hcrypt = NULL;
224 } 224 }
225out: 225 out:
226 return ret; 226 return ret;
227} 227}
228 228
229
230static void __exit ieee80211_crypto_deinit(void) 229static void __exit ieee80211_crypto_deinit(void)
231{ 230{
232 struct list_head *ptr, *n; 231 struct list_head *ptr, *n;
@@ -237,7 +236,7 @@ static void __exit ieee80211_crypto_deinit(void)
237 for (ptr = hcrypt->algs.next, n = ptr->next; ptr != &hcrypt->algs; 236 for (ptr = hcrypt->algs.next, n = ptr->next; ptr != &hcrypt->algs;
238 ptr = n, n = ptr->next) { 237 ptr = n, n = ptr->next) {
239 struct ieee80211_crypto_alg *alg = 238 struct ieee80211_crypto_alg *alg =
240 (struct ieee80211_crypto_alg *) ptr; 239 (struct ieee80211_crypto_alg *)ptr;
241 list_del(ptr); 240 list_del(ptr);
242 printk(KERN_DEBUG "ieee80211_crypt: unregistered algorithm " 241 printk(KERN_DEBUG "ieee80211_crypt: unregistered algorithm "
243 "'%s' (deinit)\n", alg->ops->name); 242 "'%s' (deinit)\n", alg->ops->name);
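The hunks above walk hcrypt->algs by hand and cast each list_head straight to struct ieee80211_crypto_alg, which only works while the list node stays the first member of that wrapper. A minimal sketch of the assumed layout and of the lookup pattern used in ieee80211_get_crypto_ops() (find_alg and its parameters are illustrative, not part of the patch):

/* assumed wrapper layout: the cast (struct ieee80211_crypto_alg *)ptr
 * in the loops above requires .list to sit at offset 0 */
struct ieee80211_crypto_alg {
	struct list_head list;			/* must remain the first member */
	struct ieee80211_crypto_ops *ops;
};

/* illustrative lookup mirroring the loop in ieee80211_get_crypto_ops() */
static struct ieee80211_crypto_alg *find_alg(struct list_head *algs,
					     const char *name)
{
	struct list_head *ptr;

	for (ptr = algs->next; ptr != algs; ptr = ptr->next) {
		struct ieee80211_crypto_alg *alg =
			(struct ieee80211_crypto_alg *)ptr;
		if (strcmp(alg->ops->name, name) == 0)
			return alg;
	}
	return NULL;
}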
diff --git a/net/ieee80211/ieee80211_crypt_ccmp.c b/net/ieee80211/ieee80211_crypt_ccmp.c
index 11d15573b26a..8fc13f45971e 100644
--- a/net/ieee80211/ieee80211_crypt_ccmp.c
+++ b/net/ieee80211/ieee80211_crypt_ccmp.c
@@ -24,7 +24,6 @@
24 24
25#include <net/ieee80211.h> 25#include <net/ieee80211.h>
26 26
27
28#include <linux/crypto.h> 27#include <linux/crypto.h>
29#include <asm/scatterlist.h> 28#include <asm/scatterlist.h>
30 29
@@ -55,7 +54,7 @@ struct ieee80211_ccmp_data {
55 54
56 /* scratch buffers for virt_to_page() (crypto API) */ 55 /* scratch buffers for virt_to_page() (crypto API) */
57 u8 tx_b0[AES_BLOCK_LEN], tx_b[AES_BLOCK_LEN], 56 u8 tx_b0[AES_BLOCK_LEN], tx_b[AES_BLOCK_LEN],
58 tx_e[AES_BLOCK_LEN], tx_s0[AES_BLOCK_LEN]; 57 tx_e[AES_BLOCK_LEN], tx_s0[AES_BLOCK_LEN];
59 u8 rx_b0[AES_BLOCK_LEN], rx_b[AES_BLOCK_LEN], rx_a[AES_BLOCK_LEN]; 58 u8 rx_b0[AES_BLOCK_LEN], rx_b[AES_BLOCK_LEN], rx_a[AES_BLOCK_LEN];
60}; 59};
61 60
@@ -75,7 +74,7 @@ static void ieee80211_ccmp_aes_encrypt(struct crypto_tfm *tfm,
75 crypto_cipher_encrypt(tfm, &dst, &src, AES_BLOCK_LEN); 74 crypto_cipher_encrypt(tfm, &dst, &src, AES_BLOCK_LEN);
76} 75}
77 76
78static void * ieee80211_ccmp_init(int key_idx) 77static void *ieee80211_ccmp_init(int key_idx)
79{ 78{
80 struct ieee80211_ccmp_data *priv; 79 struct ieee80211_ccmp_data *priv;
81 80
@@ -94,7 +93,7 @@ static void * ieee80211_ccmp_init(int key_idx)
94 93
95 return priv; 94 return priv;
96 95
97fail: 96 fail:
98 if (priv) { 97 if (priv) {
99 if (priv->tfm) 98 if (priv->tfm)
100 crypto_free_tfm(priv->tfm); 99 crypto_free_tfm(priv->tfm);
@@ -104,7 +103,6 @@ fail:
104 return NULL; 103 return NULL;
105} 104}
106 105
107
108static void ieee80211_ccmp_deinit(void *priv) 106static void ieee80211_ccmp_deinit(void *priv)
109{ 107{
110 struct ieee80211_ccmp_data *_priv = priv; 108 struct ieee80211_ccmp_data *_priv = priv;
@@ -113,19 +111,16 @@ static void ieee80211_ccmp_deinit(void *priv)
113 kfree(priv); 111 kfree(priv);
114} 112}
115 113
116 114static inline void xor_block(u8 * b, u8 * a, size_t len)
117static inline void xor_block(u8 *b, u8 *a, size_t len)
118{ 115{
119 int i; 116 int i;
120 for (i = 0; i < len; i++) 117 for (i = 0; i < len; i++)
121 b[i] ^= a[i]; 118 b[i] ^= a[i];
122} 119}
123 120
124
125static void ccmp_init_blocks(struct crypto_tfm *tfm, 121static void ccmp_init_blocks(struct crypto_tfm *tfm,
126 struct ieee80211_hdr *hdr, 122 struct ieee80211_hdr *hdr,
127 u8 *pn, size_t dlen, u8 *b0, u8 *auth, 123 u8 * pn, size_t dlen, u8 * b0, u8 * auth, u8 * s0)
128 u8 *s0)
129{ 124{
130 u8 *pos, qc = 0; 125 u8 *pos, qc = 0;
131 size_t aad_len; 126 size_t aad_len;
@@ -142,7 +137,7 @@ static void ccmp_init_blocks(struct crypto_tfm *tfm,
142 if (a4_included) 137 if (a4_included)
143 aad_len += 6; 138 aad_len += 6;
144 if (qc_included) { 139 if (qc_included) {
145 pos = (u8 *) &hdr->addr4; 140 pos = (u8 *) & hdr->addr4;
146 if (a4_included) 141 if (a4_included)
147 pos += 6; 142 pos += 6;
148 qc = *pos & 0x0f; 143 qc = *pos & 0x0f;
@@ -169,14 +164,14 @@ static void ccmp_init_blocks(struct crypto_tfm *tfm,
169 * QC (if present) 164 * QC (if present)
170 */ 165 */
171 pos = (u8 *) hdr; 166 pos = (u8 *) hdr;
172 aad[0] = 0; /* aad_len >> 8 */ 167 aad[0] = 0; /* aad_len >> 8 */
173 aad[1] = aad_len & 0xff; 168 aad[1] = aad_len & 0xff;
174 aad[2] = pos[0] & 0x8f; 169 aad[2] = pos[0] & 0x8f;
175 aad[3] = pos[1] & 0xc7; 170 aad[3] = pos[1] & 0xc7;
176 memcpy(aad + 4, hdr->addr1, 3 * ETH_ALEN); 171 memcpy(aad + 4, hdr->addr1, 3 * ETH_ALEN);
177 pos = (u8 *) &hdr->seq_ctl; 172 pos = (u8 *) & hdr->seq_ctl;
178 aad[22] = pos[0] & 0x0f; 173 aad[22] = pos[0] & 0x0f;
179 aad[23] = 0; /* all bits masked */ 174 aad[23] = 0; /* all bits masked */
180 memset(aad + 24, 0, 8); 175 memset(aad + 24, 0, 8);
181 if (a4_included) 176 if (a4_included)
182 memcpy(aad + 24, hdr->addr4, ETH_ALEN); 177 memcpy(aad + 24, hdr->addr4, ETH_ALEN);
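The hunk above fills the CCM additional authenticated data (AAD) from a masked copy of the 802.11 header, so bits that may legitimately change on retransmission do not affect the MIC. A small illustration of the three masks, with made-up header bytes:

	u8 fc_lo = 0x88, fc_hi = 0x79, sc_lo = 0xcd;	/* example values only */

	u8 aad2  = fc_lo & 0x8f;	/* 0x88: version/type kept, variable subtype bits dropped */
	u8 aad3  = fc_hi & 0xc7;	/* 0x41: Retry, Pwr Mgmt and More Data flags cleared */
	u8 aad22 = sc_lo & 0x0f;	/* 0x0d: fragment number kept, sequence number masked */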
@@ -196,7 +191,6 @@ static void ccmp_init_blocks(struct crypto_tfm *tfm,
196 ieee80211_ccmp_aes_encrypt(tfm, b0, s0); 191 ieee80211_ccmp_aes_encrypt(tfm, b0, s0);
197} 192}
198 193
199
200static int ieee80211_ccmp_encrypt(struct sk_buff *skb, int hdr_len, void *priv) 194static int ieee80211_ccmp_encrypt(struct sk_buff *skb, int hdr_len, void *priv)
201{ 195{
202 struct ieee80211_ccmp_data *key = priv; 196 struct ieee80211_ccmp_data *key = priv;
@@ -209,8 +203,7 @@ static int ieee80211_ccmp_encrypt(struct sk_buff *skb, int hdr_len, void *priv)
209 u8 *s0 = key->tx_s0; 203 u8 *s0 = key->tx_s0;
210 204
211 if (skb_headroom(skb) < CCMP_HDR_LEN || 205 if (skb_headroom(skb) < CCMP_HDR_LEN ||
212 skb_tailroom(skb) < CCMP_MIC_LEN || 206 skb_tailroom(skb) < CCMP_MIC_LEN || skb->len < hdr_len)
213 skb->len < hdr_len)
214 return -1; 207 return -1;
215 208
216 data_len = skb->len - hdr_len; 209 data_len = skb->len - hdr_len;
@@ -230,13 +223,13 @@ static int ieee80211_ccmp_encrypt(struct sk_buff *skb, int hdr_len, void *priv)
230 *pos++ = key->tx_pn[5]; 223 *pos++ = key->tx_pn[5];
231 *pos++ = key->tx_pn[4]; 224 *pos++ = key->tx_pn[4];
232 *pos++ = 0; 225 *pos++ = 0;
233 *pos++ = (key->key_idx << 6) | (1 << 5) /* Ext IV included */; 226 *pos++ = (key->key_idx << 6) | (1 << 5) /* Ext IV included */ ;
234 *pos++ = key->tx_pn[3]; 227 *pos++ = key->tx_pn[3];
235 *pos++ = key->tx_pn[2]; 228 *pos++ = key->tx_pn[2];
236 *pos++ = key->tx_pn[1]; 229 *pos++ = key->tx_pn[1];
237 *pos++ = key->tx_pn[0]; 230 *pos++ = key->tx_pn[0];
238 231
239 hdr = (struct ieee80211_hdr *) skb->data; 232 hdr = (struct ieee80211_hdr *)skb->data;
240 ccmp_init_blocks(key->tfm, hdr, key->tx_pn, data_len, b0, b, s0); 233 ccmp_init_blocks(key->tfm, hdr, key->tx_pn, data_len, b0, b, s0);
241 234
242 blocks = (data_len + AES_BLOCK_LEN - 1) / AES_BLOCK_LEN; 235 blocks = (data_len + AES_BLOCK_LEN - 1) / AES_BLOCK_LEN;
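For reference, the eight header bytes written by the *pos++ sequence above form the standard CCMP header; tx_pn[] is kept most-significant byte first, so tx_pn[5] is the low PN octet. The same layout as a helper (ccmp_build_hdr is illustrative, not part of the patch):

static void ccmp_build_hdr(u8 *hdr, const u8 *pn, int key_idx)
{
	hdr[0] = pn[5];				/* PN0 (least significant) */
	hdr[1] = pn[4];				/* PN1 */
	hdr[2] = 0;				/* reserved */
	hdr[3] = (key_idx << 6) | (1 << 5);	/* key id + Ext IV flag */
	hdr[4] = pn[3];				/* PN2 */
	hdr[5] = pn[2];				/* PN3 */
	hdr[6] = pn[1];				/* PN4 */
	hdr[7] = pn[0];				/* PN5 (most significant) */
}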
@@ -261,7 +254,6 @@ static int ieee80211_ccmp_encrypt(struct sk_buff *skb, int hdr_len, void *priv)
261 return 0; 254 return 0;
262} 255}
263 256
264
265static int ieee80211_ccmp_decrypt(struct sk_buff *skb, int hdr_len, void *priv) 257static int ieee80211_ccmp_decrypt(struct sk_buff *skb, int hdr_len, void *priv)
266{ 258{
267 struct ieee80211_ccmp_data *key = priv; 259 struct ieee80211_ccmp_data *key = priv;
@@ -280,7 +272,7 @@ static int ieee80211_ccmp_decrypt(struct sk_buff *skb, int hdr_len, void *priv)
280 return -1; 272 return -1;
281 } 273 }
282 274
283 hdr = (struct ieee80211_hdr *) skb->data; 275 hdr = (struct ieee80211_hdr *)skb->data;
284 pos = skb->data + hdr_len; 276 pos = skb->data + hdr_len;
285 keyidx = pos[3]; 277 keyidx = pos[3];
286 if (!(keyidx & (1 << 5))) { 278 if (!(keyidx & (1 << 5))) {
@@ -364,8 +356,7 @@ static int ieee80211_ccmp_decrypt(struct sk_buff *skb, int hdr_len, void *priv)
364 return keyidx; 356 return keyidx;
365} 357}
366 358
367 359static int ieee80211_ccmp_set_key(void *key, int len, u8 * seq, void *priv)
368static int ieee80211_ccmp_set_key(void *key, int len, u8 *seq, void *priv)
369{ 360{
370 struct ieee80211_ccmp_data *data = priv; 361 struct ieee80211_ccmp_data *data = priv;
371 int keyidx; 362 int keyidx;
@@ -395,8 +386,7 @@ static int ieee80211_ccmp_set_key(void *key, int len, u8 *seq, void *priv)
395 return 0; 386 return 0;
396} 387}
397 388
398 389static int ieee80211_ccmp_get_key(void *key, int len, u8 * seq, void *priv)
399static int ieee80211_ccmp_get_key(void *key, int len, u8 *seq, void *priv)
400{ 390{
401 struct ieee80211_ccmp_data *data = priv; 391 struct ieee80211_ccmp_data *data = priv;
402 392
@@ -419,8 +409,7 @@ static int ieee80211_ccmp_get_key(void *key, int len, u8 *seq, void *priv)
419 return CCMP_TK_LEN; 409 return CCMP_TK_LEN;
420} 410}
421 411
422 412static char *ieee80211_ccmp_print_stats(char *p, void *priv)
423static char * ieee80211_ccmp_print_stats(char *p, void *priv)
424{ 413{
425 struct ieee80211_ccmp_data *ccmp = priv; 414 struct ieee80211_ccmp_data *ccmp = priv;
426 p += sprintf(p, "key[%d] alg=CCMP key_set=%d " 415 p += sprintf(p, "key[%d] alg=CCMP key_set=%d "
@@ -436,7 +425,6 @@ static char * ieee80211_ccmp_print_stats(char *p, void *priv)
436 return p; 425 return p;
437} 426}
438 427
439
440static struct ieee80211_crypto_ops ieee80211_crypt_ccmp = { 428static struct ieee80211_crypto_ops ieee80211_crypt_ccmp = {
441 .name = "CCMP", 429 .name = "CCMP",
442 .init = ieee80211_ccmp_init, 430 .init = ieee80211_ccmp_init,
@@ -453,18 +441,15 @@ static struct ieee80211_crypto_ops ieee80211_crypt_ccmp = {
453 .owner = THIS_MODULE, 441 .owner = THIS_MODULE,
454}; 442};
455 443
456
457static int __init ieee80211_crypto_ccmp_init(void) 444static int __init ieee80211_crypto_ccmp_init(void)
458{ 445{
459 return ieee80211_register_crypto_ops(&ieee80211_crypt_ccmp); 446 return ieee80211_register_crypto_ops(&ieee80211_crypt_ccmp);
460} 447}
461 448
462
463static void __exit ieee80211_crypto_ccmp_exit(void) 449static void __exit ieee80211_crypto_ccmp_exit(void)
464{ 450{
465 ieee80211_unregister_crypto_ops(&ieee80211_crypt_ccmp); 451 ieee80211_unregister_crypto_ops(&ieee80211_crypt_ccmp);
466} 452}
467 453
468
469module_init(ieee80211_crypto_ccmp_init); 454module_init(ieee80211_crypto_ccmp_init);
470module_exit(ieee80211_crypto_ccmp_exit); 455module_exit(ieee80211_crypto_ccmp_exit);
diff --git a/net/ieee80211/ieee80211_crypt_tkip.c b/net/ieee80211/ieee80211_crypt_tkip.c
index f91d92c6df25..d4f9164be1a1 100644
--- a/net/ieee80211/ieee80211_crypt_tkip.c
+++ b/net/ieee80211/ieee80211_crypt_tkip.c
@@ -23,7 +23,6 @@
23 23
24#include <net/ieee80211.h> 24#include <net/ieee80211.h>
25 25
26
27#include <linux/crypto.h> 26#include <linux/crypto.h>
28#include <asm/scatterlist.h> 27#include <asm/scatterlist.h>
29#include <linux/crc32.h> 28#include <linux/crc32.h>
@@ -62,7 +61,7 @@ struct ieee80211_tkip_data {
62 u8 rx_hdr[16], tx_hdr[16]; 61 u8 rx_hdr[16], tx_hdr[16];
63}; 62};
64 63
65static void * ieee80211_tkip_init(int key_idx) 64static void *ieee80211_tkip_init(int key_idx)
66{ 65{
67 struct ieee80211_tkip_data *priv; 66 struct ieee80211_tkip_data *priv;
68 67
@@ -88,7 +87,7 @@ static void * ieee80211_tkip_init(int key_idx)
88 87
89 return priv; 88 return priv;
90 89
91fail: 90 fail:
92 if (priv) { 91 if (priv) {
93 if (priv->tfm_michael) 92 if (priv->tfm_michael)
94 crypto_free_tfm(priv->tfm_michael); 93 crypto_free_tfm(priv->tfm_michael);
@@ -100,7 +99,6 @@ fail:
100 return NULL; 99 return NULL;
101} 100}
102 101
103
104static void ieee80211_tkip_deinit(void *priv) 102static void ieee80211_tkip_deinit(void *priv)
105{ 103{
106 struct ieee80211_tkip_data *_priv = priv; 104 struct ieee80211_tkip_data *_priv = priv;
@@ -111,51 +109,42 @@ static void ieee80211_tkip_deinit(void *priv)
111 kfree(priv); 109 kfree(priv);
112} 110}
113 111
114
115static inline u16 RotR1(u16 val) 112static inline u16 RotR1(u16 val)
116{ 113{
117 return (val >> 1) | (val << 15); 114 return (val >> 1) | (val << 15);
118} 115}
119 116
120
121static inline u8 Lo8(u16 val) 117static inline u8 Lo8(u16 val)
122{ 118{
123 return val & 0xff; 119 return val & 0xff;
124} 120}
125 121
126
127static inline u8 Hi8(u16 val) 122static inline u8 Hi8(u16 val)
128{ 123{
129 return val >> 8; 124 return val >> 8;
130} 125}
131 126
132
133static inline u16 Lo16(u32 val) 127static inline u16 Lo16(u32 val)
134{ 128{
135 return val & 0xffff; 129 return val & 0xffff;
136} 130}
137 131
138
139static inline u16 Hi16(u32 val) 132static inline u16 Hi16(u32 val)
140{ 133{
141 return val >> 16; 134 return val >> 16;
142} 135}
143 136
144
145static inline u16 Mk16(u8 hi, u8 lo) 137static inline u16 Mk16(u8 hi, u8 lo)
146{ 138{
147 return lo | (((u16) hi) << 8); 139 return lo | (((u16) hi) << 8);
148} 140}
149 141
150 142static inline u16 Mk16_le(u16 * v)
151static inline u16 Mk16_le(u16 *v)
152{ 143{
153 return le16_to_cpu(*v); 144 return le16_to_cpu(*v);
154} 145}
155 146
156 147static const u16 Sbox[256] = {
157static const u16 Sbox[256] =
158{
159 0xC6A5, 0xF884, 0xEE99, 0xF68D, 0xFF0D, 0xD6BD, 0xDEB1, 0x9154, 148 0xC6A5, 0xF884, 0xEE99, 0xF68D, 0xFF0D, 0xD6BD, 0xDEB1, 0x9154,
160 0x6050, 0x0203, 0xCEA9, 0x567D, 0xE719, 0xB562, 0x4DE6, 0xEC9A, 149 0x6050, 0x0203, 0xCEA9, 0x567D, 0xE719, 0xB562, 0x4DE6, 0xEC9A,
161 0x8F45, 0x1F9D, 0x8940, 0xFA87, 0xEF15, 0xB2EB, 0x8EC9, 0xFB0B, 150 0x8F45, 0x1F9D, 0x8940, 0xFA87, 0xEF15, 0xB2EB, 0x8EC9, 0xFB0B,
@@ -190,17 +179,16 @@ static const u16 Sbox[256] =
190 0x82C3, 0x29B0, 0x5A77, 0x1E11, 0x7BCB, 0xA8FC, 0x6DD6, 0x2C3A, 179 0x82C3, 0x29B0, 0x5A77, 0x1E11, 0x7BCB, 0xA8FC, 0x6DD6, 0x2C3A,
191}; 180};
192 181
193
194static inline u16 _S_(u16 v) 182static inline u16 _S_(u16 v)
195{ 183{
196 u16 t = Sbox[Hi8(v)]; 184 u16 t = Sbox[Hi8(v)];
197 return Sbox[Lo8(v)] ^ ((t << 8) | (t >> 8)); 185 return Sbox[Lo8(v)] ^ ((t << 8) | (t >> 8));
198} 186}
199 187
200
201#define PHASE1_LOOP_COUNT 8 188#define PHASE1_LOOP_COUNT 8
202 189
203static void tkip_mixing_phase1(u16 *TTAK, const u8 *TK, const u8 *TA, u32 IV32) 190static void tkip_mixing_phase1(u16 * TTAK, const u8 * TK, const u8 * TA,
191 u32 IV32)
204{ 192{
205 int i, j; 193 int i, j;
206 194
@@ -221,13 +209,12 @@ static void tkip_mixing_phase1(u16 *TTAK, const u8 *TK, const u8 *TA, u32 IV32)
221 } 209 }
222} 210}
223 211
224 212static void tkip_mixing_phase2(u8 * WEPSeed, const u8 * TK, const u16 * TTAK,
225static void tkip_mixing_phase2(u8 *WEPSeed, const u8 *TK, const u16 *TTAK,
226 u16 IV16) 213 u16 IV16)
227{ 214{
228 /* Make temporary area overlap WEP seed so that the final copy can be 215 /* Make temporary area overlap WEP seed so that the final copy can be
229 * avoided on little endian hosts. */ 216 * avoided on little endian hosts. */
230 u16 *PPK = (u16 *) &WEPSeed[4]; 217 u16 *PPK = (u16 *) & WEPSeed[4];
231 218
232 /* Step 1 - make copy of TTAK and bring in TSC */ 219 /* Step 1 - make copy of TTAK and bring in TSC */
233 PPK[0] = TTAK[0]; 220 PPK[0] = TTAK[0];
@@ -238,15 +225,15 @@ static void tkip_mixing_phase2(u8 *WEPSeed, const u8 *TK, const u16 *TTAK,
238 PPK[5] = TTAK[4] + IV16; 225 PPK[5] = TTAK[4] + IV16;
239 226
240 /* Step 2 - 96-bit bijective mixing using S-box */ 227 /* Step 2 - 96-bit bijective mixing using S-box */
241 PPK[0] += _S_(PPK[5] ^ Mk16_le((u16 *) &TK[0])); 228 PPK[0] += _S_(PPK[5] ^ Mk16_le((u16 *) & TK[0]));
242 PPK[1] += _S_(PPK[0] ^ Mk16_le((u16 *) &TK[2])); 229 PPK[1] += _S_(PPK[0] ^ Mk16_le((u16 *) & TK[2]));
243 PPK[2] += _S_(PPK[1] ^ Mk16_le((u16 *) &TK[4])); 230 PPK[2] += _S_(PPK[1] ^ Mk16_le((u16 *) & TK[4]));
244 PPK[3] += _S_(PPK[2] ^ Mk16_le((u16 *) &TK[6])); 231 PPK[3] += _S_(PPK[2] ^ Mk16_le((u16 *) & TK[6]));
245 PPK[4] += _S_(PPK[3] ^ Mk16_le((u16 *) &TK[8])); 232 PPK[4] += _S_(PPK[3] ^ Mk16_le((u16 *) & TK[8]));
246 PPK[5] += _S_(PPK[4] ^ Mk16_le((u16 *) &TK[10])); 233 PPK[5] += _S_(PPK[4] ^ Mk16_le((u16 *) & TK[10]));
247 234
248 PPK[0] += RotR1(PPK[5] ^ Mk16_le((u16 *) &TK[12])); 235 PPK[0] += RotR1(PPK[5] ^ Mk16_le((u16 *) & TK[12]));
249 PPK[1] += RotR1(PPK[0] ^ Mk16_le((u16 *) &TK[14])); 236 PPK[1] += RotR1(PPK[0] ^ Mk16_le((u16 *) & TK[14]));
250 PPK[2] += RotR1(PPK[1]); 237 PPK[2] += RotR1(PPK[1]);
251 PPK[3] += RotR1(PPK[2]); 238 PPK[3] += RotR1(PPK[2]);
252 PPK[4] += RotR1(PPK[3]); 239 PPK[4] += RotR1(PPK[3]);
@@ -257,7 +244,7 @@ static void tkip_mixing_phase2(u8 *WEPSeed, const u8 *TK, const u16 *TTAK,
257 WEPSeed[0] = Hi8(IV16); 244 WEPSeed[0] = Hi8(IV16);
258 WEPSeed[1] = (Hi8(IV16) | 0x20) & 0x7F; 245 WEPSeed[1] = (Hi8(IV16) | 0x20) & 0x7F;
259 WEPSeed[2] = Lo8(IV16); 246 WEPSeed[2] = Lo8(IV16);
260 WEPSeed[3] = Lo8((PPK[5] ^ Mk16_le((u16 *) &TK[0])) >> 1); 247 WEPSeed[3] = Lo8((PPK[5] ^ Mk16_le((u16 *) & TK[0])) >> 1);
261 248
262#ifdef __BIG_ENDIAN 249#ifdef __BIG_ENDIAN
263 { 250 {
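As the comment above notes, PPK[] is made to alias WEPSeed[4..15] so little-endian hosts need no final copy; the #ifdef __BIG_ENDIAN block whose opening brace is shown above then has to swap each mixed word into on-air byte order. A sketch of that arrangement (the swap loop body is assumed, it is not visible in this hunk):

	u8 WEPSeed[16];
	u16 *PPK = (u16 *)&WEPSeed[4];	/* six 16-bit words of per-packet key */

	/* ... phase-2 mixing fills PPK[0..5] in host byte order ... */

#ifdef __BIG_ENDIAN
	{
		int i;
		for (i = 0; i < 6; i++)	/* assumed: swap into little-endian order */
			PPK[i] = (PPK[i] << 8) | (PPK[i] >> 8);
	}
#endif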
@@ -281,7 +268,7 @@ static int ieee80211_tkip_encrypt(struct sk_buff *skb, int hdr_len, void *priv)
281 skb->len < hdr_len) 268 skb->len < hdr_len)
282 return -1; 269 return -1;
283 270
284 hdr = (struct ieee80211_hdr *) skb->data; 271 hdr = (struct ieee80211_hdr *)skb->data;
285 if (!tkey->tx_phase1_done) { 272 if (!tkey->tx_phase1_done) {
286 tkip_mixing_phase1(tkey->tx_ttak, tkey->key, hdr->addr2, 273 tkip_mixing_phase1(tkey->tx_ttak, tkey->key, hdr->addr2,
287 tkey->tx_iv32); 274 tkey->tx_iv32);
@@ -298,7 +285,7 @@ static int ieee80211_tkip_encrypt(struct sk_buff *skb, int hdr_len, void *priv)
298 *pos++ = rc4key[0]; 285 *pos++ = rc4key[0];
299 *pos++ = rc4key[1]; 286 *pos++ = rc4key[1];
300 *pos++ = rc4key[2]; 287 *pos++ = rc4key[2];
301 *pos++ = (tkey->key_idx << 6) | (1 << 5) /* Ext IV included */; 288 *pos++ = (tkey->key_idx << 6) | (1 << 5) /* Ext IV included */ ;
302 *pos++ = tkey->tx_iv32 & 0xff; 289 *pos++ = tkey->tx_iv32 & 0xff;
303 *pos++ = (tkey->tx_iv32 >> 8) & 0xff; 290 *pos++ = (tkey->tx_iv32 >> 8) & 0xff;
304 *pos++ = (tkey->tx_iv32 >> 16) & 0xff; 291 *pos++ = (tkey->tx_iv32 >> 16) & 0xff;
@@ -341,7 +328,7 @@ static int ieee80211_tkip_decrypt(struct sk_buff *skb, int hdr_len, void *priv)
341 if (skb->len < hdr_len + 8 + 4) 328 if (skb->len < hdr_len + 8 + 4)
342 return -1; 329 return -1;
343 330
344 hdr = (struct ieee80211_hdr *) skb->data; 331 hdr = (struct ieee80211_hdr *)skb->data;
345 pos = skb->data + hdr_len; 332 pos = skb->data + hdr_len;
346 keyidx = pos[3]; 333 keyidx = pos[3];
347 if (!(keyidx & (1 << 5))) { 334 if (!(keyidx & (1 << 5))) {
@@ -427,9 +414,8 @@ static int ieee80211_tkip_decrypt(struct sk_buff *skb, int hdr_len, void *priv)
427 return keyidx; 414 return keyidx;
428} 415}
429 416
430 417static int michael_mic(struct ieee80211_tkip_data *tkey, u8 * key, u8 * hdr,
431static int michael_mic(struct ieee80211_tkip_data *tkey, u8 *key, u8 *hdr, 418 u8 * data, size_t data_len, u8 * mic)
432 u8 *data, size_t data_len, u8 *mic)
433{ 419{
434 struct scatterlist sg[2]; 420 struct scatterlist sg[2];
435 421
@@ -453,37 +439,37 @@ static int michael_mic(struct ieee80211_tkip_data *tkey, u8 *key, u8 *hdr,
453 return 0; 439 return 0;
454} 440}
455 441
456static void michael_mic_hdr(struct sk_buff *skb, u8 *hdr) 442static void michael_mic_hdr(struct sk_buff *skb, u8 * hdr)
457{ 443{
458 struct ieee80211_hdr *hdr11; 444 struct ieee80211_hdr *hdr11;
459 445
460 hdr11 = (struct ieee80211_hdr *) skb->data; 446 hdr11 = (struct ieee80211_hdr *)skb->data;
461 switch (le16_to_cpu(hdr11->frame_ctl) & 447 switch (le16_to_cpu(hdr11->frame_ctl) &
462 (IEEE80211_FCTL_FROMDS | IEEE80211_FCTL_TODS)) { 448 (IEEE80211_FCTL_FROMDS | IEEE80211_FCTL_TODS)) {
463 case IEEE80211_FCTL_TODS: 449 case IEEE80211_FCTL_TODS:
464 memcpy(hdr, hdr11->addr3, ETH_ALEN); /* DA */ 450 memcpy(hdr, hdr11->addr3, ETH_ALEN); /* DA */
465 memcpy(hdr + ETH_ALEN, hdr11->addr2, ETH_ALEN); /* SA */ 451 memcpy(hdr + ETH_ALEN, hdr11->addr2, ETH_ALEN); /* SA */
466 break; 452 break;
467 case IEEE80211_FCTL_FROMDS: 453 case IEEE80211_FCTL_FROMDS:
468 memcpy(hdr, hdr11->addr1, ETH_ALEN); /* DA */ 454 memcpy(hdr, hdr11->addr1, ETH_ALEN); /* DA */
469 memcpy(hdr + ETH_ALEN, hdr11->addr3, ETH_ALEN); /* SA */ 455 memcpy(hdr + ETH_ALEN, hdr11->addr3, ETH_ALEN); /* SA */
470 break; 456 break;
471 case IEEE80211_FCTL_FROMDS | IEEE80211_FCTL_TODS: 457 case IEEE80211_FCTL_FROMDS | IEEE80211_FCTL_TODS:
472 memcpy(hdr, hdr11->addr3, ETH_ALEN); /* DA */ 458 memcpy(hdr, hdr11->addr3, ETH_ALEN); /* DA */
473 memcpy(hdr + ETH_ALEN, hdr11->addr4, ETH_ALEN); /* SA */ 459 memcpy(hdr + ETH_ALEN, hdr11->addr4, ETH_ALEN); /* SA */
474 break; 460 break;
475 case 0: 461 case 0:
476 memcpy(hdr, hdr11->addr1, ETH_ALEN); /* DA */ 462 memcpy(hdr, hdr11->addr1, ETH_ALEN); /* DA */
477 memcpy(hdr + ETH_ALEN, hdr11->addr2, ETH_ALEN); /* SA */ 463 memcpy(hdr + ETH_ALEN, hdr11->addr2, ETH_ALEN); /* SA */
478 break; 464 break;
479 } 465 }
480 466
481 hdr[12] = 0; /* priority */ 467 hdr[12] = 0; /* priority */
482 hdr[13] = hdr[14] = hdr[15] = 0; /* reserved */ 468 hdr[13] = hdr[14] = hdr[15] = 0; /* reserved */
483} 469}
484 470
485 471static int ieee80211_michael_mic_add(struct sk_buff *skb, int hdr_len,
486static int ieee80211_michael_mic_add(struct sk_buff *skb, int hdr_len, void *priv) 472 void *priv)
487{ 473{
488 struct ieee80211_tkip_data *tkey = priv; 474 struct ieee80211_tkip_data *tkey = priv;
489 u8 *pos; 475 u8 *pos;
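The michael_mic_hdr() switch above selects DA and SA for the Michael pseudo-header from the ToDS/FromDS bits; summarised from the code, together with the fixed tail it produces the 16-byte header the MIC is computed over:

/*
 *   ToDS  FromDS	DA		SA
 *   0     0		addr1		addr2
 *   1     0		addr3		addr2
 *   0     1		addr1		addr3
 *   1     1		addr3		addr4
 *
 * followed by hdr[12] = priority (0 here) and hdr[13..15] = 0 (reserved),
 * i.e. DA(6) + SA(6) + priority(1) + reserved(3) = 16 bytes.
 */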
@@ -504,11 +490,9 @@ static int ieee80211_michael_mic_add(struct sk_buff *skb, int hdr_len, void *pri
504 return 0; 490 return 0;
505} 491}
506 492
507
508#if WIRELESS_EXT >= 18 493#if WIRELESS_EXT >= 18
509static void ieee80211_michael_mic_failure(struct net_device *dev, 494static void ieee80211_michael_mic_failure(struct net_device *dev,
510 struct ieee80211_hdr *hdr, 495 struct ieee80211_hdr *hdr, int keyidx)
511 int keyidx)
512{ 496{
513 union iwreq_data wrqu; 497 union iwreq_data wrqu;
514 struct iw_michaelmicfailure ev; 498 struct iw_michaelmicfailure ev;
@@ -524,12 +508,11 @@ static void ieee80211_michael_mic_failure(struct net_device *dev,
524 memcpy(ev.src_addr.sa_data, hdr->addr2, ETH_ALEN); 508 memcpy(ev.src_addr.sa_data, hdr->addr2, ETH_ALEN);
525 memset(&wrqu, 0, sizeof(wrqu)); 509 memset(&wrqu, 0, sizeof(wrqu));
526 wrqu.data.length = sizeof(ev); 510 wrqu.data.length = sizeof(ev);
527 wireless_send_event(dev, IWEVMICHAELMICFAILURE, &wrqu, (char *) &ev); 511 wireless_send_event(dev, IWEVMICHAELMICFAILURE, &wrqu, (char *)&ev);
528} 512}
529#elif WIRELESS_EXT >= 15 513#elif WIRELESS_EXT >= 15
530static void ieee80211_michael_mic_failure(struct net_device *dev, 514static void ieee80211_michael_mic_failure(struct net_device *dev,
531 struct ieee80211_hdr *hdr, 515 struct ieee80211_hdr *hdr, int keyidx)
532 int keyidx)
533{ 516{
534 union iwreq_data wrqu; 517 union iwreq_data wrqu;
535 char buf[128]; 518 char buf[128];
@@ -542,17 +525,16 @@ static void ieee80211_michael_mic_failure(struct net_device *dev,
542 wrqu.data.length = strlen(buf); 525 wrqu.data.length = strlen(buf);
543 wireless_send_event(dev, IWEVCUSTOM, &wrqu, buf); 526 wireless_send_event(dev, IWEVCUSTOM, &wrqu, buf);
544} 527}
545#else /* WIRELESS_EXT >= 15 */ 528#else /* WIRELESS_EXT >= 15 */
546static inline void ieee80211_michael_mic_failure(struct net_device *dev, 529static inline void ieee80211_michael_mic_failure(struct net_device *dev,
547 struct ieee80211_hdr *hdr, 530 struct ieee80211_hdr *hdr,
548 int keyidx) 531 int keyidx)
549{ 532{
550} 533}
551#endif /* WIRELESS_EXT >= 15 */ 534#endif /* WIRELESS_EXT >= 15 */
552
553 535
554static int ieee80211_michael_mic_verify(struct sk_buff *skb, int keyidx, 536static int ieee80211_michael_mic_verify(struct sk_buff *skb, int keyidx,
555 int hdr_len, void *priv) 537 int hdr_len, void *priv)
556{ 538{
557 struct ieee80211_tkip_data *tkey = priv; 539 struct ieee80211_tkip_data *tkey = priv;
558 u8 mic[8]; 540 u8 mic[8];
@@ -566,7 +548,7 @@ static int ieee80211_michael_mic_verify(struct sk_buff *skb, int keyidx,
566 return -1; 548 return -1;
567 if (memcmp(mic, skb->data + skb->len - 8, 8) != 0) { 549 if (memcmp(mic, skb->data + skb->len - 8, 8) != 0) {
568 struct ieee80211_hdr *hdr; 550 struct ieee80211_hdr *hdr;
569 hdr = (struct ieee80211_hdr *) skb->data; 551 hdr = (struct ieee80211_hdr *)skb->data;
570 printk(KERN_DEBUG "%s: Michael MIC verification failed for " 552 printk(KERN_DEBUG "%s: Michael MIC verification failed for "
571 "MSDU from " MAC_FMT " keyidx=%d\n", 553 "MSDU from " MAC_FMT " keyidx=%d\n",
572 skb->dev ? skb->dev->name : "N/A", MAC_ARG(hdr->addr2), 554 skb->dev ? skb->dev->name : "N/A", MAC_ARG(hdr->addr2),
@@ -587,8 +569,7 @@ static int ieee80211_michael_mic_verify(struct sk_buff *skb, int keyidx,
587 return 0; 569 return 0;
588} 570}
589 571
590 572static int ieee80211_tkip_set_key(void *key, int len, u8 * seq, void *priv)
591static int ieee80211_tkip_set_key(void *key, int len, u8 *seq, void *priv)
592{ 573{
593 struct ieee80211_tkip_data *tkey = priv; 574 struct ieee80211_tkip_data *tkey = priv;
594 int keyidx; 575 int keyidx;
@@ -603,10 +584,10 @@ static int ieee80211_tkip_set_key(void *key, int len, u8 *seq, void *priv)
603 if (len == TKIP_KEY_LEN) { 584 if (len == TKIP_KEY_LEN) {
604 memcpy(tkey->key, key, TKIP_KEY_LEN); 585 memcpy(tkey->key, key, TKIP_KEY_LEN);
605 tkey->key_set = 1; 586 tkey->key_set = 1;
606 tkey->tx_iv16 = 1; /* TSC is initialized to 1 */ 587 tkey->tx_iv16 = 1; /* TSC is initialized to 1 */
607 if (seq) { 588 if (seq) {
608 tkey->rx_iv32 = (seq[5] << 24) | (seq[4] << 16) | 589 tkey->rx_iv32 = (seq[5] << 24) | (seq[4] << 16) |
609 (seq[3] << 8) | seq[2]; 590 (seq[3] << 8) | seq[2];
610 tkey->rx_iv16 = (seq[1] << 8) | seq[0]; 591 tkey->rx_iv16 = (seq[1] << 8) | seq[0];
611 } 592 }
612 } else if (len == 0) 593 } else if (len == 0)
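The seq[] parsing above fixes the external TSC byte order: seq[0..1] carry IV16 and seq[2..5] carry IV32, both least-significant byte first. For reference, the inverse packing (what a get_key-style export of the same counter would look like; iv16/iv32 are placeholder variables and the snippet is a sketch, not code from this patch):

	u8 seq[6];

	seq[0] = iv16 & 0xff;
	seq[1] = iv16 >> 8;
	seq[2] = iv32 & 0xff;
	seq[3] = (iv32 >> 8) & 0xff;
	seq[4] = (iv32 >> 16) & 0xff;
	seq[5] = (iv32 >> 24) & 0xff;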
@@ -617,8 +598,7 @@ static int ieee80211_tkip_set_key(void *key, int len, u8 *seq, void *priv)
617 return 0; 598 return 0;
618} 599}
619 600
620 601static int ieee80211_tkip_get_key(void *key, int len, u8 * seq, void *priv)
621static int ieee80211_tkip_get_key(void *key, int len, u8 *seq, void *priv)
622{ 602{
623 struct ieee80211_tkip_data *tkey = priv; 603 struct ieee80211_tkip_data *tkey = priv;
624 604
@@ -647,8 +627,7 @@ static int ieee80211_tkip_get_key(void *key, int len, u8 *seq, void *priv)
647 return TKIP_KEY_LEN; 627 return TKIP_KEY_LEN;
648} 628}
649 629
650 630static char *ieee80211_tkip_print_stats(char *p, void *priv)
651static char * ieee80211_tkip_print_stats(char *p, void *priv)
652{ 631{
653 struct ieee80211_tkip_data *tkip = priv; 632 struct ieee80211_tkip_data *tkip = priv;
654 p += sprintf(p, "key[%d] alg=TKIP key_set=%d " 633 p += sprintf(p, "key[%d] alg=TKIP key_set=%d "
@@ -674,7 +653,6 @@ static char * ieee80211_tkip_print_stats(char *p, void *priv)
674 return p; 653 return p;
675} 654}
676 655
677
678static struct ieee80211_crypto_ops ieee80211_crypt_tkip = { 656static struct ieee80211_crypto_ops ieee80211_crypt_tkip = {
679 .name = "TKIP", 657 .name = "TKIP",
680 .init = ieee80211_tkip_init, 658 .init = ieee80211_tkip_init,
@@ -686,23 +664,20 @@ static struct ieee80211_crypto_ops ieee80211_crypt_tkip = {
686 .set_key = ieee80211_tkip_set_key, 664 .set_key = ieee80211_tkip_set_key,
687 .get_key = ieee80211_tkip_get_key, 665 .get_key = ieee80211_tkip_get_key,
688 .print_stats = ieee80211_tkip_print_stats, 666 .print_stats = ieee80211_tkip_print_stats,
689 .extra_prefix_len = 4 + 4, /* IV + ExtIV */ 667 .extra_prefix_len = 4 + 4, /* IV + ExtIV */
690 .extra_postfix_len = 8 + 4, /* MIC + ICV */ 668 .extra_postfix_len = 8 + 4, /* MIC + ICV */
691 .owner = THIS_MODULE, 669 .owner = THIS_MODULE,
692}; 670};
693 671
694
695static int __init ieee80211_crypto_tkip_init(void) 672static int __init ieee80211_crypto_tkip_init(void)
696{ 673{
697 return ieee80211_register_crypto_ops(&ieee80211_crypt_tkip); 674 return ieee80211_register_crypto_ops(&ieee80211_crypt_tkip);
698} 675}
699 676
700
701static void __exit ieee80211_crypto_tkip_exit(void) 677static void __exit ieee80211_crypto_tkip_exit(void)
702{ 678{
703 ieee80211_unregister_crypto_ops(&ieee80211_crypt_tkip); 679 ieee80211_unregister_crypto_ops(&ieee80211_crypt_tkip);
704} 680}
705 681
706
707module_init(ieee80211_crypto_tkip_init); 682module_init(ieee80211_crypto_tkip_init);
708module_exit(ieee80211_crypto_tkip_exit); 683module_exit(ieee80211_crypto_tkip_exit);
diff --git a/net/ieee80211/ieee80211_crypt_wep.c b/net/ieee80211/ieee80211_crypt_wep.c
index bec1d3470d39..b4d2514a0902 100644
--- a/net/ieee80211/ieee80211_crypt_wep.c
+++ b/net/ieee80211/ieee80211_crypt_wep.c
@@ -20,7 +20,6 @@
20 20
21#include <net/ieee80211.h> 21#include <net/ieee80211.h>
22 22
23
24#include <linux/crypto.h> 23#include <linux/crypto.h>
25#include <asm/scatterlist.h> 24#include <asm/scatterlist.h>
26#include <linux/crc32.h> 25#include <linux/crc32.h>
@@ -29,7 +28,6 @@ MODULE_AUTHOR("Jouni Malinen");
29MODULE_DESCRIPTION("Host AP crypt: WEP"); 28MODULE_DESCRIPTION("Host AP crypt: WEP");
30MODULE_LICENSE("GPL"); 29MODULE_LICENSE("GPL");
31 30
32
33struct prism2_wep_data { 31struct prism2_wep_data {
34 u32 iv; 32 u32 iv;
35#define WEP_KEY_LEN 13 33#define WEP_KEY_LEN 13
@@ -39,8 +37,7 @@ struct prism2_wep_data {
39 struct crypto_tfm *tfm; 37 struct crypto_tfm *tfm;
40}; 38};
41 39
42 40static void *prism2_wep_init(int keyidx)
43static void * prism2_wep_init(int keyidx)
44{ 41{
45 struct prism2_wep_data *priv; 42 struct prism2_wep_data *priv;
46 43
@@ -62,7 +59,7 @@ static void * prism2_wep_init(int keyidx)
62 59
63 return priv; 60 return priv;
64 61
65fail: 62 fail:
66 if (priv) { 63 if (priv) {
67 if (priv->tfm) 64 if (priv->tfm)
68 crypto_free_tfm(priv->tfm); 65 crypto_free_tfm(priv->tfm);
@@ -71,7 +68,6 @@ fail:
71 return NULL; 68 return NULL;
72} 69}
73 70
74
75static void prism2_wep_deinit(void *priv) 71static void prism2_wep_deinit(void *priv)
76{ 72{
77 struct prism2_wep_data *_priv = priv; 73 struct prism2_wep_data *_priv = priv;
@@ -80,7 +76,6 @@ static void prism2_wep_deinit(void *priv)
80 kfree(priv); 76 kfree(priv);
81} 77}
82 78
83
84/* Perform WEP encryption on given skb that has at least 4 bytes of headroom 79/* Perform WEP encryption on given skb that has at least 4 bytes of headroom
85 * for IV and 4 bytes of tailroom for ICV. Both IV and ICV will be transmitted, 80 * for IV and 4 bytes of tailroom for ICV. Both IV and ICV will be transmitted,
86 * so the payload length increases with 8 bytes. 81 * so the payload length increases with 8 bytes.
@@ -143,7 +138,6 @@ static int prism2_wep_encrypt(struct sk_buff *skb, int hdr_len, void *priv)
143 return 0; 138 return 0;
144} 139}
145 140
146
147/* Perform WEP decryption on given buffer. Buffer includes whole WEP part of 141/* Perform WEP decryption on given buffer. Buffer includes whole WEP part of
148 * the frame: IV (4 bytes), encrypted payload (including SNAP header), 142 * the frame: IV (4 bytes), encrypted payload (including SNAP header),
149 * ICV (4 bytes). len includes both IV and ICV. 143 * ICV (4 bytes). len includes both IV and ICV.
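The two comments above describe the same framing from both directions: WEP adds a 4-byte prefix (three IV bytes plus a key-index byte) and a 4-byte CRC-32 ICV, which is why the ops structure below advertises extra_prefix_len = 4 and extra_postfix_len = 4. Roughly:

/*
 *   [ 802.11 hdr ][ IV0 IV1 IV2 | keyidx<<6 ][ RC4( payload + CRC32 ) ]
 *                  \------- 4-byte prefix --/           \- 4-byte ICV -/
 *
 * Only the payload and the ICV are RC4-encrypted; the IV travels in clear.
 */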
@@ -202,8 +196,7 @@ static int prism2_wep_decrypt(struct sk_buff *skb, int hdr_len, void *priv)
202 return 0; 196 return 0;
203} 197}
204 198
205 199static int prism2_wep_set_key(void *key, int len, u8 * seq, void *priv)
206static int prism2_wep_set_key(void *key, int len, u8 *seq, void *priv)
207{ 200{
208 struct prism2_wep_data *wep = priv; 201 struct prism2_wep_data *wep = priv;
209 202
@@ -216,8 +209,7 @@ static int prism2_wep_set_key(void *key, int len, u8 *seq, void *priv)
216 return 0; 209 return 0;
217} 210}
218 211
219 212static int prism2_wep_get_key(void *key, int len, u8 * seq, void *priv)
220static int prism2_wep_get_key(void *key, int len, u8 *seq, void *priv)
221{ 213{
222 struct prism2_wep_data *wep = priv; 214 struct prism2_wep_data *wep = priv;
223 215
@@ -229,16 +221,13 @@ static int prism2_wep_get_key(void *key, int len, u8 *seq, void *priv)
229 return wep->key_len; 221 return wep->key_len;
230} 222}
231 223
232 224static char *prism2_wep_print_stats(char *p, void *priv)
233static char * prism2_wep_print_stats(char *p, void *priv)
234{ 225{
235 struct prism2_wep_data *wep = priv; 226 struct prism2_wep_data *wep = priv;
236 p += sprintf(p, "key[%d] alg=WEP len=%d\n", 227 p += sprintf(p, "key[%d] alg=WEP len=%d\n", wep->key_idx, wep->key_len);
237 wep->key_idx, wep->key_len);
238 return p; 228 return p;
239} 229}
240 230
241
242static struct ieee80211_crypto_ops ieee80211_crypt_wep = { 231static struct ieee80211_crypto_ops ieee80211_crypt_wep = {
243 .name = "WEP", 232 .name = "WEP",
244 .init = prism2_wep_init, 233 .init = prism2_wep_init,
@@ -250,23 +239,20 @@ static struct ieee80211_crypto_ops ieee80211_crypt_wep = {
250 .set_key = prism2_wep_set_key, 239 .set_key = prism2_wep_set_key,
251 .get_key = prism2_wep_get_key, 240 .get_key = prism2_wep_get_key,
252 .print_stats = prism2_wep_print_stats, 241 .print_stats = prism2_wep_print_stats,
253 .extra_prefix_len = 4, /* IV */ 242 .extra_prefix_len = 4, /* IV */
254 .extra_postfix_len = 4, /* ICV */ 243 .extra_postfix_len = 4, /* ICV */
255 .owner = THIS_MODULE, 244 .owner = THIS_MODULE,
256}; 245};
257 246
258
259static int __init ieee80211_crypto_wep_init(void) 247static int __init ieee80211_crypto_wep_init(void)
260{ 248{
261 return ieee80211_register_crypto_ops(&ieee80211_crypt_wep); 249 return ieee80211_register_crypto_ops(&ieee80211_crypt_wep);
262} 250}
263 251
264
265static void __exit ieee80211_crypto_wep_exit(void) 252static void __exit ieee80211_crypto_wep_exit(void)
266{ 253{
267 ieee80211_unregister_crypto_ops(&ieee80211_crypt_wep); 254 ieee80211_unregister_crypto_ops(&ieee80211_crypt_wep);
268} 255}
269 256
270
271module_init(ieee80211_crypto_wep_init); 257module_init(ieee80211_crypto_wep_init);
272module_exit(ieee80211_crypto_wep_exit); 258module_exit(ieee80211_crypto_wep_exit);
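To show how a consumer ties these pieces together, a hypothetical caller sketch using only names visible in this patch (keyidx, key and key_len are placeholder variables; real users go through struct ieee80211_crypt_data):

	struct ieee80211_crypto_ops *ops = ieee80211_get_crypto_ops("WEP");

	if (ops && try_module_get(ops->owner)) {
		void *priv = ops->init(keyidx);		/* per-key private state */
		int head = ops->extra_prefix_len;	/* skb headroom needed (IV) */
		int tail = ops->extra_postfix_len;	/* skb tailroom needed (ICV) */

		ops->set_key(key, key_len, NULL, priv);	/* seq may be NULL, as above */
		/* reserve head/tail in each tx skb, then run the cipher's mpdu hooks */
		ops->deinit(priv);
		module_put(ops->owner);
	}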
diff --git a/net/ieee80211/ieee80211_module.c b/net/ieee80211/ieee80211_module.c
index 553acb2e93d5..03a47343ddc7 100644
--- a/net/ieee80211/ieee80211_module.c
+++ b/net/ieee80211/ieee80211_module.c
@@ -54,7 +54,8 @@
54#include <net/ieee80211.h> 54#include <net/ieee80211.h>
55 55
56MODULE_DESCRIPTION("802.11 data/management/control stack"); 56MODULE_DESCRIPTION("802.11 data/management/control stack");
57MODULE_AUTHOR("Copyright (C) 2004 Intel Corporation <jketreno@linux.intel.com>"); 57MODULE_AUTHOR
58 ("Copyright (C) 2004 Intel Corporation <jketreno@linux.intel.com>");
58MODULE_LICENSE("GPL"); 59MODULE_LICENSE("GPL");
59 60
60#define DRV_NAME "ieee80211" 61#define DRV_NAME "ieee80211"
@@ -64,9 +65,9 @@ static inline int ieee80211_networks_allocate(struct ieee80211_device *ieee)
64 if (ieee->networks) 65 if (ieee->networks)
65 return 0; 66 return 0;
66 67
67 ieee->networks = kmalloc( 68 ieee->networks =
68 MAX_NETWORK_COUNT * sizeof(struct ieee80211_network), 69 kmalloc(MAX_NETWORK_COUNT * sizeof(struct ieee80211_network),
69 GFP_KERNEL); 70 GFP_KERNEL);
70 if (!ieee->networks) { 71 if (!ieee->networks) {
71 printk(KERN_WARNING "%s: Out of memory allocating beacons\n", 72 printk(KERN_WARNING "%s: Out of memory allocating beacons\n",
72 ieee->dev->name); 73 ieee->dev->name);
@@ -94,10 +95,10 @@ static inline void ieee80211_networks_initialize(struct ieee80211_device *ieee)
94 INIT_LIST_HEAD(&ieee->network_free_list); 95 INIT_LIST_HEAD(&ieee->network_free_list);
95 INIT_LIST_HEAD(&ieee->network_list); 96 INIT_LIST_HEAD(&ieee->network_list);
96 for (i = 0; i < MAX_NETWORK_COUNT; i++) 97 for (i = 0; i < MAX_NETWORK_COUNT; i++)
97 list_add_tail(&ieee->networks[i].list, &ieee->network_free_list); 98 list_add_tail(&ieee->networks[i].list,
99 &ieee->network_free_list);
98} 100}
99 101
100
101struct net_device *alloc_ieee80211(int sizeof_priv) 102struct net_device *alloc_ieee80211(int sizeof_priv)
102{ 103{
103 struct ieee80211_device *ieee; 104 struct ieee80211_device *ieee;
@@ -118,8 +119,7 @@ struct net_device *alloc_ieee80211(int sizeof_priv)
118 119
119 err = ieee80211_networks_allocate(ieee); 120 err = ieee80211_networks_allocate(ieee);
120 if (err) { 121 if (err) {
121 IEEE80211_ERROR("Unable to allocate beacon storage: %d\n", 122 IEEE80211_ERROR("Unable to allocate beacon storage: %d\n", err);
122 err);
123 goto failed; 123 goto failed;
124 } 124 }
125 ieee80211_networks_initialize(ieee); 125 ieee80211_networks_initialize(ieee);
@@ -132,7 +132,7 @@ struct net_device *alloc_ieee80211(int sizeof_priv)
132 /* Default to enabling full open WEP with host based encrypt/decrypt */ 132 /* Default to enabling full open WEP with host based encrypt/decrypt */
133 ieee->host_encrypt = 1; 133 ieee->host_encrypt = 1;
134 ieee->host_decrypt = 1; 134 ieee->host_decrypt = 1;
135 ieee->ieee802_1x = 1; /* Default to supporting 802.1x */ 135 ieee->ieee802_1x = 1; /* Default to supporting 802.1x */
136 136
137 INIT_LIST_HEAD(&ieee->crypt_deinit_list); 137 INIT_LIST_HEAD(&ieee->crypt_deinit_list);
138 init_timer(&ieee->crypt_deinit_timer); 138 init_timer(&ieee->crypt_deinit_timer);
@@ -141,21 +141,20 @@ struct net_device *alloc_ieee80211(int sizeof_priv)
141 141
142 spin_lock_init(&ieee->lock); 142 spin_lock_init(&ieee->lock);
143 143
144 ieee->wpa_enabled = 0; 144 ieee->wpa_enabled = 0;
145 ieee->tkip_countermeasures = 0; 145 ieee->tkip_countermeasures = 0;
146 ieee->drop_unencrypted = 0; 146 ieee->drop_unencrypted = 0;
147 ieee->privacy_invoked = 0; 147 ieee->privacy_invoked = 0;
148 ieee->ieee802_1x = 1; 148 ieee->ieee802_1x = 1;
149 149
150 return dev; 150 return dev;
151 151
152 failed: 152 failed:
153 if (dev) 153 if (dev)
154 free_netdev(dev); 154 free_netdev(dev);
155 return NULL; 155 return NULL;
156} 156}
157 157
158
159void free_ieee80211(struct net_device *dev) 158void free_ieee80211(struct net_device *dev)
160{ 159{
161 struct ieee80211_device *ieee = netdev_priv(dev); 160 struct ieee80211_device *ieee = netdev_priv(dev);
@@ -193,7 +192,7 @@ static int show_debug_level(char *page, char **start, off_t offset,
193 return snprintf(page, count, "0x%08X\n", ieee80211_debug_level); 192 return snprintf(page, count, "0x%08X\n", ieee80211_debug_level);
194} 193}
195 194
196static int store_debug_level(struct file *file, const char __user *buffer, 195static int store_debug_level(struct file *file, const char __user * buffer,
197 unsigned long count, void *data) 196 unsigned long count, void *data)
198{ 197{
199 char buf[] = "0x00000000"; 198 char buf[] = "0x00000000";
@@ -264,13 +263,12 @@ static void __exit ieee80211_exit(void)
264module_param(debug, int, 0444); 263module_param(debug, int, 0444);
265MODULE_PARM_DESC(debug, "debug output mask"); 264MODULE_PARM_DESC(debug, "debug output mask");
266 265
267
268module_exit(ieee80211_exit); 266module_exit(ieee80211_exit);
269module_init(ieee80211_init); 267module_init(ieee80211_init);
270#endif 268#endif
271 269
272 270const char *escape_essid(const char *essid, u8 essid_len)
273const char *escape_essid(const char *essid, u8 essid_len) { 271{
274 static char escaped[IW_ESSID_MAX_SIZE * 2 + 1]; 272 static char escaped[IW_ESSID_MAX_SIZE * 2 + 1];
275 const char *s = essid; 273 const char *s = essid;
276 char *d = escaped; 274 char *d = escaped;
@@ -280,7 +278,7 @@ const char *escape_essid(const char *essid, u8 essid_len) {
280 return escaped; 278 return escaped;
281 } 279 }
282 280
283 essid_len = min(essid_len, (u8)IW_ESSID_MAX_SIZE); 281 essid_len = min(essid_len, (u8) IW_ESSID_MAX_SIZE);
284 while (essid_len--) { 282 while (essid_len--) {
285 if (*s == '\0') { 283 if (*s == '\0') {
286 *d++ = '\\'; 284 *d++ = '\\';
diff --git a/net/ieee80211/ieee80211_rx.c b/net/ieee80211/ieee80211_rx.c
index a5905f53aed7..f7dcd854139e 100644
--- a/net/ieee80211/ieee80211_rx.c
+++ b/net/ieee80211/ieee80211_rx.c
@@ -52,11 +52,14 @@ static inline void ieee80211_monitor_rx(struct ieee80211_device *ieee,
52 netif_rx(skb); 52 netif_rx(skb);
53} 53}
54 54
55
56/* Called only as a tasklet (software IRQ) */ 55/* Called only as a tasklet (software IRQ) */
57static struct ieee80211_frag_entry * 56static struct ieee80211_frag_entry *ieee80211_frag_cache_find(struct
58ieee80211_frag_cache_find(struct ieee80211_device *ieee, unsigned int seq, 57 ieee80211_device
59 unsigned int frag, u8 *src, u8 *dst) 58 *ieee,
59 unsigned int seq,
60 unsigned int frag,
61 u8 * src,
62 u8 * dst)
60{ 63{
61 struct ieee80211_frag_entry *entry; 64 struct ieee80211_frag_entry *entry;
62 int i; 65 int i;
@@ -65,10 +68,9 @@ ieee80211_frag_cache_find(struct ieee80211_device *ieee, unsigned int seq,
65 entry = &ieee->frag_cache[i]; 68 entry = &ieee->frag_cache[i];
66 if (entry->skb != NULL && 69 if (entry->skb != NULL &&
67 time_after(jiffies, entry->first_frag_time + 2 * HZ)) { 70 time_after(jiffies, entry->first_frag_time + 2 * HZ)) {
68 IEEE80211_DEBUG_FRAG( 71 IEEE80211_DEBUG_FRAG("expiring fragment cache entry "
69 "expiring fragment cache entry " 72 "seq=%u last_frag=%u\n",
70 "seq=%u last_frag=%u\n", 73 entry->seq, entry->last_frag);
71 entry->seq, entry->last_frag);
72 dev_kfree_skb_any(entry->skb); 74 dev_kfree_skb_any(entry->skb);
73 entry->skb = NULL; 75 entry->skb = NULL;
74 } 76 }
@@ -84,9 +86,8 @@ ieee80211_frag_cache_find(struct ieee80211_device *ieee, unsigned int seq,
84} 86}
85 87
86/* Called only as a tasklet (software IRQ) */ 88/* Called only as a tasklet (software IRQ) */
87static struct sk_buff * 89static struct sk_buff *ieee80211_frag_cache_get(struct ieee80211_device *ieee,
88ieee80211_frag_cache_get(struct ieee80211_device *ieee, 90 struct ieee80211_hdr *hdr)
89 struct ieee80211_hdr *hdr)
90{ 91{
91 struct sk_buff *skb = NULL; 92 struct sk_buff *skb = NULL;
92 u16 sc; 93 u16 sc;
@@ -101,9 +102,9 @@ ieee80211_frag_cache_get(struct ieee80211_device *ieee,
101 /* Reserve enough space to fit maximum frame length */ 102 /* Reserve enough space to fit maximum frame length */
102 skb = dev_alloc_skb(ieee->dev->mtu + 103 skb = dev_alloc_skb(ieee->dev->mtu +
103 sizeof(struct ieee80211_hdr) + 104 sizeof(struct ieee80211_hdr) +
104 8 /* LLC */ + 105 8 /* LLC */ +
105 2 /* alignment */ + 106 2 /* alignment */ +
106 8 /* WEP */ + ETH_ALEN /* WDS */); 107 8 /* WEP */ + ETH_ALEN /* WDS */ );
107 if (skb == NULL) 108 if (skb == NULL)
108 return NULL; 109 return NULL;
109 110
@@ -135,7 +136,6 @@ ieee80211_frag_cache_get(struct ieee80211_device *ieee,
135 return skb; 136 return skb;
136} 137}
137 138
138
139/* Called only as a tasklet (software IRQ) */ 139/* Called only as a tasklet (software IRQ) */
140static int ieee80211_frag_cache_invalidate(struct ieee80211_device *ieee, 140static int ieee80211_frag_cache_invalidate(struct ieee80211_device *ieee,
141 struct ieee80211_hdr *hdr) 141 struct ieee80211_hdr *hdr)
@@ -151,9 +151,8 @@ static int ieee80211_frag_cache_invalidate(struct ieee80211_device *ieee,
151 hdr->addr1); 151 hdr->addr1);
152 152
153 if (entry == NULL) { 153 if (entry == NULL) {
154 IEEE80211_DEBUG_FRAG( 154 IEEE80211_DEBUG_FRAG("could not invalidate fragment cache "
155 "could not invalidate fragment cache " 155 "entry (seq=%u)\n", seq);
156 "entry (seq=%u)\n", seq);
157 return -1; 156 return -1;
158 } 157 }
159 158
@@ -161,7 +160,6 @@ static int ieee80211_frag_cache_invalidate(struct ieee80211_device *ieee,
161 return 0; 160 return 0;
162} 161}
163 162
164
165#ifdef NOT_YET 163#ifdef NOT_YET
166/* ieee80211_rx_frame_mgtmt 164/* ieee80211_rx_frame_mgtmt
167 * 165 *
@@ -201,7 +199,7 @@ ieee80211_rx_frame_mgmt(struct ieee80211_device *ieee, struct sk_buff *skb,
201 return 0; 199 return 0;
202 } 200 }
203 201
204 if (ieee->iw_mode == IW_MODE_MASTER) { 202 if (ieee->iw_mode == IW_MODE_MASTER) {
205 if (type != WLAN_FC_TYPE_MGMT && type != WLAN_FC_TYPE_CTRL) { 203 if (type != WLAN_FC_TYPE_MGMT && type != WLAN_FC_TYPE_CTRL) {
206 printk(KERN_DEBUG "%s: unknown management frame " 204 printk(KERN_DEBUG "%s: unknown management frame "
207 "(type=0x%02x, stype=0x%02x) dropped\n", 205 "(type=0x%02x, stype=0x%02x) dropped\n",
@@ -219,14 +217,13 @@ ieee80211_rx_frame_mgmt(struct ieee80211_device *ieee, struct sk_buff *skb,
219} 217}
220#endif 218#endif
221 219
222
223/* See IEEE 802.1H for LLC/SNAP encapsulation/decapsulation */ 220/* See IEEE 802.1H for LLC/SNAP encapsulation/decapsulation */
224/* Ethernet-II snap header (RFC1042 for most EtherTypes) */ 221/* Ethernet-II snap header (RFC1042 for most EtherTypes) */
225static unsigned char rfc1042_header[] = 222static unsigned char rfc1042_header[] = { 0xaa, 0xaa, 0x03, 0x00, 0x00, 0x00 };
226{ 0xaa, 0xaa, 0x03, 0x00, 0x00, 0x00 }; 223
227/* Bridge-Tunnel header (for EtherTypes ETH_P_AARP and ETH_P_IPX) */ 224/* Bridge-Tunnel header (for EtherTypes ETH_P_AARP and ETH_P_IPX) */
228static unsigned char bridge_tunnel_header[] = 225static unsigned char bridge_tunnel_header[] =
229{ 0xaa, 0xaa, 0x03, 0x00, 0x00, 0xf8 }; 226 { 0xaa, 0xaa, 0x03, 0x00, 0x00, 0xf8 };
230/* No encapsulation header if EtherType < 0x600 (=length) */ 227/* No encapsulation header if EtherType < 0x600 (=length) */
231 228
232/* Called by ieee80211_rx_frame_decrypt */ 229/* Called by ieee80211_rx_frame_decrypt */
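The two SNAP headers above drive 802.1H decapsulation on receive; the logic itself is outside this hunk, but roughly (a sketch, assuming payload points just past the 802.11 header and hdrlen/skb are as in ieee80211_rx()):

	u8 *payload = skb->data + hdrlen;

	if (skb->len - hdrlen >= 8 &&
	    (memcmp(payload, rfc1042_header, 6) == 0 ||
	     memcmp(payload, bridge_tunnel_header, 6) == 0)) {
		/* LLC/SNAP present: the real EtherType follows the 6-byte
		 * header; strip the SNAP bytes and keep the EtherType. */
	} else {
		/* EtherType < 0x600: the field is an 802.3 length, per the
		 * comment above; no encapsulation header to strip. */
	}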
@@ -241,7 +238,7 @@ static int ieee80211_is_eapol_frame(struct ieee80211_device *ieee,
241 if (skb->len < 24) 238 if (skb->len < 24)
242 return 0; 239 return 0;
243 240
244 hdr = (struct ieee80211_hdr *) skb->data; 241 hdr = (struct ieee80211_hdr *)skb->data;
245 fc = le16_to_cpu(hdr->frame_ctl); 242 fc = le16_to_cpu(hdr->frame_ctl);
246 243
247 /* check that the frame is unicast frame to us */ 244 /* check that the frame is unicast frame to us */
@@ -271,7 +268,7 @@ static int ieee80211_is_eapol_frame(struct ieee80211_device *ieee,
271 268
272/* Called only as a tasklet (software IRQ), by ieee80211_rx */ 269/* Called only as a tasklet (software IRQ), by ieee80211_rx */
273static inline int 270static inline int
274ieee80211_rx_frame_decrypt(struct ieee80211_device* ieee, struct sk_buff *skb, 271ieee80211_rx_frame_decrypt(struct ieee80211_device *ieee, struct sk_buff *skb,
275 struct ieee80211_crypt_data *crypt) 272 struct ieee80211_crypt_data *crypt)
276{ 273{
277 struct ieee80211_hdr *hdr; 274 struct ieee80211_hdr *hdr;
@@ -280,12 +277,11 @@ ieee80211_rx_frame_decrypt(struct ieee80211_device* ieee, struct sk_buff *skb,
280 if (crypt == NULL || crypt->ops->decrypt_mpdu == NULL) 277 if (crypt == NULL || crypt->ops->decrypt_mpdu == NULL)
281 return 0; 278 return 0;
282 279
283 hdr = (struct ieee80211_hdr *) skb->data; 280 hdr = (struct ieee80211_hdr *)skb->data;
284 hdrlen = ieee80211_get_hdrlen(le16_to_cpu(hdr->frame_ctl)); 281 hdrlen = ieee80211_get_hdrlen(le16_to_cpu(hdr->frame_ctl));
285 282
286#ifdef CONFIG_IEEE80211_CRYPT_TKIP 283#ifdef CONFIG_IEEE80211_CRYPT_TKIP
287 if (ieee->tkip_countermeasures && 284 if (ieee->tkip_countermeasures && strcmp(crypt->ops->name, "TKIP") == 0) {
288 strcmp(crypt->ops->name, "TKIP") == 0) {
289 if (net_ratelimit()) { 285 if (net_ratelimit()) {
290 printk(KERN_DEBUG "%s: TKIP countermeasures: dropped " 286 printk(KERN_DEBUG "%s: TKIP countermeasures: dropped "
291 "received packet from " MAC_FMT "\n", 287 "received packet from " MAC_FMT "\n",
@@ -299,9 +295,8 @@ ieee80211_rx_frame_decrypt(struct ieee80211_device* ieee, struct sk_buff *skb,
299 res = crypt->ops->decrypt_mpdu(skb, hdrlen, crypt->priv); 295 res = crypt->ops->decrypt_mpdu(skb, hdrlen, crypt->priv);
300 atomic_dec(&crypt->refcnt); 296 atomic_dec(&crypt->refcnt);
301 if (res < 0) { 297 if (res < 0) {
302 IEEE80211_DEBUG_DROP( 298 IEEE80211_DEBUG_DROP("decryption failed (SA=" MAC_FMT
303 "decryption failed (SA=" MAC_FMT 299 ") res=%d\n", MAC_ARG(hdr->addr2), res);
304 ") res=%d\n", MAC_ARG(hdr->addr2), res);
305 if (res == -2) 300 if (res == -2)
306 IEEE80211_DEBUG_DROP("Decryption failed ICV " 301 IEEE80211_DEBUG_DROP("Decryption failed ICV "
307 "mismatch (key %d)\n", 302 "mismatch (key %d)\n",
@@ -313,11 +308,11 @@ ieee80211_rx_frame_decrypt(struct ieee80211_device* ieee, struct sk_buff *skb,
313 return res; 308 return res;
314} 309}
315 310
316
317/* Called only as a tasklet (software IRQ), by ieee80211_rx */ 311/* Called only as a tasklet (software IRQ), by ieee80211_rx */
318static inline int 312static inline int
319ieee80211_rx_frame_decrypt_msdu(struct ieee80211_device* ieee, struct sk_buff *skb, 313ieee80211_rx_frame_decrypt_msdu(struct ieee80211_device *ieee,
320 int keyidx, struct ieee80211_crypt_data *crypt) 314 struct sk_buff *skb, int keyidx,
315 struct ieee80211_crypt_data *crypt)
321{ 316{
322 struct ieee80211_hdr *hdr; 317 struct ieee80211_hdr *hdr;
323 int res, hdrlen; 318 int res, hdrlen;
@@ -325,7 +320,7 @@ ieee80211_rx_frame_decrypt_msdu(struct ieee80211_device* ieee, struct sk_buff *s
325 if (crypt == NULL || crypt->ops->decrypt_msdu == NULL) 320 if (crypt == NULL || crypt->ops->decrypt_msdu == NULL)
326 return 0; 321 return 0;
327 322
328 hdr = (struct ieee80211_hdr *) skb->data; 323 hdr = (struct ieee80211_hdr *)skb->data;
329 hdrlen = ieee80211_get_hdrlen(le16_to_cpu(hdr->frame_ctl)); 324 hdrlen = ieee80211_get_hdrlen(le16_to_cpu(hdr->frame_ctl));
330 325
331 atomic_inc(&crypt->refcnt); 326 atomic_inc(&crypt->refcnt);
@@ -341,7 +336,6 @@ ieee80211_rx_frame_decrypt_msdu(struct ieee80211_device* ieee, struct sk_buff *s
341 return 0; 336 return 0;
342} 337}
343 338
344
345/* All received frames are sent to this function. @skb contains the frame in 339/* All received frames are sent to this function. @skb contains the frame in
346 * IEEE 802.11 format, i.e., in the format it was sent over air. 340 * IEEE 802.11 format, i.e., in the format it was sent over air.
347 * This function is called only as a tasklet (software IRQ). */ 341 * This function is called only as a tasklet (software IRQ). */
@@ -373,8 +367,7 @@ int ieee80211_rx(struct ieee80211_device *ieee, struct sk_buff *skb,
373 stats = &ieee->stats; 367 stats = &ieee->stats;
374 368
375 if (skb->len < 10) { 369 if (skb->len < 10) {
376 printk(KERN_INFO "%s: SKB length < 10\n", 370 printk(KERN_INFO "%s: SKB length < 10\n", dev->name);
377 dev->name);
378 goto rx_dropped; 371 goto rx_dropped;
379 } 372 }
380 373
@@ -399,8 +392,8 @@ int ieee80211_rx(struct ieee80211_device *ieee, struct sk_buff *skb,
399 /* Update spy records */ 392 /* Update spy records */
400 wireless_spy_update(dev, hdr->addr2, &wstats); 393 wireless_spy_update(dev, hdr->addr2, &wstats);
401 } 394 }
402#endif /* IW_WIRELESS_SPY */ 395#endif /* IW_WIRELESS_SPY */
403#endif /* WIRELESS_EXT > 15 */ 396#endif /* WIRELESS_EXT > 15 */
404 hostap_update_rx_stats(local->ap, hdr, rx_stats); 397 hostap_update_rx_stats(local->ap, hdr, rx_stats);
405#endif 398#endif
406 399
@@ -429,8 +422,8 @@ int ieee80211_rx(struct ieee80211_device *ieee, struct sk_buff *skb,
429 * stations that do not support WEP key mapping). */ 422 * stations that do not support WEP key mapping). */
430 423
431 if (!(hdr->addr1[0] & 0x01) || local->bcrx_sta_key) 424 if (!(hdr->addr1[0] & 0x01) || local->bcrx_sta_key)
432 (void) hostap_handle_sta_crypto(local, hdr, &crypt, 425 (void)hostap_handle_sta_crypto(local, hdr, &crypt,
433 &sta); 426 &sta);
434#endif 427#endif
435 428
436 /* allow NULL decrypt to indicate an station specific override 429 /* allow NULL decrypt to indicate an station specific override
@@ -451,13 +444,11 @@ int ieee80211_rx(struct ieee80211_device *ieee, struct sk_buff *skb,
451 goto rx_dropped; 444 goto rx_dropped;
452 } 445 }
453 } 446 }
454
455#ifdef NOT_YET 447#ifdef NOT_YET
456 if (type != WLAN_FC_TYPE_DATA) { 448 if (type != WLAN_FC_TYPE_DATA) {
457 if (type == WLAN_FC_TYPE_MGMT && stype == WLAN_FC_STYPE_AUTH && 449 if (type == WLAN_FC_TYPE_MGMT && stype == WLAN_FC_STYPE_AUTH &&
458 fc & IEEE80211_FCTL_PROTECTED && ieee->host_decrypt && 450 fc & IEEE80211_FCTL_PROTECTED && ieee->host_decrypt &&
459 (keyidx = hostap_rx_frame_decrypt(ieee, skb, crypt)) < 0) 451 (keyidx = hostap_rx_frame_decrypt(ieee, skb, crypt)) < 0) {
460 {
461 printk(KERN_DEBUG "%s: failed to decrypt mgmt::auth " 452 printk(KERN_DEBUG "%s: failed to decrypt mgmt::auth "
462 "from " MAC_FMT "\n", dev->name, 453 "from " MAC_FMT "\n", dev->name,
463 MAC_ARG(hdr->addr2)); 454 MAC_ARG(hdr->addr2));
@@ -507,9 +498,9 @@ int ieee80211_rx(struct ieee80211_device *ieee, struct sk_buff *skb,
507 } 498 }
508 499
509 if (ieee->iw_mode == IW_MODE_MASTER && !wds && 500 if (ieee->iw_mode == IW_MODE_MASTER && !wds &&
510 (fc & (IEEE80211_FCTL_TODS | IEEE80211_FCTL_FROMDS)) == IEEE80211_FCTL_FROMDS && 501 (fc & (IEEE80211_FCTL_TODS | IEEE80211_FCTL_FROMDS)) ==
511 ieee->stadev && 502 IEEE80211_FCTL_FROMDS && ieee->stadev
512 memcmp(hdr->addr2, ieee->assoc_ap_addr, ETH_ALEN) == 0) { 503 && memcmp(hdr->addr2, ieee->assoc_ap_addr, ETH_ALEN) == 0) {
513 /* Frame from BSSID of the AP for which we are a client */ 504 /* Frame from BSSID of the AP for which we are a client */
514 skb->dev = dev = ieee->stadev; 505 skb->dev = dev = ieee->stadev;
515 stats = hostap_get_stats(dev); 506 stats = hostap_get_stats(dev);
@@ -521,8 +512,7 @@ int ieee80211_rx(struct ieee80211_device *ieee, struct sk_buff *skb,
521 512
522#ifdef NOT_YET 513#ifdef NOT_YET
523 if ((ieee->iw_mode == IW_MODE_MASTER || 514 if ((ieee->iw_mode == IW_MODE_MASTER ||
524 ieee->iw_mode == IW_MODE_REPEAT) && 515 ieee->iw_mode == IW_MODE_REPEAT) && !from_assoc_ap) {
525 !from_assoc_ap) {
526 switch (hostap_handle_sta_rx(ieee, dev, skb, rx_stats, 516 switch (hostap_handle_sta_rx(ieee, dev, skb, rx_stats,
527 wds != NULL)) { 517 wds != NULL)) {
528 case AP_RX_CONTINUE_NOT_AUTHORIZED: 518 case AP_RX_CONTINUE_NOT_AUTHORIZED:
@@ -546,11 +536,10 @@ int ieee80211_rx(struct ieee80211_device *ieee, struct sk_buff *skb,
546 stype != IEEE80211_STYPE_DATA_CFPOLL && 536 stype != IEEE80211_STYPE_DATA_CFPOLL &&
547 stype != IEEE80211_STYPE_DATA_CFACKPOLL) { 537 stype != IEEE80211_STYPE_DATA_CFACKPOLL) {
548 if (stype != IEEE80211_STYPE_NULLFUNC) 538 if (stype != IEEE80211_STYPE_NULLFUNC)
549 IEEE80211_DEBUG_DROP( 539 IEEE80211_DEBUG_DROP("RX: dropped data frame "
550 "RX: dropped data frame " 540 "with no data (type=0x%02x, "
551 "with no data (type=0x%02x, " 541 "subtype=0x%02x, len=%d)\n",
552 "subtype=0x%02x, len=%d)\n", 542 type, stype, skb->len);
553 type, stype, skb->len);
554 goto rx_dropped; 543 goto rx_dropped;
555 } 544 }
556 545
@@ -560,7 +549,7 @@ int ieee80211_rx(struct ieee80211_device *ieee, struct sk_buff *skb,
560 (keyidx = ieee80211_rx_frame_decrypt(ieee, skb, crypt)) < 0) 549 (keyidx = ieee80211_rx_frame_decrypt(ieee, skb, crypt)) < 0)
561 goto rx_dropped; 550 goto rx_dropped;
562 551
563 hdr = (struct ieee80211_hdr *) skb->data; 552 hdr = (struct ieee80211_hdr *)skb->data;
564 553
565 /* skb: hdr + (possibly fragmented) plaintext payload */ 554 /* skb: hdr + (possibly fragmented) plaintext payload */
566 // PR: FIXME: hostap has additional conditions in the "if" below: 555 // PR: FIXME: hostap has additional conditions in the "if" below:
@@ -614,7 +603,7 @@ int ieee80211_rx(struct ieee80211_device *ieee, struct sk_buff *skb,
614 /* this was the last fragment and the frame will be 603 /* this was the last fragment and the frame will be
615 * delivered, so remove skb from fragment cache */ 604 * delivered, so remove skb from fragment cache */
616 skb = frag_skb; 605 skb = frag_skb;
617 hdr = (struct ieee80211_hdr *) skb->data; 606 hdr = (struct ieee80211_hdr *)skb->data;
618 ieee80211_frag_cache_invalidate(ieee, hdr); 607 ieee80211_frag_cache_invalidate(ieee, hdr);
619 } 608 }
620 609
@@ -624,28 +613,26 @@ int ieee80211_rx(struct ieee80211_device *ieee, struct sk_buff *skb,
624 ieee80211_rx_frame_decrypt_msdu(ieee, skb, keyidx, crypt)) 613 ieee80211_rx_frame_decrypt_msdu(ieee, skb, keyidx, crypt))
625 goto rx_dropped; 614 goto rx_dropped;
626 615
627 hdr = (struct ieee80211_hdr *) skb->data; 616 hdr = (struct ieee80211_hdr *)skb->data;
628 if (crypt && !(fc & IEEE80211_FCTL_PROTECTED) && !ieee->open_wep) { 617 if (crypt && !(fc & IEEE80211_FCTL_PROTECTED) && !ieee->open_wep) {
629 if (/*ieee->ieee802_1x &&*/ 618 if ( /*ieee->ieee802_1x && */
630 ieee80211_is_eapol_frame(ieee, skb)) { 619 ieee80211_is_eapol_frame(ieee, skb)) {
631 /* pass unencrypted EAPOL frames even if encryption is 620 /* pass unencrypted EAPOL frames even if encryption is
632 * configured */ 621 * configured */
633 } else { 622 } else {
634 IEEE80211_DEBUG_DROP( 623 IEEE80211_DEBUG_DROP("encryption configured, but RX "
635 "encryption configured, but RX " 624 "frame not encrypted (SA=" MAC_FMT
636 "frame not encrypted (SA=" MAC_FMT ")\n", 625 ")\n", MAC_ARG(hdr->addr2));
637 MAC_ARG(hdr->addr2));
638 goto rx_dropped; 626 goto rx_dropped;
639 } 627 }
640 } 628 }
641 629
642 if (crypt && !(fc & IEEE80211_FCTL_PROTECTED) && !ieee->open_wep && 630 if (crypt && !(fc & IEEE80211_FCTL_PROTECTED) && !ieee->open_wep &&
643 !ieee80211_is_eapol_frame(ieee, skb)) { 631 !ieee80211_is_eapol_frame(ieee, skb)) {
644 IEEE80211_DEBUG_DROP( 632 IEEE80211_DEBUG_DROP("dropped unencrypted RX data "
645 "dropped unencrypted RX data " 633 "frame from " MAC_FMT
646 "frame from " MAC_FMT 634 " (drop_unencrypted=1)\n",
647 " (drop_unencrypted=1)\n", 635 MAC_ARG(hdr->addr2));
648 MAC_ARG(hdr->addr2));
649 goto rx_dropped; 636 goto rx_dropped;
650 } 637 }
651 638
@@ -673,8 +660,7 @@ int ieee80211_rx(struct ieee80211_device *ieee, struct sk_buff *skb,
673 } else if (!frame_authorized) { 660 } else if (!frame_authorized) {
674 printk(KERN_DEBUG "%s: dropped frame from " 661 printk(KERN_DEBUG "%s: dropped frame from "
675 "unauthorized port (IEEE 802.1X): " 662 "unauthorized port (IEEE 802.1X): "
676 "ethertype=0x%04x\n", 663 "ethertype=0x%04x\n", dev->name, ethertype);
677 dev->name, ethertype);
678 goto rx_dropped; 664 goto rx_dropped;
679 } 665 }
680 } 666 }
@@ -702,8 +688,7 @@ int ieee80211_rx(struct ieee80211_device *ieee, struct sk_buff *skb,
702 688
703#ifdef NOT_YET 689#ifdef NOT_YET
704 if (wds && ((fc & (IEEE80211_FCTL_TODS | IEEE80211_FCTL_FROMDS)) == 690 if (wds && ((fc & (IEEE80211_FCTL_TODS | IEEE80211_FCTL_FROMDS)) ==
705 IEEE80211_FCTL_TODS) && 691 IEEE80211_FCTL_TODS) && skb->len >= ETH_HLEN + ETH_ALEN) {
706 skb->len >= ETH_HLEN + ETH_ALEN) {
707 /* Non-standard frame: get addr4 from its bogus location after 692 /* Non-standard frame: get addr4 from its bogus location after
708 * the payload */ 693 * the payload */
709 memcpy(skb->data + ETH_ALEN, 694 memcpy(skb->data + ETH_ALEN,
@@ -716,8 +701,7 @@ int ieee80211_rx(struct ieee80211_device *ieee, struct sk_buff *skb,
716 stats->rx_bytes += skb->len; 701 stats->rx_bytes += skb->len;
717 702
718#ifdef NOT_YET 703#ifdef NOT_YET
719 if (ieee->iw_mode == IW_MODE_MASTER && !wds && 704 if (ieee->iw_mode == IW_MODE_MASTER && !wds && ieee->ap->bridge_packets) {
720 ieee->ap->bridge_packets) {
721 if (dst[0] & 0x01) { 705 if (dst[0] & 0x01) {
722 /* copy multicast frame both to the higher layers and 706 /* copy multicast frame both to the higher layers and
723 * to the wireless media */ 707 * to the wireless media */
@@ -743,25 +727,24 @@ int ieee80211_rx(struct ieee80211_device *ieee, struct sk_buff *skb,
743 skb2->dev = dev; 727 skb2->dev = dev;
744 dev_queue_xmit(skb2); 728 dev_queue_xmit(skb2);
745 } 729 }
746
747#endif 730#endif
748 731
749 if (skb) { 732 if (skb) {
750 skb->protocol = eth_type_trans(skb, dev); 733 skb->protocol = eth_type_trans(skb, dev);
751 memset(skb->cb, 0, sizeof(skb->cb)); 734 memset(skb->cb, 0, sizeof(skb->cb));
752 skb->dev = dev; 735 skb->dev = dev;
753 skb->ip_summed = CHECKSUM_NONE; /* 802.11 crc not sufficient */ 736 skb->ip_summed = CHECKSUM_NONE; /* 802.11 crc not sufficient */
754 netif_rx(skb); 737 netif_rx(skb);
755 } 738 }
756 739
757 rx_exit: 740 rx_exit:
758#ifdef NOT_YET 741#ifdef NOT_YET
759 if (sta) 742 if (sta)
760 hostap_handle_sta_release(sta); 743 hostap_handle_sta_release(sta);
761#endif 744#endif
762 return 1; 745 return 1;
763 746
764 rx_dropped: 747 rx_dropped:
765 stats->rx_dropped++; 748 stats->rx_dropped++;
766 749
767 /* Returning 0 indicates to caller that we have not handled the SKB-- 750 /* Returning 0 indicates to caller that we have not handled the SKB--
@@ -785,22 +768,21 @@ static inline int ieee80211_is_ofdm_rate(u8 rate)
785 case IEEE80211_OFDM_RATE_54MB: 768 case IEEE80211_OFDM_RATE_54MB:
786 return 1; 769 return 1;
787 } 770 }
788 return 0; 771 return 0;
789} 772}
790 773
791 774static inline int ieee80211_network_init(struct ieee80211_device *ieee,
792static inline int ieee80211_network_init( 775 struct ieee80211_probe_response
793 struct ieee80211_device *ieee, 776 *beacon,
794 struct ieee80211_probe_response *beacon, 777 struct ieee80211_network *network,
795 struct ieee80211_network *network, 778 struct ieee80211_rx_stats *stats)
796 struct ieee80211_rx_stats *stats)
797{ 779{
798#ifdef CONFIG_IEEE80211_DEBUG 780#ifdef CONFIG_IEEE80211_DEBUG
799 char rates_str[64]; 781 char rates_str[64];
800 char *p; 782 char *p;
801#endif 783#endif
802 struct ieee80211_info_element *info_element; 784 struct ieee80211_info_element *info_element;
803 u16 left; 785 u16 left;
804 u8 i; 786 u8 i;
805 787
806 /* Pull out fixed field data */ 788 /* Pull out fixed field data */
@@ -810,7 +792,7 @@ static inline int ieee80211_network_init(
810 network->time_stamp[0] = beacon->time_stamp[0]; 792 network->time_stamp[0] = beacon->time_stamp[0];
811 network->time_stamp[1] = beacon->time_stamp[1]; 793 network->time_stamp[1] = beacon->time_stamp[1];
812 network->beacon_interval = beacon->beacon_interval; 794 network->beacon_interval = beacon->beacon_interval;
813 /* Where to pull this? beacon->listen_interval;*/ 795 /* Where to pull this? beacon->listen_interval; */
814 network->listen_interval = 0x0A; 796 network->listen_interval = 0x0A;
815 network->rates_len = network->rates_ex_len = 0; 797 network->rates_len = network->rates_ex_len = 0;
816 network->last_associate = 0; 798 network->last_associate = 0;
@@ -824,18 +806,20 @@ static inline int ieee80211_network_init(
824 } else 806 } else
825 network->flags |= NETWORK_HAS_CCK; 807 network->flags |= NETWORK_HAS_CCK;
826 808
827 network->wpa_ie_len = 0; 809 network->wpa_ie_len = 0;
828 network->rsn_ie_len = 0; 810 network->rsn_ie_len = 0;
829 811
830 info_element = &beacon->info_element; 812 info_element = &beacon->info_element;
831 left = stats->len - ((void *)info_element - (void *)beacon); 813 left = stats->len - ((void *)info_element - (void *)beacon);
832 while (left >= sizeof(struct ieee80211_info_element_hdr)) { 814 while (left >= sizeof(struct ieee80211_info_element_hdr)) {
833 if (sizeof(struct ieee80211_info_element_hdr) + info_element->len > left) { 815 if (sizeof(struct ieee80211_info_element_hdr) +
834 IEEE80211_DEBUG_SCAN("SCAN: parse failed: info_element->len + 2 > left : info_element->len+2=%Zd left=%d.\n", 816 info_element->len > left) {
835 info_element->len + sizeof(struct ieee80211_info_element), 817 IEEE80211_DEBUG_SCAN
836 left); 818 ("SCAN: parse failed: info_element->len + 2 > left : info_element->len+2=%Zd left=%d.\n",
819 info_element->len +
820 sizeof(struct ieee80211_info_element), left);
837 return 1; 821 return 1;
838 } 822 }
839 823
840 switch (info_element->id) { 824 switch (info_element->id) {
841 case MFIE_TYPE_SSID: 825 case MFIE_TYPE_SSID:
@@ -846,10 +830,11 @@ static inline int ieee80211_network_init(
846 } 830 }
847 831
848 network->ssid_len = min(info_element->len, 832 network->ssid_len = min(info_element->len,
849 (u8)IW_ESSID_MAX_SIZE); 833 (u8) IW_ESSID_MAX_SIZE);
850 memcpy(network->ssid, info_element->data, network->ssid_len); 834 memcpy(network->ssid, info_element->data,
851 if (network->ssid_len < IW_ESSID_MAX_SIZE) 835 network->ssid_len);
852 memset(network->ssid + network->ssid_len, 0, 836 if (network->ssid_len < IW_ESSID_MAX_SIZE)
837 memset(network->ssid + network->ssid_len, 0,
853 IW_ESSID_MAX_SIZE - network->ssid_len); 838 IW_ESSID_MAX_SIZE - network->ssid_len);
854 839
855 IEEE80211_DEBUG_SCAN("MFIE_TYPE_SSID: '%s' len=%d.\n", 840 IEEE80211_DEBUG_SCAN("MFIE_TYPE_SSID: '%s' len=%d.\n",
@@ -860,18 +845,23 @@ static inline int ieee80211_network_init(
860#ifdef CONFIG_IEEE80211_DEBUG 845#ifdef CONFIG_IEEE80211_DEBUG
861 p = rates_str; 846 p = rates_str;
862#endif 847#endif
863 network->rates_len = min(info_element->len, MAX_RATES_LENGTH); 848 network->rates_len =
849 min(info_element->len, MAX_RATES_LENGTH);
864 for (i = 0; i < network->rates_len; i++) { 850 for (i = 0; i < network->rates_len; i++) {
865 network->rates[i] = info_element->data[i]; 851 network->rates[i] = info_element->data[i];
866#ifdef CONFIG_IEEE80211_DEBUG 852#ifdef CONFIG_IEEE80211_DEBUG
867 p += snprintf(p, sizeof(rates_str) - (p - rates_str), "%02X ", network->rates[i]); 853 p += snprintf(p,
854 sizeof(rates_str) - (p -
855 rates_str),
856 "%02X ", network->rates[i]);
868#endif 857#endif
869 if (ieee80211_is_ofdm_rate(info_element->data[i])) { 858 if (ieee80211_is_ofdm_rate
859 (info_element->data[i])) {
870 network->flags |= NETWORK_HAS_OFDM; 860 network->flags |= NETWORK_HAS_OFDM;
871 if (info_element->data[i] & 861 if (info_element->data[i] &
872 IEEE80211_BASIC_RATE_MASK) 862 IEEE80211_BASIC_RATE_MASK)
873 network->flags &= 863 network->flags &=
874 ~NETWORK_HAS_CCK; 864 ~NETWORK_HAS_CCK;
875 } 865 }
876 } 866 }
877 867
@@ -883,18 +873,23 @@ static inline int ieee80211_network_init(
883#ifdef CONFIG_IEEE80211_DEBUG 873#ifdef CONFIG_IEEE80211_DEBUG
884 p = rates_str; 874 p = rates_str;
885#endif 875#endif
886 network->rates_ex_len = min(info_element->len, MAX_RATES_EX_LENGTH); 876 network->rates_ex_len =
877 min(info_element->len, MAX_RATES_EX_LENGTH);
887 for (i = 0; i < network->rates_ex_len; i++) { 878 for (i = 0; i < network->rates_ex_len; i++) {
888 network->rates_ex[i] = info_element->data[i]; 879 network->rates_ex[i] = info_element->data[i];
889#ifdef CONFIG_IEEE80211_DEBUG 880#ifdef CONFIG_IEEE80211_DEBUG
890 p += snprintf(p, sizeof(rates_str) - (p - rates_str), "%02X ", network->rates[i]); 881 p += snprintf(p,
882 sizeof(rates_str) - (p -
883 rates_str),
884 "%02X ", network->rates[i]);
891#endif 885#endif
892 if (ieee80211_is_ofdm_rate(info_element->data[i])) { 886 if (ieee80211_is_ofdm_rate
887 (info_element->data[i])) {
893 network->flags |= NETWORK_HAS_OFDM; 888 network->flags |= NETWORK_HAS_OFDM;
894 if (info_element->data[i] & 889 if (info_element->data[i] &
895 IEEE80211_BASIC_RATE_MASK) 890 IEEE80211_BASIC_RATE_MASK)
896 network->flags &= 891 network->flags &=
897 ~NETWORK_HAS_CCK; 892 ~NETWORK_HAS_CCK;
898 } 893 }
899 } 894 }
900 895
@@ -903,14 +898,14 @@ static inline int ieee80211_network_init(
903 break; 898 break;
904 899
905 case MFIE_TYPE_DS_SET: 900 case MFIE_TYPE_DS_SET:
906 IEEE80211_DEBUG_SCAN("MFIE_TYPE_DS_SET: %d\n", 901 IEEE80211_DEBUG_SCAN("MFIE_TYPE_DS_SET: %d\n",
907 info_element->data[0]); 902 info_element->data[0]);
908 if (stats->freq == IEEE80211_24GHZ_BAND) 903 if (stats->freq == IEEE80211_24GHZ_BAND)
909 network->channel = info_element->data[0]; 904 network->channel = info_element->data[0];
910 break; 905 break;
911 906
912 case MFIE_TYPE_FH_SET: 907 case MFIE_TYPE_FH_SET:
913 IEEE80211_DEBUG_SCAN("MFIE_TYPE_FH_SET: ignored\n"); 908 IEEE80211_DEBUG_SCAN("MFIE_TYPE_FH_SET: ignored\n");
914 break; 909 break;
915 910
916 case MFIE_TYPE_CF_SET: 911 case MFIE_TYPE_CF_SET:
@@ -932,13 +927,13 @@ static inline int ieee80211_network_init(
932 case MFIE_TYPE_GENERIC: 927 case MFIE_TYPE_GENERIC:
933 IEEE80211_DEBUG_SCAN("MFIE_TYPE_GENERIC: %d bytes\n", 928 IEEE80211_DEBUG_SCAN("MFIE_TYPE_GENERIC: %d bytes\n",
934 info_element->len); 929 info_element->len);
935 if (info_element->len >= 4 && 930 if (info_element->len >= 4 &&
936 info_element->data[0] == 0x00 && 931 info_element->data[0] == 0x00 &&
937 info_element->data[1] == 0x50 && 932 info_element->data[1] == 0x50 &&
938 info_element->data[2] == 0xf2 && 933 info_element->data[2] == 0xf2 &&
939 info_element->data[3] == 0x01) { 934 info_element->data[3] == 0x01) {
940 network->wpa_ie_len = min(info_element->len + 2, 935 network->wpa_ie_len = min(info_element->len + 2,
941 MAX_WPA_IE_LEN); 936 MAX_WPA_IE_LEN);
942 memcpy(network->wpa_ie, info_element, 937 memcpy(network->wpa_ie, info_element,
943 network->wpa_ie_len); 938 network->wpa_ie_len);
944 } 939 }
@@ -948,7 +943,7 @@ static inline int ieee80211_network_init(
948 IEEE80211_DEBUG_SCAN("MFIE_TYPE_RSN: %d bytes\n", 943 IEEE80211_DEBUG_SCAN("MFIE_TYPE_RSN: %d bytes\n",
949 info_element->len); 944 info_element->len);
950 network->rsn_ie_len = min(info_element->len + 2, 945 network->rsn_ie_len = min(info_element->len + 2,
951 MAX_WPA_IE_LEN); 946 MAX_WPA_IE_LEN);
952 memcpy(network->rsn_ie, info_element, 947 memcpy(network->rsn_ie, info_element,
953 network->rsn_ie_len); 948 network->rsn_ie_len);
954 break; 949 break;
@@ -956,14 +951,14 @@ static inline int ieee80211_network_init(
956 default: 951 default:
957 IEEE80211_DEBUG_SCAN("unsupported IE %d\n", 952 IEEE80211_DEBUG_SCAN("unsupported IE %d\n",
958 info_element->id); 953 info_element->id);
959 break; 954 break;
960 } 955 }
961 956
962 left -= sizeof(struct ieee80211_info_element_hdr) + 957 left -= sizeof(struct ieee80211_info_element_hdr) +
963 info_element->len; 958 info_element->len;
964 info_element = (struct ieee80211_info_element *) 959 info_element = (struct ieee80211_info_element *)
965 &info_element->data[info_element->len]; 960 &info_element->data[info_element->len];
966 } 961 }
967 962
968 network->mode = 0; 963 network->mode = 0;
969 if (stats->freq == IEEE80211_52GHZ_BAND) 964 if (stats->freq == IEEE80211_52GHZ_BAND)
@@ -1032,10 +1027,13 @@ static inline void update_network(struct ieee80211_network *dst,
1032 /* dst->last_associate is not overwritten */ 1027 /* dst->last_associate is not overwritten */
1033} 1028}
1034 1029
1035static inline void ieee80211_process_probe_response( 1030static inline void ieee80211_process_probe_response(struct ieee80211_device
1036 struct ieee80211_device *ieee, 1031 *ieee,
1037 struct ieee80211_probe_response *beacon, 1032 struct
1038 struct ieee80211_rx_stats *stats) 1033 ieee80211_probe_response
1034 *beacon,
1035 struct ieee80211_rx_stats
1036 *stats)
1039{ 1037{
1040 struct ieee80211_network network; 1038 struct ieee80211_network network;
1041 struct ieee80211_network *target; 1039 struct ieee80211_network *target;
@@ -1045,33 +1043,35 @@ static inline void ieee80211_process_probe_response(
1045#endif 1043#endif
1046 unsigned long flags; 1044 unsigned long flags;
1047 1045
1048 IEEE80211_DEBUG_SCAN( 1046 IEEE80211_DEBUG_SCAN("'%s' (" MAC_FMT
1049 "'%s' (" MAC_FMT "): %c%c%c%c %c%c%c%c-%c%c%c%c %c%c%c%c\n", 1047 "): %c%c%c%c %c%c%c%c-%c%c%c%c %c%c%c%c\n",
1050 escape_essid(info_element->data, info_element->len), 1048 escape_essid(info_element->data,
1051 MAC_ARG(beacon->header.addr3), 1049 info_element->len),
1052 (beacon->capability & (1<<0xf)) ? '1' : '0', 1050 MAC_ARG(beacon->header.addr3),
1053 (beacon->capability & (1<<0xe)) ? '1' : '0', 1051 (beacon->capability & (1 << 0xf)) ? '1' : '0',
1054 (beacon->capability & (1<<0xd)) ? '1' : '0', 1052 (beacon->capability & (1 << 0xe)) ? '1' : '0',
1055 (beacon->capability & (1<<0xc)) ? '1' : '0', 1053 (beacon->capability & (1 << 0xd)) ? '1' : '0',
1056 (beacon->capability & (1<<0xb)) ? '1' : '0', 1054 (beacon->capability & (1 << 0xc)) ? '1' : '0',
1057 (beacon->capability & (1<<0xa)) ? '1' : '0', 1055 (beacon->capability & (1 << 0xb)) ? '1' : '0',
1058 (beacon->capability & (1<<0x9)) ? '1' : '0', 1056 (beacon->capability & (1 << 0xa)) ? '1' : '0',
1059 (beacon->capability & (1<<0x8)) ? '1' : '0', 1057 (beacon->capability & (1 << 0x9)) ? '1' : '0',
1060 (beacon->capability & (1<<0x7)) ? '1' : '0', 1058 (beacon->capability & (1 << 0x8)) ? '1' : '0',
1061 (beacon->capability & (1<<0x6)) ? '1' : '0', 1059 (beacon->capability & (1 << 0x7)) ? '1' : '0',
1062 (beacon->capability & (1<<0x5)) ? '1' : '0', 1060 (beacon->capability & (1 << 0x6)) ? '1' : '0',
1063 (beacon->capability & (1<<0x4)) ? '1' : '0', 1061 (beacon->capability & (1 << 0x5)) ? '1' : '0',
1064 (beacon->capability & (1<<0x3)) ? '1' : '0', 1062 (beacon->capability & (1 << 0x4)) ? '1' : '0',
1065 (beacon->capability & (1<<0x2)) ? '1' : '0', 1063 (beacon->capability & (1 << 0x3)) ? '1' : '0',
1066 (beacon->capability & (1<<0x1)) ? '1' : '0', 1064 (beacon->capability & (1 << 0x2)) ? '1' : '0',
1067 (beacon->capability & (1<<0x0)) ? '1' : '0'); 1065 (beacon->capability & (1 << 0x1)) ? '1' : '0',
1066 (beacon->capability & (1 << 0x0)) ? '1' : '0');
1068 1067
1069 if (ieee80211_network_init(ieee, beacon, &network, stats)) { 1068 if (ieee80211_network_init(ieee, beacon, &network, stats)) {
1070 IEEE80211_DEBUG_SCAN("Dropped '%s' (" MAC_FMT ") via %s.\n", 1069 IEEE80211_DEBUG_SCAN("Dropped '%s' (" MAC_FMT ") via %s.\n",
1071 escape_essid(info_element->data, 1070 escape_essid(info_element->data,
1072 info_element->len), 1071 info_element->len),
1073 MAC_ARG(beacon->header.addr3), 1072 MAC_ARG(beacon->header.addr3),
1074 WLAN_FC_GET_STYPE(beacon->header.frame_ctl) == 1073 WLAN_FC_GET_STYPE(beacon->header.
1074 frame_ctl) ==
1075 IEEE80211_STYPE_PROBE_RESP ? 1075 IEEE80211_STYPE_PROBE_RESP ?
1076 "PROBE RESPONSE" : "BEACON"); 1076 "PROBE RESPONSE" : "BEACON");
1077 return; 1077 return;
@@ -1117,13 +1117,13 @@ static inline void ieee80211_process_probe_response(
1117 list_del(ieee->network_free_list.next); 1117 list_del(ieee->network_free_list.next);
1118 } 1118 }
1119 1119
1120
1121#ifdef CONFIG_IEEE80211_DEBUG 1120#ifdef CONFIG_IEEE80211_DEBUG
1122 IEEE80211_DEBUG_SCAN("Adding '%s' (" MAC_FMT ") via %s.\n", 1121 IEEE80211_DEBUG_SCAN("Adding '%s' (" MAC_FMT ") via %s.\n",
1123 escape_essid(network.ssid, 1122 escape_essid(network.ssid,
1124 network.ssid_len), 1123 network.ssid_len),
1125 MAC_ARG(network.bssid), 1124 MAC_ARG(network.bssid),
1126 WLAN_FC_GET_STYPE(beacon->header.frame_ctl) == 1125 WLAN_FC_GET_STYPE(beacon->header.
1126 frame_ctl) ==
1127 IEEE80211_STYPE_PROBE_RESP ? 1127 IEEE80211_STYPE_PROBE_RESP ?
1128 "PROBE RESPONSE" : "BEACON"); 1128 "PROBE RESPONSE" : "BEACON");
1129#endif 1129#endif
@@ -1134,7 +1134,8 @@ static inline void ieee80211_process_probe_response(
1134 escape_essid(target->ssid, 1134 escape_essid(target->ssid,
1135 target->ssid_len), 1135 target->ssid_len),
1136 MAC_ARG(target->bssid), 1136 MAC_ARG(target->bssid),
1137 WLAN_FC_GET_STYPE(beacon->header.frame_ctl) == 1137 WLAN_FC_GET_STYPE(beacon->header.
1138 frame_ctl) ==
1138 IEEE80211_STYPE_PROBE_RESP ? 1139 IEEE80211_STYPE_PROBE_RESP ?
1139 "PROBE RESPONSE" : "BEACON"); 1140 "PROBE RESPONSE" : "BEACON");
1140 update_network(target, &network); 1141 update_network(target, &network);
@@ -1162,16 +1163,20 @@ void ieee80211_rx_mgt(struct ieee80211_device *ieee,
1162 IEEE80211_DEBUG_MGMT("received PROBE RESPONSE (%d)\n", 1163 IEEE80211_DEBUG_MGMT("received PROBE RESPONSE (%d)\n",
1163 WLAN_FC_GET_STYPE(header->frame_ctl)); 1164 WLAN_FC_GET_STYPE(header->frame_ctl));
1164 IEEE80211_DEBUG_SCAN("Probe response\n"); 1165 IEEE80211_DEBUG_SCAN("Probe response\n");
1165 ieee80211_process_probe_response( 1166 ieee80211_process_probe_response(ieee,
1166 ieee, (struct ieee80211_probe_response *)header, stats); 1167 (struct
1168 ieee80211_probe_response *)
1169 header, stats);
1167 break; 1170 break;
1168 1171
1169 case IEEE80211_STYPE_BEACON: 1172 case IEEE80211_STYPE_BEACON:
1170 IEEE80211_DEBUG_MGMT("received BEACON (%d)\n", 1173 IEEE80211_DEBUG_MGMT("received BEACON (%d)\n",
1171 WLAN_FC_GET_STYPE(header->frame_ctl)); 1174 WLAN_FC_GET_STYPE(header->frame_ctl));
1172 IEEE80211_DEBUG_SCAN("Beacon\n"); 1175 IEEE80211_DEBUG_SCAN("Beacon\n");
1173 ieee80211_process_probe_response( 1176 ieee80211_process_probe_response(ieee,
1174 ieee, (struct ieee80211_probe_response *)header, stats); 1177 (struct
1178 ieee80211_probe_response *)
1179 header, stats);
1175 break; 1180 break;
1176 1181
1177 default: 1182 default:
@@ -1184,6 +1189,5 @@ void ieee80211_rx_mgt(struct ieee80211_device *ieee,
1184 } 1189 }
1185} 1190}
1186 1191
1187
1188EXPORT_SYMBOL(ieee80211_rx_mgt); 1192EXPORT_SYMBOL(ieee80211_rx_mgt);
1189EXPORT_SYMBOL(ieee80211_rx); 1193EXPORT_SYMBOL(ieee80211_rx);
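Earlier in this file's diff, the receive path drops plaintext data frames whenever a crypt handler is configured and open_wep is not set, with one carve-out: unencrypted EAPOL frames are still delivered so 802.1X key negotiation can complete before any keys are installed. A toy restatement of that filter, illustrative only and with invented names, not kernel code:

/* Toy restatement of the RX plaintext policy above; all names invented. */
#include <stdbool.h>

struct rx_ctx {
	bool protected_frame;   /* FC "protected" bit set on the frame     */
	bool crypt_configured;  /* a crypt handler exists for this key     */
	bool open_wep;          /* operator allows mixed plaintext traffic */
	bool is_eapol;          /* payload is an 802.1X EAPOL frame        */
};

bool rx_accept_plaintext(const struct rx_ctx *c)
{
	if (c->protected_frame || !c->crypt_configured || c->open_wep)
		return true;    /* nothing to enforce on this frame */
	return c->is_eapol;     /* only EAPOL may bypass encryption */
}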
diff --git a/net/ieee80211/ieee80211_tx.c b/net/ieee80211/ieee80211_tx.c
index b7ea3e25e25d..c9aaff3fea1e 100644
--- a/net/ieee80211/ieee80211_tx.c
+++ b/net/ieee80211/ieee80211_tx.c
@@ -45,10 +45,8 @@
45 45
46#include <net/ieee80211.h> 46#include <net/ieee80211.h>
47 47
48
49/* 48/*
50 49
51
52802.11 Data Frame 50802.11 Data Frame
53 51
54 ,-------------------------------------------------------------------. 52 ,-------------------------------------------------------------------.
@@ -82,7 +80,6 @@ Desc. | IV | Encrypted | ICV |
82 `-----------------------' 80 `-----------------------'
83Total: 8 non-data bytes 81Total: 8 non-data bytes
84 82
85
86802.3 Ethernet Data Frame 83802.3 Ethernet Data Frame
87 84
88 ,-----------------------------------------. 85 ,-----------------------------------------.
@@ -131,7 +128,7 @@ payload of each frame is reduced to 492 bytes.
131static u8 P802_1H_OUI[P80211_OUI_LEN] = { 0x00, 0x00, 0xf8 }; 128static u8 P802_1H_OUI[P80211_OUI_LEN] = { 0x00, 0x00, 0xf8 };
132static u8 RFC1042_OUI[P80211_OUI_LEN] = { 0x00, 0x00, 0x00 }; 129static u8 RFC1042_OUI[P80211_OUI_LEN] = { 0x00, 0x00, 0x00 };
133 130
134static inline int ieee80211_put_snap(u8 *data, u16 h_proto) 131static inline int ieee80211_put_snap(u8 * data, u16 h_proto)
135{ 132{
136 struct ieee80211_snap_hdr *snap; 133 struct ieee80211_snap_hdr *snap;
137 u8 *oui; 134 u8 *oui;
@@ -149,17 +146,15 @@ static inline int ieee80211_put_snap(u8 *data, u16 h_proto)
149 snap->oui[1] = oui[1]; 146 snap->oui[1] = oui[1];
150 snap->oui[2] = oui[2]; 147 snap->oui[2] = oui[2];
151 148
152 *(u16 *)(data + SNAP_SIZE) = htons(h_proto); 149 *(u16 *) (data + SNAP_SIZE) = htons(h_proto);
153 150
154 return SNAP_SIZE + sizeof(u16); 151 return SNAP_SIZE + sizeof(u16);
155} 152}
156 153
157static inline int ieee80211_encrypt_fragment( 154static inline int ieee80211_encrypt_fragment(struct ieee80211_device *ieee,
158 struct ieee80211_device *ieee, 155 struct sk_buff *frag, int hdr_len)
159 struct sk_buff *frag,
160 int hdr_len)
161{ 156{
162 struct ieee80211_crypt_data* crypt = ieee->crypt[ieee->tx_keyidx]; 157 struct ieee80211_crypt_data *crypt = ieee->crypt[ieee->tx_keyidx];
163 int res; 158 int res;
164 159
165#ifdef CONFIG_IEEE80211_CRYPT_TKIP 160#ifdef CONFIG_IEEE80211_CRYPT_TKIP
@@ -167,7 +162,7 @@ static inline int ieee80211_encrypt_fragment(
167 162
168 if (ieee->tkip_countermeasures && 163 if (ieee->tkip_countermeasures &&
169 crypt && crypt->ops && strcmp(crypt->ops->name, "TKIP") == 0) { 164 crypt && crypt->ops && strcmp(crypt->ops->name, "TKIP") == 0) {
170 header = (struct ieee80211_hdr *) frag->data; 165 header = (struct ieee80211_hdr *)frag->data;
171 if (net_ratelimit()) { 166 if (net_ratelimit()) {
172 printk(KERN_DEBUG "%s: TKIP countermeasures: dropped " 167 printk(KERN_DEBUG "%s: TKIP countermeasures: dropped "
173 "TX packet to " MAC_FMT "\n", 168 "TX packet to " MAC_FMT "\n",
@@ -200,8 +195,8 @@ static inline int ieee80211_encrypt_fragment(
200 return 0; 195 return 0;
201} 196}
202 197
203 198void ieee80211_txb_free(struct ieee80211_txb *txb)
204void ieee80211_txb_free(struct ieee80211_txb *txb) { 199{
205 int i; 200 int i;
206 if (unlikely(!txb)) 201 if (unlikely(!txb))
207 return; 202 return;
@@ -216,9 +211,8 @@ static struct ieee80211_txb *ieee80211_alloc_txb(int nr_frags, int txb_size,
216{ 211{
217 struct ieee80211_txb *txb; 212 struct ieee80211_txb *txb;
218 int i; 213 int i;
219 txb = kmalloc( 214 txb = kmalloc(sizeof(struct ieee80211_txb) + (sizeof(u8 *) * nr_frags),
220 sizeof(struct ieee80211_txb) + (sizeof(u8*) * nr_frags), 215 gfp_mask);
221 gfp_mask);
222 if (!txb) 216 if (!txb)
223 return NULL; 217 return NULL;
224 218
@@ -243,8 +237,7 @@ static struct ieee80211_txb *ieee80211_alloc_txb(int nr_frags, int txb_size,
243} 237}
244 238
245/* SKBs are added to the ieee->tx_queue. */ 239/* SKBs are added to the ieee->tx_queue. */
246int ieee80211_xmit(struct sk_buff *skb, 240int ieee80211_xmit(struct sk_buff *skb, struct net_device *dev)
247 struct net_device *dev)
248{ 241{
249 struct ieee80211_device *ieee = netdev_priv(dev); 242 struct ieee80211_device *ieee = netdev_priv(dev);
250 struct ieee80211_txb *txb = NULL; 243 struct ieee80211_txb *txb = NULL;
@@ -255,21 +248,20 @@ int ieee80211_xmit(struct sk_buff *skb,
255 int ether_type, encrypt; 248 int ether_type, encrypt;
256 int bytes, fc, hdr_len; 249 int bytes, fc, hdr_len;
257 struct sk_buff *skb_frag; 250 struct sk_buff *skb_frag;
258 struct ieee80211_hdr header = { /* Ensure zero initialized */ 251 struct ieee80211_hdr header = { /* Ensure zero initialized */
259 .duration_id = 0, 252 .duration_id = 0,
260 .seq_ctl = 0 253 .seq_ctl = 0
261 }; 254 };
262 u8 dest[ETH_ALEN], src[ETH_ALEN]; 255 u8 dest[ETH_ALEN], src[ETH_ALEN];
263 256
264 struct ieee80211_crypt_data* crypt; 257 struct ieee80211_crypt_data *crypt;
265 258
266 spin_lock_irqsave(&ieee->lock, flags); 259 spin_lock_irqsave(&ieee->lock, flags);
267 260
 268	/* If there is no driver handler to take the TXB, don't bother		 261	/* If there is no driver handler to take the TXB, don't bother
269 * creating it... */ 262 * creating it... */
270 if (!ieee->hard_start_xmit) { 263 if (!ieee->hard_start_xmit) {
271 printk(KERN_WARNING "%s: No xmit handler.\n", 264 printk(KERN_WARNING "%s: No xmit handler.\n", ieee->dev->name);
272 ieee->dev->name);
273 goto success; 265 goto success;
274 } 266 }
275 267
@@ -284,7 +276,7 @@ int ieee80211_xmit(struct sk_buff *skb,
284 crypt = ieee->crypt[ieee->tx_keyidx]; 276 crypt = ieee->crypt[ieee->tx_keyidx];
285 277
286 encrypt = !(ether_type == ETH_P_PAE && ieee->ieee802_1x) && 278 encrypt = !(ether_type == ETH_P_PAE && ieee->ieee802_1x) &&
287 ieee->host_encrypt && crypt && crypt->ops; 279 ieee->host_encrypt && crypt && crypt->ops;
288 280
289 if (!encrypt && ieee->ieee802_1x && 281 if (!encrypt && ieee->ieee802_1x &&
290 ieee->drop_unencrypted && ether_type != ETH_P_PAE) { 282 ieee->drop_unencrypted && ether_type != ETH_P_PAE) {
@@ -294,7 +286,7 @@ int ieee80211_xmit(struct sk_buff *skb,
294 286
295 /* Save source and destination addresses */ 287 /* Save source and destination addresses */
296 memcpy(&dest, skb->data, ETH_ALEN); 288 memcpy(&dest, skb->data, ETH_ALEN);
297 memcpy(&src, skb->data+ETH_ALEN, ETH_ALEN); 289 memcpy(&src, skb->data + ETH_ALEN, ETH_ALEN);
298 290
299 /* Advance the SKB to the start of the payload */ 291 /* Advance the SKB to the start of the payload */
300 skb_pull(skb, sizeof(struct ethhdr)); 292 skb_pull(skb, sizeof(struct ethhdr));
@@ -304,7 +296,7 @@ int ieee80211_xmit(struct sk_buff *skb,
304 296
305 if (encrypt) 297 if (encrypt)
306 fc = IEEE80211_FTYPE_DATA | IEEE80211_STYPE_DATA | 298 fc = IEEE80211_FTYPE_DATA | IEEE80211_STYPE_DATA |
307 IEEE80211_FCTL_PROTECTED; 299 IEEE80211_FCTL_PROTECTED;
308 else 300 else
309 fc = IEEE80211_FTYPE_DATA | IEEE80211_STYPE_DATA; 301 fc = IEEE80211_FTYPE_DATA | IEEE80211_STYPE_DATA;
310 302
@@ -327,8 +319,7 @@ int ieee80211_xmit(struct sk_buff *skb,
327 319
328 /* Determine fragmentation size based on destination (multicast 320 /* Determine fragmentation size based on destination (multicast
329 * and broadcast are not fragmented) */ 321 * and broadcast are not fragmented) */
330 if (is_multicast_ether_addr(dest) || 322 if (is_multicast_ether_addr(dest) || is_broadcast_ether_addr(dest))
331 is_broadcast_ether_addr(dest))
332 frag_size = MAX_FRAG_THRESHOLD; 323 frag_size = MAX_FRAG_THRESHOLD;
333 else 324 else
334 frag_size = ieee->fts; 325 frag_size = ieee->fts;
@@ -345,7 +336,7 @@ int ieee80211_xmit(struct sk_buff *skb,
 345	/* Each fragment may need to have room for encryption pre/postfix */	 336	/* Each fragment may need to have room for encryption pre/postfix */
346 if (encrypt) 337 if (encrypt)
347 bytes_per_frag -= crypt->ops->extra_prefix_len + 338 bytes_per_frag -= crypt->ops->extra_prefix_len +
348 crypt->ops->extra_postfix_len; 339 crypt->ops->extra_postfix_len;
349 340
350 /* Number of fragments is the total bytes_per_frag / 341 /* Number of fragments is the total bytes_per_frag /
351 * payload_per_fragment */ 342 * payload_per_fragment */
@@ -380,19 +371,19 @@ int ieee80211_xmit(struct sk_buff *skb,
380 /* If this is not the last fragment, then add the MOREFRAGS 371 /* If this is not the last fragment, then add the MOREFRAGS
381 * bit to the frame control */ 372 * bit to the frame control */
382 if (i != nr_frags - 1) { 373 if (i != nr_frags - 1) {
383 frag_hdr->frame_ctl = cpu_to_le16( 374 frag_hdr->frame_ctl =
384 fc | IEEE80211_FCTL_MOREFRAGS); 375 cpu_to_le16(fc | IEEE80211_FCTL_MOREFRAGS);
385 bytes = bytes_per_frag; 376 bytes = bytes_per_frag;
386 } else { 377 } else {
387 /* The last fragment takes the remaining length */ 378 /* The last fragment takes the remaining length */
388 bytes = bytes_last_frag; 379 bytes = bytes_last_frag;
389 } 380 }
390 381
391 /* Put a SNAP header on the first fragment */ 382 /* Put a SNAP header on the first fragment */
392 if (i == 0) { 383 if (i == 0) {
393 ieee80211_put_snap( 384 ieee80211_put_snap(skb_put
394 skb_put(skb_frag, SNAP_SIZE + sizeof(u16)), 385 (skb_frag, SNAP_SIZE + sizeof(u16)),
395 ether_type); 386 ether_type);
396 bytes -= SNAP_SIZE + sizeof(u16); 387 bytes -= SNAP_SIZE + sizeof(u16);
397 } 388 }
398 389
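The comments in the hunks above describe how the transmit path carves the 802.3 payload into 802.11 fragments: the fragment threshold depends on the destination type, each fragment loses room to the encryption prefix/postfix, and the first fragment also carries the 8-byte SNAP header. A rough stand-alone sketch of that arithmetic, using hypothetical numbers rather than values from this patch (the real computation lives in ieee80211_xmit()):

#include <stdio.h>

int main(void)
{
	int payload = 1472;           /* 802.3 payload handed to the 802.11 layer    */
	int frag_size = 512;          /* hypothetical fragment threshold (ieee->fts) */
	int hdr_len = 24;             /* 3-address 802.11 data header                */
	int prefix = 4, postfix = 4;  /* e.g. WEP IV + ICV when host-encrypting      */
	int snap = 6 + 2;             /* SNAP header + ethertype, first fragment     */

	int bytes_per_frag = frag_size - hdr_len - prefix - postfix;
	int bytes = payload + snap;
	int nr_frags = bytes / bytes_per_frag + (bytes % bytes_per_frag ? 1 : 0);
	int bytes_last_frag = bytes % bytes_per_frag ?
			      bytes % bytes_per_frag : bytes_per_frag;

	printf("%d fragments, up to %d payload bytes each, last carries %d\n",
	       nr_frags, bytes_per_frag, bytes_last_frag);
	return 0;
}

With these illustrative numbers the frame splits into 4 fragments of at most 480 payload bytes, the last one carrying 40.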
@@ -410,14 +401,13 @@ int ieee80211_xmit(struct sk_buff *skb,
410 skb_put(skb_frag, 4); 401 skb_put(skb_frag, 4);
411 } 402 }
412 403
413 404 success:
414 success:
415 spin_unlock_irqrestore(&ieee->lock, flags); 405 spin_unlock_irqrestore(&ieee->lock, flags);
416 406
417 dev_kfree_skb_any(skb); 407 dev_kfree_skb_any(skb);
418 408
419 if (txb) { 409 if (txb) {
420 if ((*ieee->hard_start_xmit)(txb, dev) == 0) { 410 if ((*ieee->hard_start_xmit) (txb, dev) == 0) {
421 stats->tx_packets++; 411 stats->tx_packets++;
422 stats->tx_bytes += txb->payload_size; 412 stats->tx_bytes += txb->payload_size;
423 return 0; 413 return 0;
@@ -427,7 +417,7 @@ int ieee80211_xmit(struct sk_buff *skb,
427 417
428 return 0; 418 return 0;
429 419
430 failed: 420 failed:
431 spin_unlock_irqrestore(&ieee->lock, flags); 421 spin_unlock_irqrestore(&ieee->lock, flags);
432 netif_stop_queue(dev); 422 netif_stop_queue(dev);
433 stats->tx_errors++; 423 stats->tx_errors++;
diff --git a/net/ieee80211/ieee80211_wx.c b/net/ieee80211/ieee80211_wx.c
index 2cd571c525a9..94882f39b072 100644
--- a/net/ieee80211/ieee80211_wx.c
+++ b/net/ieee80211/ieee80211_wx.c
@@ -29,19 +29,20 @@
29 Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 29 Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
30 30
31******************************************************************************/ 31******************************************************************************/
32#include <linux/wireless.h> 32
33#include <linux/version.h>
34#include <linux/kmod.h> 33#include <linux/kmod.h>
35#include <linux/module.h> 34#include <linux/module.h>
36 35
37#include <net/ieee80211.h> 36#include <net/ieee80211.h>
37#include <linux/wireless.h>
38
38static const char *ieee80211_modes[] = { 39static const char *ieee80211_modes[] = {
39 "?", "a", "b", "ab", "g", "ag", "bg", "abg" 40 "?", "a", "b", "ab", "g", "ag", "bg", "abg"
40}; 41};
41 42
42#define MAX_CUSTOM_LEN 64 43#define MAX_CUSTOM_LEN 64
43static inline char *ipw2100_translate_scan(struct ieee80211_device *ieee, 44static inline char *ipw2100_translate_scan(struct ieee80211_device *ieee,
44 char *start, char *stop, 45 char *start, char *stop,
45 struct ieee80211_network *network) 46 struct ieee80211_network *network)
46{ 47{
47 char custom[MAX_CUSTOM_LEN]; 48 char custom[MAX_CUSTOM_LEN];
@@ -65,29 +66,28 @@ static inline char *ipw2100_translate_scan(struct ieee80211_device *ieee,
65 iwe.u.data.length = sizeof("<hidden>"); 66 iwe.u.data.length = sizeof("<hidden>");
66 start = iwe_stream_add_point(start, stop, &iwe, "<hidden>"); 67 start = iwe_stream_add_point(start, stop, &iwe, "<hidden>");
67 } else { 68 } else {
68 iwe.u.data.length = min(network->ssid_len, (u8)32); 69 iwe.u.data.length = min(network->ssid_len, (u8) 32);
69 start = iwe_stream_add_point(start, stop, &iwe, network->ssid); 70 start = iwe_stream_add_point(start, stop, &iwe, network->ssid);
70 } 71 }
71 72
72 /* Add the protocol name */ 73 /* Add the protocol name */
73 iwe.cmd = SIOCGIWNAME; 74 iwe.cmd = SIOCGIWNAME;
74 snprintf(iwe.u.name, IFNAMSIZ, "IEEE 802.11%s", ieee80211_modes[network->mode]); 75 snprintf(iwe.u.name, IFNAMSIZ, "IEEE 802.11%s",
76 ieee80211_modes[network->mode]);
75 start = iwe_stream_add_event(start, stop, &iwe, IW_EV_CHAR_LEN); 77 start = iwe_stream_add_event(start, stop, &iwe, IW_EV_CHAR_LEN);
76 78
77 /* Add mode */ 79 /* Add mode */
78 iwe.cmd = SIOCGIWMODE; 80 iwe.cmd = SIOCGIWMODE;
79 if (network->capability & 81 if (network->capability & (WLAN_CAPABILITY_ESS | WLAN_CAPABILITY_IBSS)) {
80 (WLAN_CAPABILITY_ESS | WLAN_CAPABILITY_IBSS)) {
81 if (network->capability & WLAN_CAPABILITY_ESS) 82 if (network->capability & WLAN_CAPABILITY_ESS)
82 iwe.u.mode = IW_MODE_MASTER; 83 iwe.u.mode = IW_MODE_MASTER;
83 else 84 else
84 iwe.u.mode = IW_MODE_ADHOC; 85 iwe.u.mode = IW_MODE_ADHOC;
85 86
86 start = iwe_stream_add_event(start, stop, &iwe, 87 start = iwe_stream_add_event(start, stop, &iwe, IW_EV_UINT_LEN);
87 IW_EV_UINT_LEN);
88 } 88 }
89 89
90 /* Add frequency/channel */ 90 /* Add frequency/channel */
91 iwe.cmd = SIOCGIWFREQ; 91 iwe.cmd = SIOCGIWFREQ;
92/* iwe.u.freq.m = ieee80211_frequency(network->channel, network->mode); 92/* iwe.u.freq.m = ieee80211_frequency(network->channel, network->mode);
93 iwe.u.freq.e = 3; */ 93 iwe.u.freq.e = 3; */
@@ -109,7 +109,7 @@ static inline char *ipw2100_translate_scan(struct ieee80211_device *ieee,
109 max_rate = 0; 109 max_rate = 0;
110 p = custom; 110 p = custom;
111 p += snprintf(p, MAX_CUSTOM_LEN - (p - custom), " Rates (Mb/s): "); 111 p += snprintf(p, MAX_CUSTOM_LEN - (p - custom), " Rates (Mb/s): ");
112 for (i = 0, j = 0; i < network->rates_len; ) { 112 for (i = 0, j = 0; i < network->rates_len;) {
113 if (j < network->rates_ex_len && 113 if (j < network->rates_ex_len &&
114 ((network->rates_ex[j] & 0x7F) < 114 ((network->rates_ex[j] & 0x7F) <
115 (network->rates[i] & 0x7F))) 115 (network->rates[i] & 0x7F)))
@@ -132,8 +132,7 @@ static inline char *ipw2100_translate_scan(struct ieee80211_device *ieee,
132 iwe.cmd = SIOCGIWRATE; 132 iwe.cmd = SIOCGIWRATE;
133 iwe.u.bitrate.fixed = iwe.u.bitrate.disabled = 0; 133 iwe.u.bitrate.fixed = iwe.u.bitrate.disabled = 0;
134 iwe.u.bitrate.value = max_rate * 500000; 134 iwe.u.bitrate.value = max_rate * 500000;
135 start = iwe_stream_add_event(start, stop, &iwe, 135 start = iwe_stream_add_event(start, stop, &iwe, IW_EV_PARAM_LEN);
136 IW_EV_PARAM_LEN);
137 136
138 iwe.cmd = IWEVCUSTOM; 137 iwe.cmd = IWEVCUSTOM;
139 iwe.u.data.length = p - custom; 138 iwe.u.data.length = p - custom;
@@ -163,7 +162,7 @@ static inline char *ipw2100_translate_scan(struct ieee80211_device *ieee,
163 if (iwe.u.data.length) 162 if (iwe.u.data.length)
164 start = iwe_stream_add_point(start, stop, &iwe, custom); 163 start = iwe_stream_add_point(start, stop, &iwe, custom);
165 164
166 if (ieee->wpa_enabled && network->wpa_ie_len){ 165 if (ieee->wpa_enabled && network->wpa_ie_len) {
167 char buf[MAX_WPA_IE_LEN * 2 + 30]; 166 char buf[MAX_WPA_IE_LEN * 2 + 30];
168 167
169 u8 *p = buf; 168 u8 *p = buf;
@@ -178,7 +177,7 @@ static inline char *ipw2100_translate_scan(struct ieee80211_device *ieee,
178 start = iwe_stream_add_point(start, stop, &iwe, buf); 177 start = iwe_stream_add_point(start, stop, &iwe, buf);
179 } 178 }
180 179
181 if (ieee->wpa_enabled && network->rsn_ie_len){ 180 if (ieee->wpa_enabled && network->rsn_ie_len) {
182 char buf[MAX_WPA_IE_LEN * 2 + 30]; 181 char buf[MAX_WPA_IE_LEN * 2 + 30];
183 182
184 u8 *p = buf; 183 u8 *p = buf;
@@ -198,12 +197,12 @@ static inline char *ipw2100_translate_scan(struct ieee80211_device *ieee,
198 iwe.cmd = IWEVCUSTOM; 197 iwe.cmd = IWEVCUSTOM;
199 p = custom; 198 p = custom;
200 p += snprintf(p, MAX_CUSTOM_LEN - (p - custom), 199 p += snprintf(p, MAX_CUSTOM_LEN - (p - custom),
201 " Last beacon: %lums ago", (jiffies - network->last_scanned) / (HZ / 100)); 200 " Last beacon: %lums ago",
201 (jiffies - network->last_scanned) / (HZ / 100));
202 iwe.u.data.length = p - custom; 202 iwe.u.data.length = p - custom;
203 if (iwe.u.data.length) 203 if (iwe.u.data.length)
204 start = iwe_stream_add_point(start, stop, &iwe, custom); 204 start = iwe_stream_add_point(start, stop, &iwe, custom);
205 205
206
207 return start; 206 return start;
208} 207}
209 208
@@ -228,18 +227,19 @@ int ieee80211_wx_get_scan(struct ieee80211_device *ieee,
228 time_after(network->last_scanned + ieee->scan_age, jiffies)) 227 time_after(network->last_scanned + ieee->scan_age, jiffies))
229 ev = ipw2100_translate_scan(ieee, ev, stop, network); 228 ev = ipw2100_translate_scan(ieee, ev, stop, network);
230 else 229 else
231 IEEE80211_DEBUG_SCAN( 230 IEEE80211_DEBUG_SCAN("Not showing network '%s ("
232 "Not showing network '%s (" 231 MAC_FMT ")' due to age (%lums).\n",
233 MAC_FMT ")' due to age (%lums).\n", 232 escape_essid(network->ssid,
234 escape_essid(network->ssid, 233 network->ssid_len),
235 network->ssid_len), 234 MAC_ARG(network->bssid),
236 MAC_ARG(network->bssid), 235 (jiffies -
237 (jiffies - network->last_scanned) / (HZ / 100)); 236 network->last_scanned) / (HZ /
237 100));
238 } 238 }
239 239
240 spin_unlock_irqrestore(&ieee->lock, flags); 240 spin_unlock_irqrestore(&ieee->lock, flags);
241 241
242 wrqu->data.length = ev - extra; 242 wrqu->data.length = ev - extra;
243 wrqu->data.flags = 0; 243 wrqu->data.flags = 0;
244 244
245 IEEE80211_DEBUG_WX("exit: %d networks returned.\n", i); 245 IEEE80211_DEBUG_WX("exit: %d networks returned.\n", i);
@@ -291,8 +291,8 @@ int ieee80211_wx_set_encode(struct ieee80211_device *ieee,
291 if (ieee->crypt[i] != NULL) { 291 if (ieee->crypt[i] != NULL) {
292 if (key_provided) 292 if (key_provided)
293 break; 293 break;
294 ieee80211_crypt_delayed_deinit( 294 ieee80211_crypt_delayed_deinit(ieee,
295 ieee, &ieee->crypt[i]); 295 &ieee->crypt[i]);
296 } 296 }
297 } 297 }
298 298
@@ -305,8 +305,6 @@ int ieee80211_wx_set_encode(struct ieee80211_device *ieee,
305 goto done; 305 goto done;
306 } 306 }
307 307
308
309
310 sec.enabled = 1; 308 sec.enabled = 1;
311 sec.flags |= SEC_ENABLED; 309 sec.flags |= SEC_ENABLED;
312 310
@@ -340,8 +338,7 @@ int ieee80211_wx_set_encode(struct ieee80211_device *ieee,
340 new_crypt = NULL; 338 new_crypt = NULL;
341 339
342 printk(KERN_WARNING "%s: could not initialize WEP: " 340 printk(KERN_WARNING "%s: could not initialize WEP: "
343 "load module ieee80211_crypt_wep\n", 341 "load module ieee80211_crypt_wep\n", dev->name);
344 dev->name);
345 return -EOPNOTSUPP; 342 return -EOPNOTSUPP;
346 } 343 }
347 *crypt = new_crypt; 344 *crypt = new_crypt;
@@ -358,7 +355,7 @@ int ieee80211_wx_set_encode(struct ieee80211_device *ieee,
358 key, escape_essid(sec.keys[key], len), 355 key, escape_essid(sec.keys[key], len),
359 erq->length, len); 356 erq->length, len);
360 sec.key_sizes[key] = len; 357 sec.key_sizes[key] = len;
361 (*crypt)->ops->set_key(sec.keys[key], len, NULL, 358 (*crypt)->ops->set_key(sec.keys[key], len, NULL,
362 (*crypt)->priv); 359 (*crypt)->priv);
363 sec.flags |= (1 << key); 360 sec.flags |= (1 << key);
364 /* This ensures a key will be activated if no key is 361 /* This ensures a key will be activated if no key is
@@ -381,15 +378,15 @@ int ieee80211_wx_set_encode(struct ieee80211_device *ieee,
381 378
382 /* No key data - just set the default TX key index */ 379 /* No key data - just set the default TX key index */
383 if (key_provided) { 380 if (key_provided) {
384 IEEE80211_DEBUG_WX( 381 IEEE80211_DEBUG_WX
385 "Setting key %d to default Tx key.\n", key); 382 ("Setting key %d to default Tx key.\n", key);
386 ieee->tx_keyidx = key; 383 ieee->tx_keyidx = key;
387 sec.active_key = key; 384 sec.active_key = key;
388 sec.flags |= SEC_ACTIVE_KEY; 385 sec.flags |= SEC_ACTIVE_KEY;
389 } 386 }
390 } 387 }
391 388
392 done: 389 done:
393 ieee->open_wep = !(erq->flags & IW_ENCODE_RESTRICTED); 390 ieee->open_wep = !(erq->flags & IW_ENCODE_RESTRICTED);
394 sec.auth_mode = ieee->open_wep ? WLAN_AUTH_OPEN : WLAN_AUTH_SHARED_KEY; 391 sec.auth_mode = ieee->open_wep ? WLAN_AUTH_OPEN : WLAN_AUTH_SHARED_KEY;
395 sec.flags |= SEC_AUTH_MODE; 392 sec.flags |= SEC_AUTH_MODE;
@@ -399,7 +396,7 @@ int ieee80211_wx_set_encode(struct ieee80211_device *ieee,
399 /* For now we just support WEP, so only set that security level... 396 /* For now we just support WEP, so only set that security level...
400 * TODO: When WPA is added this is one place that needs to change */ 397 * TODO: When WPA is added this is one place that needs to change */
401 sec.flags |= SEC_LEVEL; 398 sec.flags |= SEC_LEVEL;
402 sec.level = SEC_LEVEL_1; /* 40 and 104 bit WEP */ 399 sec.level = SEC_LEVEL_1; /* 40 and 104 bit WEP */
403 400
404 if (ieee->set_security) 401 if (ieee->set_security)
405 ieee->set_security(dev, &sec); 402 ieee->set_security(dev, &sec);
diff --git a/net/ipv4/ip_fragment.c b/net/ipv4/ip_fragment.c
index 9e6e683cc34d..e7d26d9943c2 100644
--- a/net/ipv4/ip_fragment.c
+++ b/net/ipv4/ip_fragment.c
@@ -457,7 +457,7 @@ static void ip_frag_queue(struct ipq *qp, struct sk_buff *skb)
457 457
458 if (pskb_pull(skb, ihl) == NULL) 458 if (pskb_pull(skb, ihl) == NULL)
459 goto err; 459 goto err;
460 if (pskb_trim(skb, end-offset)) 460 if (pskb_trim_rcsum(skb, end-offset))
461 goto err; 461 goto err;
462 462
463 /* Find out which fragments are in front and at the back of us 463 /* Find out which fragments are in front and at the back of us
diff --git a/net/ipv4/netfilter/Kconfig b/net/ipv4/netfilter/Kconfig
index e046f5521814..30aa8e2ee214 100644
--- a/net/ipv4/netfilter/Kconfig
+++ b/net/ipv4/netfilter/Kconfig
@@ -34,6 +34,7 @@ config IP_NF_CT_ACCT
34 34
35config IP_NF_CONNTRACK_MARK 35config IP_NF_CONNTRACK_MARK
36 bool 'Connection mark tracking support' 36 bool 'Connection mark tracking support'
37 depends on IP_NF_CONNTRACK
37 help 38 help
38 This option enables support for connection marks, used by the 39 This option enables support for connection marks, used by the
39 `CONNMARK' target and `connmark' match. Similar to the mark value 40 `CONNMARK' target and `connmark' match. Similar to the mark value
@@ -85,6 +86,25 @@ config IP_NF_IRC
85 86
86 To compile it as a module, choose M here. If unsure, say Y. 87 To compile it as a module, choose M here. If unsure, say Y.
87 88
89config IP_NF_NETBIOS_NS
90 tristate "NetBIOS name service protocol support (EXPERIMENTAL)"
91 depends on IP_NF_CONNTRACK && EXPERIMENTAL
92 help
93 NetBIOS name service requests are sent as broadcast messages from an
94 unprivileged port and responded to with unicast messages to the
 95	  same port. This makes them hard to firewall properly because connection
96 tracking doesn't deal with broadcasts. This helper tracks locally
97 originating NetBIOS name service requests and the corresponding
98 responses. It relies on correct IP address configuration, specifically
99 netmask and broadcast address. When properly configured, the output
100 of "ip address show" should look similar to this:
101
102 $ ip -4 address show eth0
103 4: eth0: <BROADCAST,MULTICAST,UP> mtu 1500 qdisc pfifo_fast qlen 1000
104 inet 172.16.2.252/24 brd 172.16.2.255 scope global eth0
105
106 To compile it as a module, choose M here. If unsure, say N.
107
88config IP_NF_TFTP 108config IP_NF_TFTP
89 tristate "TFTP protocol support" 109 tristate "TFTP protocol support"
90 depends on IP_NF_CONNTRACK 110 depends on IP_NF_CONNTRACK
diff --git a/net/ipv4/netfilter/Makefile b/net/ipv4/netfilter/Makefile
index a7bd38f50522..1ba0db746817 100644
--- a/net/ipv4/netfilter/Makefile
+++ b/net/ipv4/netfilter/Makefile
@@ -21,6 +21,7 @@ obj-$(CONFIG_IP_NF_AMANDA) += ip_conntrack_amanda.o
21obj-$(CONFIG_IP_NF_TFTP) += ip_conntrack_tftp.o 21obj-$(CONFIG_IP_NF_TFTP) += ip_conntrack_tftp.o
22obj-$(CONFIG_IP_NF_FTP) += ip_conntrack_ftp.o 22obj-$(CONFIG_IP_NF_FTP) += ip_conntrack_ftp.o
23obj-$(CONFIG_IP_NF_IRC) += ip_conntrack_irc.o 23obj-$(CONFIG_IP_NF_IRC) += ip_conntrack_irc.o
24obj-$(CONFIG_IP_NF_NETBIOS_NS) += ip_conntrack_netbios_ns.o
24 25
25# NAT helpers 26# NAT helpers
26obj-$(CONFIG_IP_NF_NAT_AMANDA) += ip_nat_amanda.o 27obj-$(CONFIG_IP_NF_NAT_AMANDA) += ip_nat_amanda.o
diff --git a/net/ipv4/netfilter/ip_conntrack_amanda.c b/net/ipv4/netfilter/ip_conntrack_amanda.c
index be4c9eb3243f..dc20881004bc 100644
--- a/net/ipv4/netfilter/ip_conntrack_amanda.c
+++ b/net/ipv4/netfilter/ip_conntrack_amanda.c
@@ -108,6 +108,7 @@ static int help(struct sk_buff **pskb,
108 } 108 }
109 109
110 exp->expectfn = NULL; 110 exp->expectfn = NULL;
111 exp->flags = 0;
111 112
112 exp->tuple.src.ip = ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple.src.ip; 113 exp->tuple.src.ip = ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple.src.ip;
113 exp->tuple.src.u.tcp.port = 0; 114 exp->tuple.src.u.tcp.port = 0;
diff --git a/net/ipv4/netfilter/ip_conntrack_core.c b/net/ipv4/netfilter/ip_conntrack_core.c
index a0648600190e..19cba16e6e1e 100644
--- a/net/ipv4/netfilter/ip_conntrack_core.c
+++ b/net/ipv4/netfilter/ip_conntrack_core.c
@@ -197,7 +197,7 @@ ip_ct_invert_tuple(struct ip_conntrack_tuple *inverse,
197 197
198 198
199/* ip_conntrack_expect helper functions */ 199/* ip_conntrack_expect helper functions */
200static void unlink_expect(struct ip_conntrack_expect *exp) 200void ip_ct_unlink_expect(struct ip_conntrack_expect *exp)
201{ 201{
202 ASSERT_WRITE_LOCK(&ip_conntrack_lock); 202 ASSERT_WRITE_LOCK(&ip_conntrack_lock);
203 IP_NF_ASSERT(!timer_pending(&exp->timeout)); 203 IP_NF_ASSERT(!timer_pending(&exp->timeout));
@@ -207,18 +207,12 @@ static void unlink_expect(struct ip_conntrack_expect *exp)
207 ip_conntrack_expect_put(exp); 207 ip_conntrack_expect_put(exp);
208} 208}
209 209
210void __ip_ct_expect_unlink_destroy(struct ip_conntrack_expect *exp)
211{
212 unlink_expect(exp);
213 ip_conntrack_expect_put(exp);
214}
215
216static void expectation_timed_out(unsigned long ul_expect) 210static void expectation_timed_out(unsigned long ul_expect)
217{ 211{
218 struct ip_conntrack_expect *exp = (void *)ul_expect; 212 struct ip_conntrack_expect *exp = (void *)ul_expect;
219 213
220 write_lock_bh(&ip_conntrack_lock); 214 write_lock_bh(&ip_conntrack_lock);
221 unlink_expect(exp); 215 ip_ct_unlink_expect(exp);
222 write_unlock_bh(&ip_conntrack_lock); 216 write_unlock_bh(&ip_conntrack_lock);
223 ip_conntrack_expect_put(exp); 217 ip_conntrack_expect_put(exp);
224} 218}
@@ -264,10 +258,14 @@ find_expectation(const struct ip_conntrack_tuple *tuple)
264 master ct never got confirmed, we'd hold a reference to it 258 master ct never got confirmed, we'd hold a reference to it
265 and weird things would happen to future packets). */ 259 and weird things would happen to future packets). */
266 if (ip_ct_tuple_mask_cmp(tuple, &i->tuple, &i->mask) 260 if (ip_ct_tuple_mask_cmp(tuple, &i->tuple, &i->mask)
267 && is_confirmed(i->master) 261 && is_confirmed(i->master)) {
268 && del_timer(&i->timeout)) { 262 if (i->flags & IP_CT_EXPECT_PERMANENT) {
269 unlink_expect(i); 263 atomic_inc(&i->use);
270 return i; 264 return i;
265 } else if (del_timer(&i->timeout)) {
266 ip_ct_unlink_expect(i);
267 return i;
268 }
271 } 269 }
272 } 270 }
273 return NULL; 271 return NULL;
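The hunk above carries the behavioural core of this series: a matching expectation is normally consumed on first use (its timer stopped and the entry unlinked), while one flagged IP_CT_EXPECT_PERMANENT only takes an extra reference and stays registered until its own timeout fires, so every broadcast reply can match it. A minimal user-space sketch of that decision, toy types only, not kernel code:

#include <stdbool.h>
#include <stdio.h>

struct toy_expect {
	unsigned int flags;
	int refcount;
	bool listed;
};
#define TOY_EXPECT_PERMANENT 0x1

static void take_expectation(struct toy_expect *e)
{
	if (e->flags & TOY_EXPECT_PERMANENT)
		e->refcount++;          /* keep it listed for further replies */
	else
		e->listed = false;      /* one-shot: unlink on first match    */
}

int main(void)
{
	struct toy_expect oneshot = { 0, 1, true };
	struct toy_expect perm = { TOY_EXPECT_PERMANENT, 1, true };

	take_expectation(&oneshot);
	take_expectation(&perm);
	printf("one-shot still listed: %d, permanent still listed: %d\n",
	       oneshot.listed, perm.listed);
	return 0;
}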
@@ -284,7 +282,7 @@ void ip_ct_remove_expectations(struct ip_conntrack *ct)
284 282
285 list_for_each_entry_safe(i, tmp, &ip_conntrack_expect_list, list) { 283 list_for_each_entry_safe(i, tmp, &ip_conntrack_expect_list, list) {
286 if (i->master == ct && del_timer(&i->timeout)) { 284 if (i->master == ct && del_timer(&i->timeout)) {
287 unlink_expect(i); 285 ip_ct_unlink_expect(i);
288 ip_conntrack_expect_put(i); 286 ip_conntrack_expect_put(i);
289 } 287 }
290 } 288 }
@@ -925,7 +923,7 @@ void ip_conntrack_unexpect_related(struct ip_conntrack_expect *exp)
 925	/* choose the oldest expectation to evict */			 923	/* choose the oldest expectation to evict */
926 list_for_each_entry_reverse(i, &ip_conntrack_expect_list, list) { 924 list_for_each_entry_reverse(i, &ip_conntrack_expect_list, list) {
927 if (expect_matches(i, exp) && del_timer(&i->timeout)) { 925 if (expect_matches(i, exp) && del_timer(&i->timeout)) {
928 unlink_expect(i); 926 ip_ct_unlink_expect(i);
929 write_unlock_bh(&ip_conntrack_lock); 927 write_unlock_bh(&ip_conntrack_lock);
930 ip_conntrack_expect_put(i); 928 ip_conntrack_expect_put(i);
931 return; 929 return;
@@ -934,6 +932,9 @@ void ip_conntrack_unexpect_related(struct ip_conntrack_expect *exp)
934 write_unlock_bh(&ip_conntrack_lock); 932 write_unlock_bh(&ip_conntrack_lock);
935} 933}
936 934
935/* We don't increase the master conntrack refcount for non-fulfilled
936 * conntracks. During the conntrack destruction, the expectations are
937 * always killed before the conntrack itself */
937struct ip_conntrack_expect *ip_conntrack_expect_alloc(struct ip_conntrack *me) 938struct ip_conntrack_expect *ip_conntrack_expect_alloc(struct ip_conntrack *me)
938{ 939{
939 struct ip_conntrack_expect *new; 940 struct ip_conntrack_expect *new;
@@ -944,17 +945,14 @@ struct ip_conntrack_expect *ip_conntrack_expect_alloc(struct ip_conntrack *me)
944 return NULL; 945 return NULL;
945 } 946 }
946 new->master = me; 947 new->master = me;
947 atomic_inc(&new->master->ct_general.use);
948 atomic_set(&new->use, 1); 948 atomic_set(&new->use, 1);
949 return new; 949 return new;
950} 950}
951 951
952void ip_conntrack_expect_put(struct ip_conntrack_expect *exp) 952void ip_conntrack_expect_put(struct ip_conntrack_expect *exp)
953{ 953{
954 if (atomic_dec_and_test(&exp->use)) { 954 if (atomic_dec_and_test(&exp->use))
955 ip_conntrack_put(exp->master);
956 kmem_cache_free(ip_conntrack_expect_cachep, exp); 955 kmem_cache_free(ip_conntrack_expect_cachep, exp);
957 }
958} 956}
959 957
960static void ip_conntrack_expect_insert(struct ip_conntrack_expect *exp) 958static void ip_conntrack_expect_insert(struct ip_conntrack_expect *exp)
@@ -982,7 +980,7 @@ static void evict_oldest_expect(struct ip_conntrack *master)
982 list_for_each_entry_reverse(i, &ip_conntrack_expect_list, list) { 980 list_for_each_entry_reverse(i, &ip_conntrack_expect_list, list) {
983 if (i->master == master) { 981 if (i->master == master) {
984 if (del_timer(&i->timeout)) { 982 if (del_timer(&i->timeout)) {
985 unlink_expect(i); 983 ip_ct_unlink_expect(i);
986 ip_conntrack_expect_put(i); 984 ip_conntrack_expect_put(i);
987 } 985 }
988 break; 986 break;
@@ -1099,7 +1097,7 @@ void ip_conntrack_helper_unregister(struct ip_conntrack_helper *me)
1099 /* Get rid of expectations */ 1097 /* Get rid of expectations */
1100 list_for_each_entry_safe(exp, tmp, &ip_conntrack_expect_list, list) { 1098 list_for_each_entry_safe(exp, tmp, &ip_conntrack_expect_list, list) {
1101 if (exp->master->helper == me && del_timer(&exp->timeout)) { 1099 if (exp->master->helper == me && del_timer(&exp->timeout)) {
1102 unlink_expect(exp); 1100 ip_ct_unlink_expect(exp);
1103 ip_conntrack_expect_put(exp); 1101 ip_conntrack_expect_put(exp);
1104 } 1102 }
1105 } 1103 }
diff --git a/net/ipv4/netfilter/ip_conntrack_ftp.c b/net/ipv4/netfilter/ip_conntrack_ftp.c
index 3a2627db1729..1b79ec36085f 100644
--- a/net/ipv4/netfilter/ip_conntrack_ftp.c
+++ b/net/ipv4/netfilter/ip_conntrack_ftp.c
@@ -421,6 +421,7 @@ static int help(struct sk_buff **pskb,
421 { 0xFFFFFFFF, { .tcp = { 0xFFFF } }, 0xFF }}); 421 { 0xFFFFFFFF, { .tcp = { 0xFFFF } }, 0xFF }});
422 422
423 exp->expectfn = NULL; 423 exp->expectfn = NULL;
424 exp->flags = 0;
424 425
425 /* Now, NAT might want to mangle the packet, and register the 426 /* Now, NAT might want to mangle the packet, and register the
426 * (possibly changed) expectation itself. */ 427 * (possibly changed) expectation itself. */
diff --git a/net/ipv4/netfilter/ip_conntrack_irc.c b/net/ipv4/netfilter/ip_conntrack_irc.c
index 25438eec21a1..d7a8a98c05e1 100644
--- a/net/ipv4/netfilter/ip_conntrack_irc.c
+++ b/net/ipv4/netfilter/ip_conntrack_irc.c
@@ -221,6 +221,7 @@ static int help(struct sk_buff **pskb,
221 { { 0, { 0 } }, 221 { { 0, { 0 } },
222 { 0xFFFFFFFF, { .tcp = { 0xFFFF } }, 0xFF }}); 222 { 0xFFFFFFFF, { .tcp = { 0xFFFF } }, 0xFF }});
223 exp->expectfn = NULL; 223 exp->expectfn = NULL;
224 exp->flags = 0;
224 if (ip_nat_irc_hook) 225 if (ip_nat_irc_hook)
225 ret = ip_nat_irc_hook(pskb, ctinfo, 226 ret = ip_nat_irc_hook(pskb, ctinfo,
226 addr_beg_p - ib_ptr, 227 addr_beg_p - ib_ptr,
diff --git a/net/ipv4/netfilter/ip_conntrack_netbios_ns.c b/net/ipv4/netfilter/ip_conntrack_netbios_ns.c
new file mode 100644
index 000000000000..2b5cf9c51309
--- /dev/null
+++ b/net/ipv4/netfilter/ip_conntrack_netbios_ns.c
@@ -0,0 +1,131 @@
1/*
2 * NetBIOS name service broadcast connection tracking helper
3 *
4 * (c) 2005 Patrick McHardy <kaber@trash.net>
5 *
6 * This program is free software; you can redistribute it and/or
7 * modify it under the terms of the GNU General Public License
8 * as published by the Free Software Foundation; either version
9 * 2 of the License, or (at your option) any later version.
10 */
11/*
12 * This helper tracks locally originating NetBIOS name service
13 * requests by issuing permanent expectations (valid until
14 * timing out) matching all reply connections from the
15 * destination network. The only NetBIOS specific thing is
16 * actually the port number.
17 */
18#include <linux/kernel.h>
19#include <linux/module.h>
20#include <linux/init.h>
21#include <linux/skbuff.h>
22#include <linux/netdevice.h>
23#include <linux/inetdevice.h>
24#include <linux/in.h>
25#include <linux/ip.h>
26#include <linux/udp.h>
27#include <net/route.h>
28
29#include <linux/netfilter.h>
30#include <linux/netfilter_ipv4.h>
31#include <linux/netfilter_ipv4/ip_conntrack.h>
32#include <linux/netfilter_ipv4/ip_conntrack_helper.h>
33
34MODULE_AUTHOR("Patrick McHardy <kaber@trash.net>");
35MODULE_DESCRIPTION("NetBIOS name service broadcast connection tracking helper");
36MODULE_LICENSE("GPL");
37
38static unsigned int timeout = 3;
39module_param(timeout, int, 0600);
40MODULE_PARM_DESC(timeout, "timeout for master connection/replies in seconds");
41
42static int help(struct sk_buff **pskb,
43 struct ip_conntrack *ct, enum ip_conntrack_info ctinfo)
44{
45 struct ip_conntrack_expect *exp;
46 struct iphdr *iph = (*pskb)->nh.iph;
47 struct udphdr _uh, *uh;
48 struct rtable *rt = (struct rtable *)(*pskb)->dst;
49 struct in_device *in_dev;
50 u_int32_t mask = 0;
51
52 /* we're only interested in locally generated packets */
53 if ((*pskb)->sk == NULL)
54 goto out;
55 if (rt == NULL || !(rt->rt_flags & RTCF_BROADCAST))
56 goto out;
57 if (CTINFO2DIR(ctinfo) != IP_CT_DIR_ORIGINAL)
58 goto out;
59
60 rcu_read_lock();
61 in_dev = __in_dev_get(rt->u.dst.dev);
62 if (in_dev != NULL) {
63 for_primary_ifa(in_dev) {
64 if (ifa->ifa_broadcast == iph->daddr) {
65 mask = ifa->ifa_mask;
66 break;
67 }
68 } endfor_ifa(in_dev);
69 }
70 rcu_read_unlock();
71
72 if (mask == 0)
73 goto out;
74
75 uh = skb_header_pointer(*pskb, iph->ihl * 4, sizeof(_uh), &_uh);
76 BUG_ON(uh == NULL);
77
78 exp = ip_conntrack_expect_alloc(ct);
79 if (exp == NULL)
80 goto out;
81 memset(&exp->tuple, 0, sizeof(exp->tuple));
82 exp->tuple.src.ip = iph->daddr & mask;
83 exp->tuple.dst.ip = iph->saddr;
84 exp->tuple.dst.u.udp.port = uh->source;
85 exp->tuple.dst.protonum = IPPROTO_UDP;
86
87 memset(&exp->mask, 0, sizeof(exp->mask));
88 exp->mask.src.ip = mask;
89 exp->mask.dst.ip = 0xFFFFFFFF;
90 exp->mask.dst.u.udp.port = 0xFFFF;
91 exp->mask.dst.protonum = 0xFF;
92
93 exp->expectfn = NULL;
94 exp->flags = IP_CT_EXPECT_PERMANENT;
95
96 ip_conntrack_expect_related(exp);
97 ip_conntrack_expect_put(exp);
98
99 ip_ct_refresh_acct(ct, ctinfo, NULL, timeout * HZ);
100out:
101 return NF_ACCEPT;
102}
103
104static struct ip_conntrack_helper helper = {
105 .name = "netbios-ns",
106 .tuple = {
107 .src.u.udp.port = __constant_htons(137),
108 .dst.protonum = IPPROTO_UDP,
109 },
110 .mask = {
111 .src.u.udp.port = 0xFFFF,
112 .dst.protonum = 0xFF,
113 },
114 .max_expected = 1,
115 .me = THIS_MODULE,
116 .help = help,
117};
118
119static int __init init(void)
120{
121 helper.timeout = timeout;
122 return ip_conntrack_helper_register(&helper);
123}
124
125static void __exit fini(void)
126{
127 ip_conntrack_helper_unregister(&helper);
128}
129
130module_init(init);
131module_exit(fini);
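The expectation set up in help() above is keyed on the destination network rather than on a single peer: exp->tuple.src.ip holds the broadcast address masked down to the network and exp->mask.src.ip holds the netmask, so a unicast reply from any host on that subnet to the requesting port matches. A stand-alone illustration using the 172.16.2.252/24 addresses from the Kconfig example (not kernel code):

#include <stdio.h>
#include <arpa/inet.h>

int main(void)
{
	in_addr_t mask    = inet_addr("255.255.255.0"); /* ifa_mask              */
	in_addr_t daddr   = inet_addr("172.16.2.255");  /* broadcast request dst */
	in_addr_t exp_src = daddr & mask;               /* exp->tuple.src.ip     */
	in_addr_t reply   = inet_addr("172.16.2.7");    /* some answering host   */

	printf("reply %s the expectation\n",
	       (reply & mask) == exp_src ? "matches" : "does not match");
	return 0;
}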
diff --git a/net/ipv4/netfilter/ip_conntrack_netlink.c b/net/ipv4/netfilter/ip_conntrack_netlink.c
index a4e9278db4ed..15aef3564742 100644
--- a/net/ipv4/netfilter/ip_conntrack_netlink.c
+++ b/net/ipv4/netfilter/ip_conntrack_netlink.c
@@ -1349,8 +1349,10 @@ ctnetlink_del_expect(struct sock *ctnl, struct sk_buff *skb,
1349 list_for_each_entry_safe(exp, tmp, &ip_conntrack_expect_list, 1349 list_for_each_entry_safe(exp, tmp, &ip_conntrack_expect_list,
1350 list) { 1350 list) {
1351 if (exp->master->helper == h 1351 if (exp->master->helper == h
1352 && del_timer(&exp->timeout)) 1352 && del_timer(&exp->timeout)) {
1353 __ip_ct_expect_unlink_destroy(exp); 1353 ip_ct_unlink_expect(exp);
1354 ip_conntrack_expect_put(exp);
1355 }
1354 } 1356 }
1355 write_unlock(&ip_conntrack_lock); 1357 write_unlock(&ip_conntrack_lock);
1356 } else { 1358 } else {
@@ -1358,8 +1360,10 @@ ctnetlink_del_expect(struct sock *ctnl, struct sk_buff *skb,
1358 write_lock_bh(&ip_conntrack_lock); 1360 write_lock_bh(&ip_conntrack_lock);
1359 list_for_each_entry_safe(exp, tmp, &ip_conntrack_expect_list, 1361 list_for_each_entry_safe(exp, tmp, &ip_conntrack_expect_list,
1360 list) { 1362 list) {
1361 if (del_timer(&exp->timeout)) 1363 if (del_timer(&exp->timeout)) {
1362 __ip_ct_expect_unlink_destroy(exp); 1364 ip_ct_unlink_expect(exp);
1365 ip_conntrack_expect_put(exp);
1366 }
1363 } 1367 }
1364 write_unlock_bh(&ip_conntrack_lock); 1368 write_unlock_bh(&ip_conntrack_lock);
1365 } 1369 }
@@ -1413,6 +1417,7 @@ ctnetlink_create_expect(struct nfattr *cda[])
1413 } 1417 }
1414 1418
1415 exp->expectfn = NULL; 1419 exp->expectfn = NULL;
1420 exp->flags = 0;
1416 exp->master = ct; 1421 exp->master = ct;
1417 memcpy(&exp->tuple, &tuple, sizeof(struct ip_conntrack_tuple)); 1422 memcpy(&exp->tuple, &tuple, sizeof(struct ip_conntrack_tuple));
1418 memcpy(&exp->mask, &mask, sizeof(struct ip_conntrack_tuple)); 1423 memcpy(&exp->mask, &mask, sizeof(struct ip_conntrack_tuple));
diff --git a/net/ipv4/netfilter/ip_conntrack_proto_tcp.c b/net/ipv4/netfilter/ip_conntrack_proto_tcp.c
index f23ef1f88c46..1985abc59d24 100644
--- a/net/ipv4/netfilter/ip_conntrack_proto_tcp.c
+++ b/net/ipv4/netfilter/ip_conntrack_proto_tcp.c
@@ -349,6 +349,7 @@ static int tcp_to_nfattr(struct sk_buff *skb, struct nfattr *nfa,
349 return 0; 349 return 0;
350 350
351nfattr_failure: 351nfattr_failure:
352 read_unlock_bh(&tcp_lock);
352 return -1; 353 return -1;
353} 354}
354#endif 355#endif
diff --git a/net/ipv4/netfilter/ip_conntrack_standalone.c b/net/ipv4/netfilter/ip_conntrack_standalone.c
index ee5895afd0c3..ae3e3e655db5 100644
--- a/net/ipv4/netfilter/ip_conntrack_standalone.c
+++ b/net/ipv4/netfilter/ip_conntrack_standalone.c
@@ -998,7 +998,7 @@ EXPORT_SYMBOL(ip_conntrack_expect_related);
998EXPORT_SYMBOL(ip_conntrack_unexpect_related); 998EXPORT_SYMBOL(ip_conntrack_unexpect_related);
999EXPORT_SYMBOL_GPL(ip_conntrack_expect_list); 999EXPORT_SYMBOL_GPL(ip_conntrack_expect_list);
1000EXPORT_SYMBOL_GPL(__ip_conntrack_expect_find); 1000EXPORT_SYMBOL_GPL(__ip_conntrack_expect_find);
1001EXPORT_SYMBOL_GPL(__ip_ct_expect_unlink_destroy); 1001EXPORT_SYMBOL_GPL(ip_ct_unlink_expect);
1002 1002
1003EXPORT_SYMBOL(ip_conntrack_tuple_taken); 1003EXPORT_SYMBOL(ip_conntrack_tuple_taken);
1004EXPORT_SYMBOL(ip_ct_gather_frags); 1004EXPORT_SYMBOL(ip_ct_gather_frags);
diff --git a/net/ipv4/netfilter/ip_conntrack_tftp.c b/net/ipv4/netfilter/ip_conntrack_tftp.c
index f8ff170f390a..d2b590533452 100644
--- a/net/ipv4/netfilter/ip_conntrack_tftp.c
+++ b/net/ipv4/netfilter/ip_conntrack_tftp.c
@@ -75,6 +75,7 @@ static int tftp_help(struct sk_buff **pskb,
75 exp->mask.dst.u.udp.port = 0xffff; 75 exp->mask.dst.u.udp.port = 0xffff;
76 exp->mask.dst.protonum = 0xff; 76 exp->mask.dst.protonum = 0xff;
77 exp->expectfn = NULL; 77 exp->expectfn = NULL;
78 exp->flags = 0;
78 79
79 DEBUGP("expect: "); 80 DEBUGP("expect: ");
80 DUMP_TUPLE(&exp->tuple); 81 DUMP_TUPLE(&exp->tuple);
diff --git a/net/ipv4/netfilter/ip_nat_rule.c b/net/ipv4/netfilter/ip_nat_rule.c
index 60d70fa41a15..cb66b8bddeb3 100644
--- a/net/ipv4/netfilter/ip_nat_rule.c
+++ b/net/ipv4/netfilter/ip_nat_rule.c
@@ -255,6 +255,27 @@ alloc_null_binding(struct ip_conntrack *conntrack,
255 return ip_nat_setup_info(conntrack, &range, hooknum); 255 return ip_nat_setup_info(conntrack, &range, hooknum);
256} 256}
257 257
258unsigned int
259alloc_null_binding_confirmed(struct ip_conntrack *conntrack,
260 struct ip_nat_info *info,
261 unsigned int hooknum)
262{
263 u_int32_t ip
264 = (HOOK2MANIP(hooknum) == IP_NAT_MANIP_SRC
265 ? conntrack->tuplehash[IP_CT_DIR_REPLY].tuple.dst.ip
266 : conntrack->tuplehash[IP_CT_DIR_REPLY].tuple.src.ip);
267 u_int16_t all
268 = (HOOK2MANIP(hooknum) == IP_NAT_MANIP_SRC
269 ? conntrack->tuplehash[IP_CT_DIR_REPLY].tuple.dst.u.all
270 : conntrack->tuplehash[IP_CT_DIR_REPLY].tuple.src.u.all);
271 struct ip_nat_range range
272 = { IP_NAT_RANGE_MAP_IPS, ip, ip, { all }, { all } };
273
274 DEBUGP("Allocating NULL binding for confirmed %p (%u.%u.%u.%u)\n",
275 conntrack, NIPQUAD(ip));
276 return ip_nat_setup_info(conntrack, &range, hooknum);
277}
278
258int ip_nat_rule_find(struct sk_buff **pskb, 279int ip_nat_rule_find(struct sk_buff **pskb,
259 unsigned int hooknum, 280 unsigned int hooknum,
260 const struct net_device *in, 281 const struct net_device *in,
diff --git a/net/ipv4/netfilter/ip_nat_standalone.c b/net/ipv4/netfilter/ip_nat_standalone.c
index 89db052add81..0ff368b131f6 100644
--- a/net/ipv4/netfilter/ip_nat_standalone.c
+++ b/net/ipv4/netfilter/ip_nat_standalone.c
@@ -123,8 +123,12 @@ ip_nat_fn(unsigned int hooknum,
123 if (!ip_nat_initialized(ct, maniptype)) { 123 if (!ip_nat_initialized(ct, maniptype)) {
124 unsigned int ret; 124 unsigned int ret;
125 125
126 /* LOCAL_IN hook doesn't have a chain! */ 126 if (unlikely(is_confirmed(ct)))
127 if (hooknum == NF_IP_LOCAL_IN) 127 /* NAT module was loaded late */
128 ret = alloc_null_binding_confirmed(ct, info,
129 hooknum);
130 else if (hooknum == NF_IP_LOCAL_IN)
131 /* LOCAL_IN hook doesn't have a chain! */
128 ret = alloc_null_binding(ct, info, hooknum); 132 ret = alloc_null_binding(ct, info, hooknum);
129 else 133 else
130 ret = ip_nat_rule_find(pskb, hooknum, 134 ret = ip_nat_rule_find(pskb, hooknum,
diff --git a/net/netfilter/nfnetlink_queue.c b/net/netfilter/nfnetlink_queue.c
index 249bddb28acd..f81fe8c52e99 100644
--- a/net/netfilter/nfnetlink_queue.c
+++ b/net/netfilter/nfnetlink_queue.c
@@ -371,6 +371,12 @@ nfqnl_build_packet_message(struct nfqnl_instance *queue,
371 break; 371 break;
372 372
373 case NFQNL_COPY_PACKET: 373 case NFQNL_COPY_PACKET:
374 if (entry->skb->ip_summed == CHECKSUM_HW &&
375 (*errp = skb_checksum_help(entry->skb,
376 entry->info->outdev == NULL))) {
377 spin_unlock_bh(&queue->lock);
378 return NULL;
379 }
374 if (queue->copy_range == 0 380 if (queue->copy_range == 0
375 || queue->copy_range > entry->skb->len) 381 || queue->copy_range > entry->skb->len)
376 data_len = entry->skb->len; 382 data_len = entry->skb->len;
@@ -636,7 +642,7 @@ nfqnl_mangle(void *data, int data_len, struct nfqnl_queue_entry *e)
636 if (!skb_make_writable(&e->skb, data_len)) 642 if (!skb_make_writable(&e->skb, data_len))
637 return -ENOMEM; 643 return -ENOMEM;
638 memcpy(e->skb->data, data, data_len); 644 memcpy(e->skb->data, data, data_len);
639 645 e->skb->ip_summed = CHECKSUM_NONE;
640 return 0; 646 return 0;
641} 647}
642 648
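
The nfnetlink_queue changes above complete any pending hardware checksum (via skb_checksum_help) before a packet is copied to userspace, and mark a packet that userspace has mangled as CHECKSUM_NONE so the stack recomputes its checksum in software before transmission. That recomputation is the ordinary 16-bit one's-complement sum; a plain user-space sketch of the sum is shown here for reference (it is not the kernel's csum helpers):

#include <stdint.h>
#include <stddef.h>

/* RFC 1071-style one's-complement checksum over a byte buffer. */
static uint16_t ones_complement_sum(const void *data, size_t len)
{
        const uint8_t *p = data;
        uint32_t sum = 0;

        while (len > 1) {
                sum += ((uint32_t)p[0] << 8) | p[1];
                p += 2;
                len -= 2;
        }
        if (len)                          /* odd trailing byte, padded with zero */
                sum += (uint32_t)p[0] << 8;
        while (sum >> 16)                 /* fold the carries back in */
                sum = (sum & 0xffff) + (sum >> 16);
        return (uint16_t)~sum;
}
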
diff --git a/net/netlink/af_netlink.c b/net/netlink/af_netlink.c
index 62435ffc6184..a64e1d5ce3ca 100644
--- a/net/netlink/af_netlink.c
+++ b/net/netlink/af_netlink.c
@@ -398,24 +398,13 @@ static int netlink_create(struct socket *sock, int protocol)
398 if (nl_table[protocol].registered && 398 if (nl_table[protocol].registered &&
399 try_module_get(nl_table[protocol].module)) 399 try_module_get(nl_table[protocol].module))
400 module = nl_table[protocol].module; 400 module = nl_table[protocol].module;
401 else
402 err = -EPROTONOSUPPORT;
403 groups = nl_table[protocol].groups; 401 groups = nl_table[protocol].groups;
404 netlink_unlock_table(); 402 netlink_unlock_table();
405 403
406 if (err || (err = __netlink_create(sock, protocol) < 0)) 404 if ((err = __netlink_create(sock, protocol) < 0))
407 goto out_module; 405 goto out_module;
408 406
409 nlk = nlk_sk(sock->sk); 407 nlk = nlk_sk(sock->sk);
410
411 nlk->groups = kmalloc(NLGRPSZ(groups), GFP_KERNEL);
412 if (nlk->groups == NULL) {
413 err = -ENOMEM;
414 goto out_module;
415 }
416 memset(nlk->groups, 0, NLGRPSZ(groups));
417 nlk->ngroups = groups;
418
419 nlk->module = module; 408 nlk->module = module;
420out: 409out:
421 return err; 410 return err;
@@ -534,6 +523,29 @@ netlink_update_subscriptions(struct sock *sk, unsigned int subscriptions)
534 nlk->subscriptions = subscriptions; 523 nlk->subscriptions = subscriptions;
535} 524}
536 525
526static int netlink_alloc_groups(struct sock *sk)
527{
528 struct netlink_sock *nlk = nlk_sk(sk);
529 unsigned int groups;
530 int err = 0;
531
532 netlink_lock_table();
533 groups = nl_table[sk->sk_protocol].groups;
534 if (!nl_table[sk->sk_protocol].registered)
535 err = -ENOENT;
536 netlink_unlock_table();
537
538 if (err)
539 return err;
540
541 nlk->groups = kmalloc(NLGRPSZ(groups), GFP_KERNEL);
542 if (nlk->groups == NULL)
543 return -ENOMEM;
544 memset(nlk->groups, 0, NLGRPSZ(groups));
545 nlk->ngroups = groups;
546 return 0;
547}
548
537static int netlink_bind(struct socket *sock, struct sockaddr *addr, int addr_len) 549static int netlink_bind(struct socket *sock, struct sockaddr *addr, int addr_len)
538{ 550{
539 struct sock *sk = sock->sk; 551 struct sock *sk = sock->sk;
@@ -545,8 +557,15 @@ static int netlink_bind(struct socket *sock, struct sockaddr *addr, int addr_len
545 return -EINVAL; 557 return -EINVAL;
546 558
547 /* Only superuser is allowed to listen multicasts */ 559 /* Only superuser is allowed to listen multicasts */
548 if (nladdr->nl_groups && !netlink_capable(sock, NL_NONROOT_RECV)) 560 if (nladdr->nl_groups) {
549 return -EPERM; 561 if (!netlink_capable(sock, NL_NONROOT_RECV))
562 return -EPERM;
563 if (nlk->groups == NULL) {
564 err = netlink_alloc_groups(sk);
565 if (err)
566 return err;
567 }
568 }
550 569
551 if (nlk->pid) { 570 if (nlk->pid) {
552 if (nladdr->nl_pid != nlk->pid) 571 if (nladdr->nl_pid != nlk->pid)
@@ -559,7 +578,7 @@ static int netlink_bind(struct socket *sock, struct sockaddr *addr, int addr_len
559 return err; 578 return err;
560 } 579 }
561 580
562 if (!nladdr->nl_groups && !(u32)nlk->groups[0]) 581 if (!nladdr->nl_groups && (nlk->groups == NULL || !(u32)nlk->groups[0]))
563 return 0; 582 return 0;
564 583
565 netlink_table_grab(); 584 netlink_table_grab();
@@ -620,7 +639,7 @@ static int netlink_getname(struct socket *sock, struct sockaddr *addr, int *addr
620 nladdr->nl_groups = netlink_group_mask(nlk->dst_group); 639 nladdr->nl_groups = netlink_group_mask(nlk->dst_group);
621 } else { 640 } else {
622 nladdr->nl_pid = nlk->pid; 641 nladdr->nl_pid = nlk->pid;
623 nladdr->nl_groups = nlk->groups[0]; 642 nladdr->nl_groups = nlk->groups ? nlk->groups[0] : 0;
624 } 643 }
625 return 0; 644 return 0;
626} 645}
@@ -976,6 +995,11 @@ static int netlink_setsockopt(struct socket *sock, int level, int optname,
976 995
977 if (!netlink_capable(sock, NL_NONROOT_RECV)) 996 if (!netlink_capable(sock, NL_NONROOT_RECV))
978 return -EPERM; 997 return -EPERM;
998 if (nlk->groups == NULL) {
999 err = netlink_alloc_groups(sk);
1000 if (err)
1001 return err;
1002 }
979 if (!val || val - 1 >= nlk->ngroups) 1003 if (!val || val - 1 >= nlk->ngroups)
980 return -EINVAL; 1004 return -EINVAL;
981 netlink_table_grab(); 1005 netlink_table_grab();
@@ -1483,8 +1507,7 @@ static int netlink_seq_show(struct seq_file *seq, void *v)
1483 s, 1507 s,
1484 s->sk_protocol, 1508 s->sk_protocol,
1485 nlk->pid, 1509 nlk->pid,
1486 nlk->flags & NETLINK_KERNEL_SOCKET ? 1510 nlk->groups ? (u32)nlk->groups[0] : 0,
1487 0 : (unsigned int)nlk->groups[0],
1488 atomic_read(&s->sk_rmem_alloc), 1511 atomic_read(&s->sk_rmem_alloc),
1489 atomic_read(&s->sk_wmem_alloc), 1512 atomic_read(&s->sk_wmem_alloc),
1490 nlk->cb, 1513 nlk->cb,
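
The af_netlink.c hunks above stop allocating the multicast-groups bitmap unconditionally in netlink_create() and instead allocate it lazily through the new netlink_alloc_groups(), the first time bind() or setsockopt() actually requests group subscriptions; every reader (bind, getname, the /proc output) then has to treat a NULL nlk->groups as "no subscriptions". A small user-space sketch of the same lazy-allocation shape, with made-up names standing in for the netlink structures:

#include <stdlib.h>

struct lsock {
        unsigned char *groups;    /* NULL until the first subscription request */
        unsigned int   ngroups;
};

/* Allocate the bitmap on first use; sockets that never subscribe pay nothing. */
static int lsock_alloc_groups(struct lsock *s, unsigned int ngroups)
{
        size_t bytes = (ngroups + 7) / 8;

        if (s->groups)
                return 0;
        s->groups = calloc(1, bytes ? bytes : 1);
        if (!s->groups)
                return -1;
        s->ngroups = ngroups;
        return 0;
}

/* Readers must treat a missing bitmap as "not a member of anything". */
static int lsock_member(const struct lsock *s, unsigned int group)
{
        if (s->groups == NULL || group >= s->ngroups)
                return 0;
        return (s->groups[group / 8] >> (group % 8)) & 1;
}
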
diff --git a/net/netrom/af_netrom.c b/net/netrom/af_netrom.c
index 4b53de982114..f4578c759ffc 100644
--- a/net/netrom/af_netrom.c
+++ b/net/netrom/af_netrom.c
@@ -1261,6 +1261,7 @@ static int nr_info_show(struct seq_file *seq, void *v)
1261 struct net_device *dev; 1261 struct net_device *dev;
1262 struct nr_sock *nr; 1262 struct nr_sock *nr;
1263 const char *devname; 1263 const char *devname;
1264 char buf[11];
1264 1265
1265 if (v == SEQ_START_TOKEN) 1266 if (v == SEQ_START_TOKEN)
1266 seq_puts(seq, 1267 seq_puts(seq,
@@ -1276,11 +1277,11 @@ static int nr_info_show(struct seq_file *seq, void *v)
1276 else 1277 else
1277 devname = dev->name; 1278 devname = dev->name;
1278 1279
1279 seq_printf(seq, "%-9s ", ax2asc(&nr->user_addr)); 1280 seq_printf(seq, "%-9s ", ax2asc(buf, &nr->user_addr));
1280 seq_printf(seq, "%-9s ", ax2asc(&nr->dest_addr)); 1281 seq_printf(seq, "%-9s ", ax2asc(buf, &nr->dest_addr));
1281 seq_printf(seq, 1282 seq_printf(seq,
1282"%-9s %-3s %02X/%02X %02X/%02X %2d %3d %3d %3d %3lu/%03lu %2lu/%02lu %3lu/%03lu %3lu/%03lu %2d/%02d %3d %5d %5d %ld\n", 1283"%-9s %-3s %02X/%02X %02X/%02X %2d %3d %3d %3d %3lu/%03lu %2lu/%02lu %3lu/%03lu %3lu/%03lu %2d/%02d %3d %5d %5d %ld\n",
1283 ax2asc(&nr->source_addr), 1284 ax2asc(buf, &nr->source_addr),
1284 devname, 1285 devname,
1285 nr->my_index, 1286 nr->my_index,
1286 nr->my_id, 1287 nr->my_id,
diff --git a/net/netrom/nr_route.c b/net/netrom/nr_route.c
index 7a86b36cba50..b3b9097c87c7 100644
--- a/net/netrom/nr_route.c
+++ b/net/netrom/nr_route.c
@@ -881,6 +881,7 @@ static void nr_node_stop(struct seq_file *seq, void *v)
881 881
882static int nr_node_show(struct seq_file *seq, void *v) 882static int nr_node_show(struct seq_file *seq, void *v)
883{ 883{
884 char buf[11];
884 int i; 885 int i;
885 886
886 if (v == SEQ_START_TOKEN) 887 if (v == SEQ_START_TOKEN)
@@ -890,7 +891,7 @@ static int nr_node_show(struct seq_file *seq, void *v)
890 struct nr_node *nr_node = v; 891 struct nr_node *nr_node = v;
891 nr_node_lock(nr_node); 892 nr_node_lock(nr_node);
892 seq_printf(seq, "%-9s %-7s %d %d", 893 seq_printf(seq, "%-9s %-7s %d %d",
893 ax2asc(&nr_node->callsign), 894 ax2asc(buf, &nr_node->callsign),
894 (nr_node->mnemonic[0] == '\0') ? "*" : nr_node->mnemonic, 895 (nr_node->mnemonic[0] == '\0') ? "*" : nr_node->mnemonic,
895 nr_node->which + 1, 896 nr_node->which + 1,
896 nr_node->count); 897 nr_node->count);
@@ -964,6 +965,7 @@ static void nr_neigh_stop(struct seq_file *seq, void *v)
964 965
965static int nr_neigh_show(struct seq_file *seq, void *v) 966static int nr_neigh_show(struct seq_file *seq, void *v)
966{ 967{
968 char buf[11];
967 int i; 969 int i;
968 970
969 if (v == SEQ_START_TOKEN) 971 if (v == SEQ_START_TOKEN)
@@ -973,7 +975,7 @@ static int nr_neigh_show(struct seq_file *seq, void *v)
973 975
974 seq_printf(seq, "%05d %-9s %-4s %3d %d %3d %3d", 976 seq_printf(seq, "%05d %-9s %-4s %3d %d %3d %3d",
975 nr_neigh->number, 977 nr_neigh->number,
976 ax2asc(&nr_neigh->callsign), 978 ax2asc(buf, &nr_neigh->callsign),
977 nr_neigh->dev ? nr_neigh->dev->name : "???", 979 nr_neigh->dev ? nr_neigh->dev->name : "???",
978 nr_neigh->quality, 980 nr_neigh->quality,
979 nr_neigh->locked, 981 nr_neigh->locked,
@@ -983,7 +985,7 @@ static int nr_neigh_show(struct seq_file *seq, void *v)
983 if (nr_neigh->digipeat != NULL) { 985 if (nr_neigh->digipeat != NULL) {
984 for (i = 0; i < nr_neigh->digipeat->ndigi; i++) 986 for (i = 0; i < nr_neigh->digipeat->ndigi; i++)
985 seq_printf(seq, " %s", 987 seq_printf(seq, " %s",
986 ax2asc(&nr_neigh->digipeat->calls[i])); 988 ax2asc(buf, &nr_neigh->digipeat->calls[i]));
987 } 989 }
988 990
989 seq_puts(seq, "\n"); 991 seq_puts(seq, "\n");
diff --git a/net/packet/af_packet.c b/net/packet/af_packet.c
index ba997095f08f..8690f171c1ef 100644
--- a/net/packet/af_packet.c
+++ b/net/packet/af_packet.c
@@ -1535,8 +1535,7 @@ static unsigned int packet_poll(struct file * file, struct socket *sock,
1535static void packet_mm_open(struct vm_area_struct *vma) 1535static void packet_mm_open(struct vm_area_struct *vma)
1536{ 1536{
1537 struct file *file = vma->vm_file; 1537 struct file *file = vma->vm_file;
1538 struct inode *inode = file->f_dentry->d_inode; 1538 struct socket * sock = file->private_data;
1539 struct socket * sock = SOCKET_I(inode);
1540 struct sock *sk = sock->sk; 1539 struct sock *sk = sock->sk;
1541 1540
1542 if (sk) 1541 if (sk)
@@ -1546,8 +1545,7 @@ static void packet_mm_open(struct vm_area_struct *vma)
1546static void packet_mm_close(struct vm_area_struct *vma) 1545static void packet_mm_close(struct vm_area_struct *vma)
1547{ 1546{
1548 struct file *file = vma->vm_file; 1547 struct file *file = vma->vm_file;
1549 struct inode *inode = file->f_dentry->d_inode; 1548 struct socket * sock = file->private_data;
1550 struct socket * sock = SOCKET_I(inode);
1551 struct sock *sk = sock->sk; 1549 struct sock *sk = sock->sk;
1552 1550
1553 if (sk) 1551 if (sk)
diff --git a/net/rose/af_rose.c b/net/rose/af_rose.c
index c6e59f84c3ae..3077878ed4f0 100644
--- a/net/rose/af_rose.c
+++ b/net/rose/af_rose.c
@@ -1363,6 +1363,8 @@ static void rose_info_stop(struct seq_file *seq, void *v)
1363 1363
1364static int rose_info_show(struct seq_file *seq, void *v) 1364static int rose_info_show(struct seq_file *seq, void *v)
1365{ 1365{
1366 char buf[11];
1367
1366 if (v == SEQ_START_TOKEN) 1368 if (v == SEQ_START_TOKEN)
1367 seq_puts(seq, 1369 seq_puts(seq,
1368 "dest_addr dest_call src_addr src_call dev lci neigh st vs vr va t t1 t2 t3 hb idle Snd-Q Rcv-Q inode\n"); 1370 "dest_addr dest_call src_addr src_call dev lci neigh st vs vr va t t1 t2 t3 hb idle Snd-Q Rcv-Q inode\n");
@@ -1380,12 +1382,12 @@ static int rose_info_show(struct seq_file *seq, void *v)
1380 1382
1381 seq_printf(seq, "%-10s %-9s ", 1383 seq_printf(seq, "%-10s %-9s ",
1382 rose2asc(&rose->dest_addr), 1384 rose2asc(&rose->dest_addr),
1383 ax2asc(&rose->dest_call)); 1385 ax2asc(buf, &rose->dest_call));
1384 1386
1385 if (ax25cmp(&rose->source_call, &null_ax25_address) == 0) 1387 if (ax25cmp(&rose->source_call, &null_ax25_address) == 0)
1386 callsign = "??????-?"; 1388 callsign = "??????-?";
1387 else 1389 else
1388 callsign = ax2asc(&rose->source_call); 1390 callsign = ax2asc(buf, &rose->source_call);
1389 1391
1390 seq_printf(seq, 1392 seq_printf(seq,
1391 "%-10s %-9s %-5s %3.3X %05d %d %d %d %d %3lu %3lu %3lu %3lu %3lu %3lu/%03lu %5d %5d %ld\n", 1393 "%-10s %-9s %-5s %3.3X %05d %d %d %d %d %3lu %3lu %3lu %3lu %3lu %3lu/%03lu %5d %5d %ld\n",
diff --git a/net/rose/rose_route.c b/net/rose/rose_route.c
index 4510cd7613ec..e556d92c0bc4 100644
--- a/net/rose/rose_route.c
+++ b/net/rose/rose_route.c
@@ -851,6 +851,7 @@ int rose_route_frame(struct sk_buff *skb, ax25_cb *ax25)
851 unsigned char cause, diagnostic; 851 unsigned char cause, diagnostic;
852 struct net_device *dev; 852 struct net_device *dev;
853 int len, res = 0; 853 int len, res = 0;
854 char buf[11];
854 855
855#if 0 856#if 0
856 if (call_in_firewall(PF_ROSE, skb->dev, skb->data, NULL, &skb) != FW_ACCEPT) 857 if (call_in_firewall(PF_ROSE, skb->dev, skb->data, NULL, &skb) != FW_ACCEPT)
@@ -876,7 +877,7 @@ int rose_route_frame(struct sk_buff *skb, ax25_cb *ax25)
876 877
877 if (rose_neigh == NULL) { 878 if (rose_neigh == NULL) {
878 printk("rose_route : unknown neighbour or device %s\n", 879 printk("rose_route : unknown neighbour or device %s\n",
879 ax2asc(&ax25->dest_addr)); 880 ax2asc(buf, &ax25->dest_addr));
880 goto out; 881 goto out;
881 } 882 }
882 883
@@ -1178,6 +1179,7 @@ static void rose_neigh_stop(struct seq_file *seq, void *v)
1178 1179
1179static int rose_neigh_show(struct seq_file *seq, void *v) 1180static int rose_neigh_show(struct seq_file *seq, void *v)
1180{ 1181{
1182 char buf[11];
1181 int i; 1183 int i;
1182 1184
1183 if (v == SEQ_START_TOKEN) 1185 if (v == SEQ_START_TOKEN)
@@ -1189,7 +1191,7 @@ static int rose_neigh_show(struct seq_file *seq, void *v)
1189 /* if (!rose_neigh->loopback) { */ 1191 /* if (!rose_neigh->loopback) { */
1190 seq_printf(seq, "%05d %-9s %-4s %3d %3d %3s %3s %3lu %3lu", 1192 seq_printf(seq, "%05d %-9s %-4s %3d %3d %3s %3s %3lu %3lu",
1191 rose_neigh->number, 1193 rose_neigh->number,
1192 (rose_neigh->loopback) ? "RSLOOP-0" : ax2asc(&rose_neigh->callsign), 1194 (rose_neigh->loopback) ? "RSLOOP-0" : ax2asc(buf, &rose_neigh->callsign),
1193 rose_neigh->dev ? rose_neigh->dev->name : "???", 1195 rose_neigh->dev ? rose_neigh->dev->name : "???",
1194 rose_neigh->count, 1196 rose_neigh->count,
1195 rose_neigh->use, 1197 rose_neigh->use,
@@ -1200,7 +1202,7 @@ static int rose_neigh_show(struct seq_file *seq, void *v)
1200 1202
1201 if (rose_neigh->digipeat != NULL) { 1203 if (rose_neigh->digipeat != NULL) {
1202 for (i = 0; i < rose_neigh->digipeat->ndigi; i++) 1204 for (i = 0; i < rose_neigh->digipeat->ndigi; i++)
1203 seq_printf(seq, " %s", ax2asc(&rose_neigh->digipeat->calls[i])); 1205 seq_printf(seq, " %s", ax2asc(buf, &rose_neigh->digipeat->calls[i]));
1204 } 1206 }
1205 1207
1206 seq_puts(seq, "\n"); 1208 seq_puts(seq, "\n");
@@ -1260,6 +1262,8 @@ static void rose_route_stop(struct seq_file *seq, void *v)
1260 1262
1261static int rose_route_show(struct seq_file *seq, void *v) 1263static int rose_route_show(struct seq_file *seq, void *v)
1262{ 1264{
1265 char buf[11];
1266
1263 if (v == SEQ_START_TOKEN) 1267 if (v == SEQ_START_TOKEN)
1264 seq_puts(seq, 1268 seq_puts(seq,
1265 "lci address callsign neigh <-> lci address callsign neigh\n"); 1269 "lci address callsign neigh <-> lci address callsign neigh\n");
@@ -1271,7 +1275,7 @@ static int rose_route_show(struct seq_file *seq, void *v)
1271 "%3.3X %-10s %-9s %05d ", 1275 "%3.3X %-10s %-9s %05d ",
1272 rose_route->lci1, 1276 rose_route->lci1,
1273 rose2asc(&rose_route->src_addr), 1277 rose2asc(&rose_route->src_addr),
1274 ax2asc(&rose_route->src_call), 1278 ax2asc(buf, &rose_route->src_call),
1275 rose_route->neigh1->number); 1279 rose_route->neigh1->number);
1276 else 1280 else
1277 seq_puts(seq, 1281 seq_puts(seq,
@@ -1282,7 +1286,7 @@ static int rose_route_show(struct seq_file *seq, void *v)
1282 "%3.3X %-10s %-9s %05d\n", 1286 "%3.3X %-10s %-9s %05d\n",
1283 rose_route->lci2, 1287 rose_route->lci2,
1284 rose2asc(&rose_route->dest_addr), 1288 rose2asc(&rose_route->dest_addr),
1285 ax2asc(&rose_route->dest_call), 1289 ax2asc(buf, &rose_route->dest_call),
1286 rose_route->neigh2->number); 1290 rose_route->neigh2->number);
1287 else 1291 else
1288 seq_puts(seq, 1292 seq_puts(seq,
diff --git a/net/rose/rose_subr.c b/net/rose/rose_subr.c
index a29a3a960fd6..02891ce2db37 100644
--- a/net/rose/rose_subr.c
+++ b/net/rose/rose_subr.c
@@ -400,6 +400,7 @@ static int rose_create_facilities(unsigned char *buffer, struct rose_sock *rose)
400{ 400{
401 unsigned char *p = buffer + 1; 401 unsigned char *p = buffer + 1;
402 char *callsign; 402 char *callsign;
403 char buf[11];
403 int len, nb; 404 int len, nb;
404 405
405 /* National Facilities */ 406 /* National Facilities */
@@ -456,7 +457,7 @@ static int rose_create_facilities(unsigned char *buffer, struct rose_sock *rose)
456 457
457 *p++ = FAC_CCITT_DEST_NSAP; 458 *p++ = FAC_CCITT_DEST_NSAP;
458 459
459 callsign = ax2asc(&rose->dest_call); 460 callsign = ax2asc(buf, &rose->dest_call);
460 461
461 *p++ = strlen(callsign) + 10; 462 *p++ = strlen(callsign) + 10;
462 *p++ = (strlen(callsign) + 9) * 2; /* ??? */ 463 *p++ = (strlen(callsign) + 9) * 2; /* ??? */
@@ -471,7 +472,7 @@ static int rose_create_facilities(unsigned char *buffer, struct rose_sock *rose)
471 472
472 *p++ = FAC_CCITT_SRC_NSAP; 473 *p++ = FAC_CCITT_SRC_NSAP;
473 474
474 callsign = ax2asc(&rose->source_call); 475 callsign = ax2asc(buf, &rose->source_call);
475 476
476 *p++ = strlen(callsign) + 10; 477 *p++ = strlen(callsign) + 10;
477 *p++ = (strlen(callsign) + 9) * 2; /* ??? */ 478 *p++ = (strlen(callsign) + 9) * 2; /* ??? */
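
The netrom and rose hunks above all make the same change: ax2asc() now formats into a buffer supplied by the caller (the new char buf[11] locals) rather than a shared static buffer, so overlapping callers can no longer trample each other's result. A minimal before/after sketch of the pattern in plain C; callsign2asc() is an illustrative stand-in, not the kernel helper:

#include <stdio.h>
#include <stddef.h>

struct callsign { char text[10]; };

/* Old shape: one static buffer shared by every caller (not reentrant). */
static const char *callsign2asc_static(const struct callsign *c)
{
        static char buf[11];

        snprintf(buf, sizeof(buf), "%s", c->text);
        return buf;
}

/* New shape: the caller owns the buffer, so nested or concurrent uses are safe. */
static const char *callsign2asc(char *buf, size_t len, const struct callsign *c)
{
        snprintf(buf, len, "%s", c->text);
        return buf;
}

/* Typical call site, mirroring the seq_printf() users above:
 *     char buf[11];
 *     seq_printf(seq, "%-9s ", callsign2asc(buf, sizeof(buf), &cs));
 */
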
diff --git a/net/socket.c b/net/socket.c
index 94fe638b4d72..e1bd5d84d7bf 100644
--- a/net/socket.c
+++ b/net/socket.c
@@ -667,7 +667,7 @@ static ssize_t sock_aio_read(struct kiocb *iocb, char __user *ubuf,
667 } 667 }
668 iocb->private = x; 668 iocb->private = x;
669 x->kiocb = iocb; 669 x->kiocb = iocb;
670 sock = SOCKET_I(iocb->ki_filp->f_dentry->d_inode); 670 sock = iocb->ki_filp->private_data;
671 671
672 x->async_msg.msg_name = NULL; 672 x->async_msg.msg_name = NULL;
673 x->async_msg.msg_namelen = 0; 673 x->async_msg.msg_namelen = 0;
@@ -709,7 +709,7 @@ static ssize_t sock_aio_write(struct kiocb *iocb, const char __user *ubuf,
709 } 709 }
710 iocb->private = x; 710 iocb->private = x;
711 x->kiocb = iocb; 711 x->kiocb = iocb;
712 sock = SOCKET_I(iocb->ki_filp->f_dentry->d_inode); 712 sock = iocb->ki_filp->private_data;
713 713
714 x->async_msg.msg_name = NULL; 714 x->async_msg.msg_name = NULL;
715 x->async_msg.msg_namelen = 0; 715 x->async_msg.msg_namelen = 0;
@@ -732,7 +732,7 @@ static ssize_t sock_sendpage(struct file *file, struct page *page,
732 struct socket *sock; 732 struct socket *sock;
733 int flags; 733 int flags;
734 734
735 sock = SOCKET_I(file->f_dentry->d_inode); 735 sock = file->private_data;
736 736
737 flags = !(file->f_flags & O_NONBLOCK) ? 0 : MSG_DONTWAIT; 737 flags = !(file->f_flags & O_NONBLOCK) ? 0 : MSG_DONTWAIT;
738 if (more) 738 if (more)
@@ -741,14 +741,14 @@ static ssize_t sock_sendpage(struct file *file, struct page *page,
741 return sock->ops->sendpage(sock, page, offset, size, flags); 741 return sock->ops->sendpage(sock, page, offset, size, flags);
742} 742}
743 743
744static int sock_readv_writev(int type, struct inode * inode, 744static int sock_readv_writev(int type,
745 struct file * file, const struct iovec * iov, 745 struct file * file, const struct iovec * iov,
746 long count, size_t size) 746 long count, size_t size)
747{ 747{
748 struct msghdr msg; 748 struct msghdr msg;
749 struct socket *sock; 749 struct socket *sock;
750 750
751 sock = SOCKET_I(inode); 751 sock = file->private_data;
752 752
753 msg.msg_name = NULL; 753 msg.msg_name = NULL;
754 msg.msg_namelen = 0; 754 msg.msg_namelen = 0;
@@ -775,7 +775,7 @@ static ssize_t sock_readv(struct file *file, const struct iovec *vector,
775 int i; 775 int i;
776 for (i = 0 ; i < count ; i++) 776 for (i = 0 ; i < count ; i++)
777 tot_len += vector[i].iov_len; 777 tot_len += vector[i].iov_len;
778 return sock_readv_writev(VERIFY_WRITE, file->f_dentry->d_inode, 778 return sock_readv_writev(VERIFY_WRITE,
779 file, vector, count, tot_len); 779 file, vector, count, tot_len);
780} 780}
781 781
@@ -786,7 +786,7 @@ static ssize_t sock_writev(struct file *file, const struct iovec *vector,
786 int i; 786 int i;
787 for (i = 0 ; i < count ; i++) 787 for (i = 0 ; i < count ; i++)
788 tot_len += vector[i].iov_len; 788 tot_len += vector[i].iov_len;
789 return sock_readv_writev(VERIFY_READ, file->f_dentry->d_inode, 789 return sock_readv_writev(VERIFY_READ,
790 file, vector, count, tot_len); 790 file, vector, count, tot_len);
791} 791}
792 792
@@ -840,7 +840,7 @@ static long sock_ioctl(struct file *file, unsigned cmd, unsigned long arg)
840 void __user *argp = (void __user *)arg; 840 void __user *argp = (void __user *)arg;
841 int pid, err; 841 int pid, err;
842 842
843 sock = SOCKET_I(file->f_dentry->d_inode); 843 sock = file->private_data;
844 if (cmd >= SIOCDEVPRIVATE && cmd <= (SIOCDEVPRIVATE + 15)) { 844 if (cmd >= SIOCDEVPRIVATE && cmd <= (SIOCDEVPRIVATE + 15)) {
845 err = dev_ioctl(cmd, argp); 845 err = dev_ioctl(cmd, argp);
846 } else 846 } else
@@ -939,13 +939,13 @@ static unsigned int sock_poll(struct file *file, poll_table * wait)
939 /* 939 /*
940 * We can't return errors to poll, so it's either yes or no. 940 * We can't return errors to poll, so it's either yes or no.
941 */ 941 */
942 sock = SOCKET_I(file->f_dentry->d_inode); 942 sock = file->private_data;
943 return sock->ops->poll(file, sock, wait); 943 return sock->ops->poll(file, sock, wait);
944} 944}
945 945
946static int sock_mmap(struct file * file, struct vm_area_struct * vma) 946static int sock_mmap(struct file * file, struct vm_area_struct * vma)
947{ 947{
948 struct socket *sock = SOCKET_I(file->f_dentry->d_inode); 948 struct socket *sock = file->private_data;
949 949
950 return sock->ops->mmap(file, sock, vma); 950 return sock->ops->mmap(file, sock, vma);
951} 951}
@@ -995,7 +995,7 @@ static int sock_fasync(int fd, struct file *filp, int on)
995 return -ENOMEM; 995 return -ENOMEM;
996 } 996 }
997 997
998 sock = SOCKET_I(filp->f_dentry->d_inode); 998 sock = filp->private_data;
999 999
1000 if ((sk=sock->sk) == NULL) { 1000 if ((sk=sock->sk) == NULL) {
1001 kfree(fna); 1001 kfree(fna);
diff --git a/net/sunrpc/auth_gss/svcauth_gss.c b/net/sunrpc/auth_gss/svcauth_gss.c
index 5c8fe3bfc494..e3308195374e 100644
--- a/net/sunrpc/auth_gss/svcauth_gss.c
+++ b/net/sunrpc/auth_gss/svcauth_gss.c
@@ -250,6 +250,7 @@ out:
250} 250}
251 251
252static struct cache_detail rsi_cache = { 252static struct cache_detail rsi_cache = {
253 .owner = THIS_MODULE,
253 .hash_size = RSI_HASHMAX, 254 .hash_size = RSI_HASHMAX,
254 .hash_table = rsi_table, 255 .hash_table = rsi_table,
255 .name = "auth.rpcsec.init", 256 .name = "auth.rpcsec.init",
@@ -436,6 +437,7 @@ out:
436} 437}
437 438
438static struct cache_detail rsc_cache = { 439static struct cache_detail rsc_cache = {
440 .owner = THIS_MODULE,
439 .hash_size = RSC_HASHMAX, 441 .hash_size = RSC_HASHMAX,
440 .hash_table = rsc_table, 442 .hash_table = rsc_table,
441 .name = "auth.rpcsec.context", 443 .name = "auth.rpcsec.context",
@@ -1074,7 +1076,9 @@ gss_svc_init(void)
1074void 1076void
1075gss_svc_shutdown(void) 1077gss_svc_shutdown(void)
1076{ 1078{
1077 cache_unregister(&rsc_cache); 1079 if (cache_unregister(&rsc_cache))
1078 cache_unregister(&rsi_cache); 1080 printk(KERN_ERR "auth_rpcgss: failed to unregister rsc cache\n");
1081 if (cache_unregister(&rsi_cache))
1082 printk(KERN_ERR "auth_rpcgss: failed to unregister rsi cache\n");
1079 svc_auth_unregister(RPC_AUTH_GSS); 1083 svc_auth_unregister(RPC_AUTH_GSS);
1080} 1084}
diff --git a/net/sunrpc/cache.c b/net/sunrpc/cache.c
index 900f5bc7e336..f509e9992767 100644
--- a/net/sunrpc/cache.c
+++ b/net/sunrpc/cache.c
@@ -177,7 +177,7 @@ void cache_register(struct cache_detail *cd)
177 cd->proc_ent = proc_mkdir(cd->name, proc_net_rpc); 177 cd->proc_ent = proc_mkdir(cd->name, proc_net_rpc);
178 if (cd->proc_ent) { 178 if (cd->proc_ent) {
179 struct proc_dir_entry *p; 179 struct proc_dir_entry *p;
180 cd->proc_ent->owner = THIS_MODULE; 180 cd->proc_ent->owner = cd->owner;
181 cd->channel_ent = cd->content_ent = NULL; 181 cd->channel_ent = cd->content_ent = NULL;
182 182
183 p = create_proc_entry("flush", S_IFREG|S_IRUSR|S_IWUSR, 183 p = create_proc_entry("flush", S_IFREG|S_IRUSR|S_IWUSR,
@@ -185,7 +185,7 @@ void cache_register(struct cache_detail *cd)
185 cd->flush_ent = p; 185 cd->flush_ent = p;
186 if (p) { 186 if (p) {
187 p->proc_fops = &cache_flush_operations; 187 p->proc_fops = &cache_flush_operations;
188 p->owner = THIS_MODULE; 188 p->owner = cd->owner;
189 p->data = cd; 189 p->data = cd;
190 } 190 }
191 191
@@ -195,7 +195,7 @@ void cache_register(struct cache_detail *cd)
195 cd->channel_ent = p; 195 cd->channel_ent = p;
196 if (p) { 196 if (p) {
197 p->proc_fops = &cache_file_operations; 197 p->proc_fops = &cache_file_operations;
198 p->owner = THIS_MODULE; 198 p->owner = cd->owner;
199 p->data = cd; 199 p->data = cd;
200 } 200 }
201 } 201 }
@@ -205,7 +205,7 @@ void cache_register(struct cache_detail *cd)
205 cd->content_ent = p; 205 cd->content_ent = p;
206 if (p) { 206 if (p) {
207 p->proc_fops = &content_file_operations; 207 p->proc_fops = &content_file_operations;
208 p->owner = THIS_MODULE; 208 p->owner = cd->owner;
209 p->data = cd; 209 p->data = cd;
210 } 210 }
211 } 211 }
diff --git a/net/sunrpc/stats.c b/net/sunrpc/stats.c
index 9b67dc19944c..4979f226e285 100644
--- a/net/sunrpc/stats.c
+++ b/net/sunrpc/stats.c
@@ -35,13 +35,13 @@ static int rpc_proc_show(struct seq_file *seq, void *v) {
35 int i, j; 35 int i, j;
36 36
37 seq_printf(seq, 37 seq_printf(seq,
38 "net %d %d %d %d\n", 38 "net %u %u %u %u\n",
39 statp->netcnt, 39 statp->netcnt,
40 statp->netudpcnt, 40 statp->netudpcnt,
41 statp->nettcpcnt, 41 statp->nettcpcnt,
42 statp->nettcpconn); 42 statp->nettcpconn);
43 seq_printf(seq, 43 seq_printf(seq,
44 "rpc %d %d %d\n", 44 "rpc %u %u %u\n",
45 statp->rpccnt, 45 statp->rpccnt,
46 statp->rpcretrans, 46 statp->rpcretrans,
47 statp->rpcauthrefresh); 47 statp->rpcauthrefresh);
@@ -50,10 +50,10 @@ static int rpc_proc_show(struct seq_file *seq, void *v) {
50 const struct rpc_version *vers = prog->version[i]; 50 const struct rpc_version *vers = prog->version[i];
51 if (!vers) 51 if (!vers)
52 continue; 52 continue;
53 seq_printf(seq, "proc%d %d", 53 seq_printf(seq, "proc%u %u",
54 vers->number, vers->nrprocs); 54 vers->number, vers->nrprocs);
55 for (j = 0; j < vers->nrprocs; j++) 55 for (j = 0; j < vers->nrprocs; j++)
56 seq_printf(seq, " %d", 56 seq_printf(seq, " %u",
57 vers->procs[j].p_count); 57 vers->procs[j].p_count);
58 seq_putc(seq, '\n'); 58 seq_putc(seq, '\n');
59 } 59 }
@@ -83,13 +83,13 @@ void svc_seq_show(struct seq_file *seq, const struct svc_stat *statp) {
83 int i, j; 83 int i, j;
84 84
85 seq_printf(seq, 85 seq_printf(seq,
86 "net %d %d %d %d\n", 86 "net %u %u %u %u\n",
87 statp->netcnt, 87 statp->netcnt,
88 statp->netudpcnt, 88 statp->netudpcnt,
89 statp->nettcpcnt, 89 statp->nettcpcnt,
90 statp->nettcpconn); 90 statp->nettcpconn);
91 seq_printf(seq, 91 seq_printf(seq,
92 "rpc %d %d %d %d %d\n", 92 "rpc %u %u %u %u %u\n",
93 statp->rpccnt, 93 statp->rpccnt,
94 statp->rpcbadfmt+statp->rpcbadauth+statp->rpcbadclnt, 94 statp->rpcbadfmt+statp->rpcbadauth+statp->rpcbadclnt,
95 statp->rpcbadfmt, 95 statp->rpcbadfmt,
@@ -99,9 +99,9 @@ void svc_seq_show(struct seq_file *seq, const struct svc_stat *statp) {
99 for (i = 0; i < prog->pg_nvers; i++) { 99 for (i = 0; i < prog->pg_nvers; i++) {
100 if (!(vers = prog->pg_vers[i]) || !(proc = vers->vs_proc)) 100 if (!(vers = prog->pg_vers[i]) || !(proc = vers->vs_proc))
101 continue; 101 continue;
102 seq_printf(seq, "proc%d %d", i, vers->vs_nproc); 102 seq_printf(seq, "proc%d %u", i, vers->vs_nproc);
103 for (j = 0; j < vers->vs_nproc; j++, proc++) 103 for (j = 0; j < vers->vs_nproc; j++, proc++)
104 seq_printf(seq, " %d", proc->pc_count); 104 seq_printf(seq, " %u", proc->pc_count);
105 seq_putc(seq, '\n'); 105 seq_putc(seq, '\n');
106 } 106 }
107} 107}
diff --git a/net/sunrpc/sunrpc_syms.c b/net/sunrpc/sunrpc_syms.c
index 62a073495276..ed48ff022d35 100644
--- a/net/sunrpc/sunrpc_syms.c
+++ b/net/sunrpc/sunrpc_syms.c
@@ -176,8 +176,10 @@ cleanup_sunrpc(void)
176{ 176{
177 unregister_rpc_pipefs(); 177 unregister_rpc_pipefs();
178 rpc_destroy_mempool(); 178 rpc_destroy_mempool();
179 cache_unregister(&auth_domain_cache); 179 if (cache_unregister(&auth_domain_cache))
180 cache_unregister(&ip_map_cache); 180 printk(KERN_ERR "sunrpc: failed to unregister auth_domain cache\n");
181 if (cache_unregister(&ip_map_cache))
182 printk(KERN_ERR "sunrpc: failed to unregister ip_map cache\n");
181#ifdef RPC_DEBUG 183#ifdef RPC_DEBUG
182 rpc_unregister_sysctl(); 184 rpc_unregister_sysctl();
183#endif 185#endif
diff --git a/net/sunrpc/svcauth.c b/net/sunrpc/svcauth.c
index bde8147ef2db..dda4f0c63511 100644
--- a/net/sunrpc/svcauth.c
+++ b/net/sunrpc/svcauth.c
@@ -143,6 +143,7 @@ static void auth_domain_drop(struct cache_head *item, struct cache_detail *cd)
143 143
144 144
145struct cache_detail auth_domain_cache = { 145struct cache_detail auth_domain_cache = {
146 .owner = THIS_MODULE,
146 .hash_size = DN_HASHMAX, 147 .hash_size = DN_HASHMAX,
147 .hash_table = auth_domain_table, 148 .hash_table = auth_domain_table,
148 .name = "auth.domain", 149 .name = "auth.domain",
diff --git a/net/sunrpc/svcauth_unix.c b/net/sunrpc/svcauth_unix.c
index d6baf6fdf8a9..cac2e774dd81 100644
--- a/net/sunrpc/svcauth_unix.c
+++ b/net/sunrpc/svcauth_unix.c
@@ -242,6 +242,7 @@ static int ip_map_show(struct seq_file *m,
242 242
243 243
244struct cache_detail ip_map_cache = { 244struct cache_detail ip_map_cache = {
245 .owner = THIS_MODULE,
245 .hash_size = IP_HASHMAX, 246 .hash_size = IP_HASHMAX,
246 .hash_table = ip_table, 247 .hash_table = ip_table,
247 .name = "auth.unix.ip", 248 .name = "auth.unix.ip",
diff --git a/scripts/kallsyms.c b/scripts/kallsyms.c
index 9be41a9f5aff..d591578bd3b2 100644
--- a/scripts/kallsyms.c
+++ b/scripts/kallsyms.c
@@ -24,75 +24,37 @@
24 * 24 *
25 */ 25 */
26 26
27#define _GNU_SOURCE
28
27#include <stdio.h> 29#include <stdio.h>
28#include <stdlib.h> 30#include <stdlib.h>
29#include <string.h> 31#include <string.h>
30#include <ctype.h> 32#include <ctype.h>
31 33
32/* maximum token length used. It doesn't pay to increase it a lot, because
33 * very long substrings probably don't repeat themselves too often. */
34#define MAX_TOK_SIZE 11
35#define KSYM_NAME_LEN 127 34#define KSYM_NAME_LEN 127
36 35
37/* we use only a subset of the complete symbol table to gather the token count,
38 * to speed up compression, at the expense of a little compression ratio */
39#define WORKING_SET 1024
40
41/* first find the best token only on the list of tokens that would profit more
42 * than GOOD_BAD_THRESHOLD. Only if this list is empty go to the "bad" list.
43 * Increasing this value will put less tokens on the "good" list, so the search
44 * is faster. However, if the good list runs out of tokens, we must painfully
45 * search the bad list. */
46#define GOOD_BAD_THRESHOLD 10
47
48/* token hash parameters */
49#define HASH_BITS 18
50#define HASH_TABLE_SIZE (1 << HASH_BITS)
51#define HASH_MASK (HASH_TABLE_SIZE - 1)
52#define HASH_BASE_OFFSET 2166136261U
53#define HASH_FOLD(a) ((a)&(HASH_MASK))
54
55/* flags to mark symbols */
56#define SYM_FLAG_VALID 1
57#define SYM_FLAG_SAMPLED 2
58 36
59struct sym_entry { 37struct sym_entry {
60 unsigned long long addr; 38 unsigned long long addr;
61 char type; 39 unsigned int len;
62 unsigned char flags;
63 unsigned char len;
64 unsigned char *sym; 40 unsigned char *sym;
65}; 41};
66 42
67 43
68static struct sym_entry *table; 44static struct sym_entry *table;
69static int size, cnt; 45static unsigned int table_size, table_cnt;
70static unsigned long long _stext, _etext, _sinittext, _einittext, _sextratext, _eextratext; 46static unsigned long long _stext, _etext, _sinittext, _einittext, _sextratext, _eextratext;
71static int all_symbols = 0; 47static int all_symbols = 0;
72static char symbol_prefix_char = '\0'; 48static char symbol_prefix_char = '\0';
73 49
74struct token { 50int token_profit[0x10000];
75 unsigned char data[MAX_TOK_SIZE];
76 unsigned char len;
77 /* profit: the number of bytes that could be saved by inserting this
78 * token into the table */
79 int profit;
80 struct token *next; /* next token on the hash list */
81 struct token *right; /* next token on the good/bad list */
82 struct token *left; /* previous token on the good/bad list */
83 struct token *smaller; /* token that is less one letter than this one */
84 };
85
86struct token bad_head, good_head;
87struct token *hash_table[HASH_TABLE_SIZE];
88 51
89/* the table that holds the result of the compression */ 52/* the table that holds the result of the compression */
90unsigned char best_table[256][MAX_TOK_SIZE+1]; 53unsigned char best_table[256][2];
91unsigned char best_table_len[256]; 54unsigned char best_table_len[256];
92 55
93 56
94static void 57static void usage(void)
95usage(void)
96{ 58{
97 fprintf(stderr, "Usage: kallsyms [--all-symbols] [--symbol-prefix=<prefix char>] < in.map > out.S\n"); 59 fprintf(stderr, "Usage: kallsyms [--all-symbols] [--symbol-prefix=<prefix char>] < in.map > out.S\n");
98 exit(1); 60 exit(1);
@@ -102,21 +64,19 @@ usage(void)
102 * This ignores the intensely annoying "mapping symbols" found 64 * This ignores the intensely annoying "mapping symbols" found
103 * in ARM ELF files: $a, $t and $d. 65 * in ARM ELF files: $a, $t and $d.
104 */ 66 */
105static inline int 67static inline int is_arm_mapping_symbol(const char *str)
106is_arm_mapping_symbol(const char *str)
107{ 68{
108 return str[0] == '$' && strchr("atd", str[1]) 69 return str[0] == '$' && strchr("atd", str[1])
109 && (str[2] == '\0' || str[2] == '.'); 70 && (str[2] == '\0' || str[2] == '.');
110} 71}
111 72
112static int 73static int read_symbol(FILE *in, struct sym_entry *s)
113read_symbol(FILE *in, struct sym_entry *s)
114{ 74{
115 char str[500]; 75 char str[500];
116 char *sym; 76 char *sym, stype;
117 int rc; 77 int rc;
118 78
119 rc = fscanf(in, "%llx %c %499s\n", &s->addr, &s->type, str); 79 rc = fscanf(in, "%llx %c %499s\n", &s->addr, &stype, str);
120 if (rc != 3) { 80 if (rc != 3) {
121 if (rc != EOF) { 81 if (rc != EOF) {
122 /* skip line */ 82 /* skip line */
@@ -143,7 +103,7 @@ read_symbol(FILE *in, struct sym_entry *s)
143 _sextratext = s->addr; 103 _sextratext = s->addr;
144 else if (strcmp(sym, "_eextratext") == 0) 104 else if (strcmp(sym, "_eextratext") == 0)
145 _eextratext = s->addr; 105 _eextratext = s->addr;
146 else if (toupper(s->type) == 'A') 106 else if (toupper(stype) == 'A')
147 { 107 {
148 /* Keep these useful absolute symbols */ 108 /* Keep these useful absolute symbols */
149 if (strcmp(sym, "__kernel_syscall_via_break") && 109 if (strcmp(sym, "__kernel_syscall_via_break") &&
@@ -153,22 +113,24 @@ read_symbol(FILE *in, struct sym_entry *s)
153 return -1; 113 return -1;
154 114
155 } 115 }
156 else if (toupper(s->type) == 'U' || 116 else if (toupper(stype) == 'U' ||
157 is_arm_mapping_symbol(sym)) 117 is_arm_mapping_symbol(sym))
158 return -1; 118 return -1;
119 /* exclude also MIPS ELF local symbols ($L123 instead of .L123) */
120 else if (str[0] == '$')
121 return -1;
159 122
160 /* include the type field in the symbol name, so that it gets 123 /* include the type field in the symbol name, so that it gets
161 * compressed together */ 124 * compressed together */
162 s->len = strlen(str) + 1; 125 s->len = strlen(str) + 1;
163 s->sym = (char *) malloc(s->len + 1); 126 s->sym = malloc(s->len + 1);
164 strcpy(s->sym + 1, str); 127 strcpy((char *)s->sym + 1, str);
165 s->sym[0] = s->type; 128 s->sym[0] = stype;
166 129
167 return 0; 130 return 0;
168} 131}
169 132
170static int 133static int symbol_valid(struct sym_entry *s)
171symbol_valid(struct sym_entry *s)
172{ 134{
173 /* Symbols which vary between passes. Passes 1 and 2 must have 135 /* Symbols which vary between passes. Passes 1 and 2 must have
174 * identical symbol lists. The kallsyms_* symbols below are only added 136 * identical symbol lists. The kallsyms_* symbols below are only added
@@ -214,30 +176,29 @@ symbol_valid(struct sym_entry *s)
214 } 176 }
215 177
216 /* Exclude symbols which vary between passes. */ 178 /* Exclude symbols which vary between passes. */
217 if (strstr(s->sym + offset, "_compiled.")) 179 if (strstr((char *)s->sym + offset, "_compiled."))
218 return 0; 180 return 0;
219 181
220 for (i = 0; special_symbols[i]; i++) 182 for (i = 0; special_symbols[i]; i++)
221 if( strcmp(s->sym + offset, special_symbols[i]) == 0 ) 183 if( strcmp((char *)s->sym + offset, special_symbols[i]) == 0 )
222 return 0; 184 return 0;
223 185
224 return 1; 186 return 1;
225} 187}
226 188
227static void 189static void read_map(FILE *in)
228read_map(FILE *in)
229{ 190{
230 while (!feof(in)) { 191 while (!feof(in)) {
231 if (cnt >= size) { 192 if (table_cnt >= table_size) {
232 size += 10000; 193 table_size += 10000;
233 table = realloc(table, sizeof(*table) * size); 194 table = realloc(table, sizeof(*table) * table_size);
234 if (!table) { 195 if (!table) {
235 fprintf(stderr, "out of memory\n"); 196 fprintf(stderr, "out of memory\n");
236 exit (1); 197 exit (1);
237 } 198 }
238 } 199 }
239 if (read_symbol(in, &table[cnt]) == 0) 200 if (read_symbol(in, &table[table_cnt]) == 0)
240 cnt++; 201 table_cnt++;
241 } 202 }
242} 203}
243 204
@@ -281,10 +242,9 @@ static int expand_symbol(unsigned char *data, int len, char *result)
281 return total; 242 return total;
282} 243}
283 244
284static void 245static void write_src(void)
285write_src(void)
286{ 246{
287 int i, k, off, valid; 247 unsigned int i, k, off;
288 unsigned int best_idx[256]; 248 unsigned int best_idx[256];
289 unsigned int *markers; 249 unsigned int *markers;
290 char buf[KSYM_NAME_LEN+1]; 250 char buf[KSYM_NAME_LEN+1];
@@ -301,33 +261,24 @@ write_src(void)
301 printf(".data\n"); 261 printf(".data\n");
302 262
303 output_label("kallsyms_addresses"); 263 output_label("kallsyms_addresses");
304 valid = 0; 264 for (i = 0; i < table_cnt; i++) {
305 for (i = 0; i < cnt; i++) { 265 printf("\tPTR\t%#llx\n", table[i].addr);
306 if (table[i].flags & SYM_FLAG_VALID) {
307 printf("\tPTR\t%#llx\n", table[i].addr);
308 valid++;
309 }
310 } 266 }
311 printf("\n"); 267 printf("\n");
312 268
313 output_label("kallsyms_num_syms"); 269 output_label("kallsyms_num_syms");
314 printf("\tPTR\t%d\n", valid); 270 printf("\tPTR\t%d\n", table_cnt);
315 printf("\n"); 271 printf("\n");
316 272
317 /* table of offset markers, that give the offset in the compressed stream 273 /* table of offset markers, that give the offset in the compressed stream
318 * every 256 symbols */ 274 * every 256 symbols */
319 markers = (unsigned int *) malloc(sizeof(unsigned int)*((valid + 255) / 256)); 275 markers = (unsigned int *) malloc(sizeof(unsigned int) * ((table_cnt + 255) / 256));
320 276
321 output_label("kallsyms_names"); 277 output_label("kallsyms_names");
322 valid = 0;
323 off = 0; 278 off = 0;
324 for (i = 0; i < cnt; i++) { 279 for (i = 0; i < table_cnt; i++) {
325 280 if ((i & 0xFF) == 0)
326 if (!table[i].flags & SYM_FLAG_VALID) 281 markers[i >> 8] = off;
327 continue;
328
329 if ((valid & 0xFF) == 0)
330 markers[valid >> 8] = off;
331 282
332 printf("\t.byte 0x%02x", table[i].len); 283 printf("\t.byte 0x%02x", table[i].len);
333 for (k = 0; k < table[i].len; k++) 284 for (k = 0; k < table[i].len; k++)
@@ -335,12 +286,11 @@ write_src(void)
335 printf("\n"); 286 printf("\n");
336 287
337 off += table[i].len + 1; 288 off += table[i].len + 1;
338 valid++;
339 } 289 }
340 printf("\n"); 290 printf("\n");
341 291
342 output_label("kallsyms_markers"); 292 output_label("kallsyms_markers");
343 for (i = 0; i < ((valid + 255) >> 8); i++) 293 for (i = 0; i < ((table_cnt + 255) >> 8); i++)
344 printf("\tPTR\t%d\n", markers[i]); 294 printf("\tPTR\t%d\n", markers[i]);
345 printf("\n"); 295 printf("\n");
346 296
@@ -350,7 +300,7 @@ write_src(void)
350 off = 0; 300 off = 0;
351 for (i = 0; i < 256; i++) { 301 for (i = 0; i < 256; i++) {
352 best_idx[i] = off; 302 best_idx[i] = off;
353 expand_symbol(best_table[i],best_table_len[i],buf); 303 expand_symbol(best_table[i], best_table_len[i], buf);
354 printf("\t.asciz\t\"%s\"\n", buf); 304 printf("\t.asciz\t\"%s\"\n", buf);
355 off += strlen(buf) + 1; 305 off += strlen(buf) + 1;
356 } 306 }
@@ -365,153 +315,13 @@ write_src(void)
365 315
366/* table lookup compression functions */ 316/* table lookup compression functions */
367 317
368static inline unsigned int rehash_token(unsigned int hash, unsigned char data)
369{
370 return ((hash * 16777619) ^ data);
371}
372
373static unsigned int hash_token(unsigned char *data, int len)
374{
375 unsigned int hash=HASH_BASE_OFFSET;
376 int i;
377
378 for (i = 0; i < len; i++)
379 hash = rehash_token(hash, data[i]);
380
381 return HASH_FOLD(hash);
382}
383
384/* find a token given its data and hash value */
385static struct token *find_token_hash(unsigned char *data, int len, unsigned int hash)
386{
387 struct token *ptr;
388
389 ptr = hash_table[hash];
390
391 while (ptr) {
392 if ((ptr->len == len) && (memcmp(ptr->data, data, len) == 0))
393 return ptr;
394 ptr=ptr->next;
395 }
396
397 return NULL;
398}
399
400static inline void insert_token_in_group(struct token *head, struct token *ptr)
401{
402 ptr->right = head->right;
403 ptr->right->left = ptr;
404 head->right = ptr;
405 ptr->left = head;
406}
407
408static inline void remove_token_from_group(struct token *ptr)
409{
410 ptr->left->right = ptr->right;
411 ptr->right->left = ptr->left;
412}
413
414
415/* build the counts for all the tokens that start with "data", and have lenghts
416 * from 2 to "len" */
417static void learn_token(unsigned char *data, int len)
418{
419 struct token *ptr,*last_ptr;
420 int i, newprofit;
421 unsigned int hash = HASH_BASE_OFFSET;
422 unsigned int hashes[MAX_TOK_SIZE + 1];
423
424 if (len > MAX_TOK_SIZE)
425 len = MAX_TOK_SIZE;
426
427 /* calculate and store the hash values for all the sub-tokens */
428 hash = rehash_token(hash, data[0]);
429 for (i = 2; i <= len; i++) {
430 hash = rehash_token(hash, data[i-1]);
431 hashes[i] = HASH_FOLD(hash);
432 }
433
434 last_ptr = NULL;
435 ptr = NULL;
436
437 for (i = len; i >= 2; i--) {
438 hash = hashes[i];
439
440 if (!ptr) ptr = find_token_hash(data, i, hash);
441
442 if (!ptr) {
443 /* create a new token entry */
444 ptr = (struct token *) malloc(sizeof(*ptr));
445
446 memcpy(ptr->data, data, i);
447 ptr->len = i;
448
449 /* when we create an entry, it's profit is 0 because
450 * we also take into account the size of the token on
451 * the compressed table. We then subtract GOOD_BAD_THRESHOLD
452 * so that the test to see if this token belongs to
453 * the good or bad list, is a comparison to zero */
454 ptr->profit = -GOOD_BAD_THRESHOLD;
455
456 ptr->next = hash_table[hash];
457 hash_table[hash] = ptr;
458
459 insert_token_in_group(&bad_head, ptr);
460
461 ptr->smaller = NULL;
462 } else {
463 newprofit = ptr->profit + (ptr->len - 1);
464 /* check to see if this token needs to be moved to a
465 * different list */
466 if((ptr->profit < 0) && (newprofit >= 0)) {
467 remove_token_from_group(ptr);
468 insert_token_in_group(&good_head,ptr);
469 }
470 ptr->profit = newprofit;
471 }
472
473 if (last_ptr) last_ptr->smaller = ptr;
474 last_ptr = ptr;
475
476 ptr = ptr->smaller;
477 }
478}
479
480/* decrease the counts for all the tokens that start with "data", and have lenghts
481 * from 2 to "len". This function is much simpler than learn_token because we have
482 * more guarantees (tho tokens exist, the ->smaller pointer is set, etc.)
483 * The two separate functions exist only because of compression performance */
484static void forget_token(unsigned char *data, int len)
485{
486 struct token *ptr;
487 int i, newprofit;
488 unsigned int hash=0;
489
490 if (len > MAX_TOK_SIZE) len = MAX_TOK_SIZE;
491
492 hash = hash_token(data, len);
493 ptr = find_token_hash(data, len, hash);
494
495 for (i = len; i >= 2; i--) {
496
497 newprofit = ptr->profit - (ptr->len - 1);
498 if ((ptr->profit >= 0) && (newprofit < 0)) {
499 remove_token_from_group(ptr);
500 insert_token_in_group(&bad_head, ptr);
501 }
502 ptr->profit=newprofit;
503
504 ptr=ptr->smaller;
505 }
506}
507
508/* count all the possible tokens in a symbol */ 318/* count all the possible tokens in a symbol */
509static void learn_symbol(unsigned char *symbol, int len) 319static void learn_symbol(unsigned char *symbol, int len)
510{ 320{
511 int i; 321 int i;
512 322
513 for (i = 0; i < len - 1; i++) 323 for (i = 0; i < len - 1; i++)
514 learn_token(symbol + i, len - i); 324 token_profit[ symbol[i] + (symbol[i + 1] << 8) ]++;
515} 325}
516 326
517/* decrease the count for all the possible tokens in a symbol */ 327/* decrease the count for all the possible tokens in a symbol */
@@ -520,117 +330,90 @@ static void forget_symbol(unsigned char *symbol, int len)
520 int i; 330 int i;
521 331
522 for (i = 0; i < len - 1; i++) 332 for (i = 0; i < len - 1; i++)
523 forget_token(symbol + i, len - i); 333 token_profit[ symbol[i] + (symbol[i + 1] << 8) ]--;
524} 334}
525 335
526/* set all the symbol flags and do the initial token count */ 336/* remove all the invalid symbols from the table and do the initial token count */
527static void build_initial_tok_table(void) 337static void build_initial_tok_table(void)
528{ 338{
529 int i, use_it, valid; 339 unsigned int i, pos;
530 340
531 valid = 0; 341 pos = 0;
532 for (i = 0; i < cnt; i++) { 342 for (i = 0; i < table_cnt; i++) {
533 table[i].flags = 0;
534 if ( symbol_valid(&table[i]) ) { 343 if ( symbol_valid(&table[i]) ) {
535 table[i].flags |= SYM_FLAG_VALID; 344 if (pos != i)
536 valid++; 345 table[pos] = table[i];
346 learn_symbol(table[pos].sym, table[pos].len);
347 pos++;
537 } 348 }
538 } 349 }
539 350 table_cnt = pos;
540 use_it = 0;
541 for (i = 0; i < cnt; i++) {
542
543 /* subsample the available symbols. This method is almost like
544 * a Bresenham's algorithm to get uniformly distributed samples
545 * across the symbol table */
546 if (table[i].flags & SYM_FLAG_VALID) {
547
548 use_it += WORKING_SET;
549
550 if (use_it >= valid) {
551 table[i].flags |= SYM_FLAG_SAMPLED;
552 use_it -= valid;
553 }
554 }
555 if (table[i].flags & SYM_FLAG_SAMPLED)
556 learn_symbol(table[i].sym, table[i].len);
557 }
558} 351}
559 352
560/* replace a given token in all the valid symbols. Use the sampled symbols 353/* replace a given token in all the valid symbols. Use the sampled symbols
561 * to update the counts */ 354 * to update the counts */
562static void compress_symbols(unsigned char *str, int tlen, int idx) 355static void compress_symbols(unsigned char *str, int idx)
563{ 356{
564 int i, len, learn, size; 357 unsigned int i, len, size;
565 unsigned char *p; 358 unsigned char *p1, *p2;
566 359
567 for (i = 0; i < cnt; i++) { 360 for (i = 0; i < table_cnt; i++) {
568
569 if (!(table[i].flags & SYM_FLAG_VALID)) continue;
570 361
571 len = table[i].len; 362 len = table[i].len;
572 learn = 0; 363 p1 = table[i].sym;
573 p = table[i].sym; 364
365 /* find the token on the symbol */
366 p2 = memmem(p1, len, str, 2);
367 if (!p2) continue;
368
369 /* decrease the counts for this symbol's tokens */
370 forget_symbol(table[i].sym, len);
371
372 size = len;
574 373
575 do { 374 do {
375 *p2 = idx;
376 p2++;
377 size -= (p2 - p1);
378 memmove(p2, p2 + 1, size);
379 p1 = p2;
380 len--;
381
382 if (size < 2) break;
383
576 /* find the token on the symbol */ 384 /* find the token on the symbol */
577 p = (unsigned char *) strstr((char *) p, (char *) str); 385 p2 = memmem(p1, size, str, 2);
578 if (!p) break;
579
580 if (!learn) {
581 /* if this symbol was used to count, decrease it */
582 if (table[i].flags & SYM_FLAG_SAMPLED)
583 forget_symbol(table[i].sym, len);
584 learn = 1;
585 }
586 386
587 *p = idx; 387 } while (p2);
588 size = (len - (p - table[i].sym)) - tlen + 1;
589 memmove(p + 1, p + tlen, size);
590 p++;
591 len -= tlen - 1;
592 388
593 } while (size >= tlen); 389 table[i].len = len;
594 390
595 if(learn) { 391 /* increase the counts for this symbol's new tokens */
596 table[i].len = len; 392 learn_symbol(table[i].sym, len);
597 /* if this symbol was used to count, learn it again */
598 if(table[i].flags & SYM_FLAG_SAMPLED)
599 learn_symbol(table[i].sym, len);
600 }
601 } 393 }
602} 394}
603 395
604/* search the token with the maximum profit */ 396/* search the token with the maximum profit */
605static struct token *find_best_token(void) 397static int find_best_token(void)
606{ 398{
607 struct token *ptr,*best,*head; 399 int i, best, bestprofit;
608 int bestprofit;
609 400
610 bestprofit=-10000; 401 bestprofit=-10000;
402 best = 0;
611 403
612 /* failsafe: if the "good" list is empty search from the "bad" list */ 404 for (i = 0; i < 0x10000; i++) {
613 if(good_head.right == &good_head) head = &bad_head; 405 if (token_profit[i] > bestprofit) {
614 else head = &good_head; 406 best = i;
615 407 bestprofit = token_profit[i];
616 ptr = head->right;
617 best = NULL;
618 while (ptr != head) {
619 if (ptr->profit > bestprofit) {
620 bestprofit = ptr->profit;
621 best = ptr;
622 } 408 }
623 ptr = ptr->right;
624 } 409 }
625
626 return best; 410 return best;
627} 411}
628 412
629/* this is the core of the algorithm: calculate the "best" table */ 413/* this is the core of the algorithm: calculate the "best" table */
630static void optimize_result(void) 414static void optimize_result(void)
631{ 415{
632 struct token *best; 416 int i, best;
633 int i;
634 417
635 /* using the '\0' symbol last allows compress_symbols to use standard 418 /* using the '\0' symbol last allows compress_symbols to use standard
636 * fast string functions */ 419 * fast string functions */
@@ -644,14 +427,12 @@ static void optimize_result(void)
644 best = find_best_token(); 427 best = find_best_token();
645 428
646 /* place it in the "best" table */ 429 /* place it in the "best" table */
647 best_table_len[i] = best->len; 430 best_table_len[i] = 2;
648 memcpy(best_table[i], best->data, best_table_len[i]); 431 best_table[i][0] = best & 0xFF;
649 /* zero terminate the token so that we can use strstr 432 best_table[i][1] = (best >> 8) & 0xFF;
650 in compress_symbols */
651 best_table[i][best_table_len[i]]='\0';
652 433
653 /* replace this token in all the valid symbols */ 434 /* replace this token in all the valid symbols */
654 compress_symbols(best_table[i], best_table_len[i], i); 435 compress_symbols(best_table[i], i);
655 } 436 }
656 } 437 }
657} 438}
@@ -659,39 +440,28 @@ static void optimize_result(void)
659/* start by placing the symbols that are actually used on the table */ 440/* start by placing the symbols that are actually used on the table */
660static void insert_real_symbols_in_table(void) 441static void insert_real_symbols_in_table(void)
661{ 442{
662 int i, j, c; 443 unsigned int i, j, c;
663 444
664 memset(best_table, 0, sizeof(best_table)); 445 memset(best_table, 0, sizeof(best_table));
665 memset(best_table_len, 0, sizeof(best_table_len)); 446 memset(best_table_len, 0, sizeof(best_table_len));
666 447
667 for (i = 0; i < cnt; i++) { 448 for (i = 0; i < table_cnt; i++) {
668 if (table[i].flags & SYM_FLAG_VALID) { 449 for (j = 0; j < table[i].len; j++) {
669 for (j = 0; j < table[i].len; j++) { 450 c = table[i].sym[j];
670 c = table[i].sym[j]; 451 best_table[c][0]=c;
671 best_table[c][0]=c; 452 best_table_len[c]=1;
672 best_table_len[c]=1;
673 }
674 } 453 }
675 } 454 }
676} 455}
677 456
678static void optimize_token_table(void) 457static void optimize_token_table(void)
679{ 458{
680 memset(hash_table, 0, sizeof(hash_table));
681
682 good_head.left = &good_head;
683 good_head.right = &good_head;
684
685 bad_head.left = &bad_head;
686 bad_head.right = &bad_head;
687
688 build_initial_tok_table(); 459 build_initial_tok_table();
689 460
690 insert_real_symbols_in_table(); 461 insert_real_symbols_in_table();
691 462
692 /* When valid symbol is not registered, exit to error */ 463 /* When valid symbol is not registered, exit to error */
693 if (good_head.left == good_head.right && 464 if (!table_cnt) {
694 bad_head.left == bad_head.right) {
695 fprintf(stderr, "No valid symbol.\n"); 465 fprintf(stderr, "No valid symbol.\n");
696 exit(1); 466 exit(1);
697 } 467 }
@@ -700,8 +470,7 @@ static void optimize_token_table(void)
700} 470}
701 471
702 472
703int 473int main(int argc, char **argv)
704main(int argc, char **argv)
705{ 474{
706 if (argc >= 2) { 475 if (argc >= 2) {
707 int i; 476 int i;
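
The kallsyms.c rewrite above drops the old variable-length token machinery (hash table, good/bad profit lists, sampled working set) in favour of a much simpler scheme: count every adjacent byte pair of every symbol name in a 65536-entry token_profit[] table, repeatedly pick the most frequent pair as the next one-byte token, and substitute it in place with memmem(). A stand-alone sketch of the counting and selection steps (illustrative only, not the build script itself):

#include <stdio.h>
#include <string.h>

static int pair_profit[0x10000];

/* Count every adjacent byte pair of one symbol name, as learn_symbol() now does. */
static void learn(const unsigned char *sym, size_t len)
{
        size_t i;

        for (i = 0; i + 1 < len; i++)
                pair_profit[sym[i] | (sym[i + 1] << 8)]++;
}

/* The pair with the highest count becomes the next single-byte token. */
static int best_pair(void)
{
        int i, best = 0;

        for (i = 1; i < 0x10000; i++)
                if (pair_profit[i] > pair_profit[best])
                        best = i;
        return best;              /* low byte = first char, high byte = second */
}

int main(void)
{
        const char *syms[] = { "sys_read", "sys_write", "sys_readv" };
        size_t i;
        int b;

        for (i = 0; i < sizeof(syms) / sizeof(syms[0]); i++)
                learn((const unsigned char *)syms[i], strlen(syms[i]));
        b = best_pair();
        printf("best token: \"%c%c\"\n", b & 0xff, (b >> 8) & 0xff);
        return 0;
}
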
diff --git a/scripts/ver_linux b/scripts/ver_linux
index a28c279c49dd..beb43ef7f761 100755
--- a/scripts/ver_linux
+++ b/scripts/ver_linux
@@ -25,9 +25,11 @@ ld -v | awk -F\) '{print $1}' | awk \
 '/BFD/{print "binutils ",$NF} \
 /^GNU/{print "binutils ",$4}'
 
-fdformat --version | awk -F\- '{print "util-linux ", $NF}'
+echo -n "util-linux "
+fdformat --version | awk '{print $NF}' | sed -e s/^util-linux-// -e s/\)$//
 
-mount --version | awk -F\- '{print "mount ", $NF}'
+echo -n "mount "
+mount --version | awk '{print $NF}' | sed -e s/^mount-// -e s/\)$//
 
 depmod -V 2>&1 | awk 'NR==1 {print "module-init-tools ",$NF}'
 
diff --git a/sound/arm/Makefile b/sound/arm/Makefile
index 103f136926d9..4ef6dd00c6ee 100644
--- a/sound/arm/Makefile
+++ b/sound/arm/Makefile
@@ -2,12 +2,14 @@
 # Makefile for ALSA
 #
 
-snd-sa11xx-uda1341-objs := sa11xx-uda1341.o
-snd-aaci-objs := aaci.o devdma.o
-snd-pxa2xx-pcm-objs := pxa2xx-pcm.o
-snd-pxa2xx-ac97-objs := pxa2xx-ac97.o
-
 obj-$(CONFIG_SND_SA11XX_UDA1341) += snd-sa11xx-uda1341.o
+snd-sa11xx-uda1341-objs := sa11xx-uda1341.o
+
 obj-$(CONFIG_SND_ARMAACI) += snd-aaci.o
-obj-$(CONFIG_SND_PXA2XX_PCM) += snd-pxa2xx-pcm.o
-obj-$(CONFIG_SND_PXA2XX_AC97) += snd-pxa2xx-ac97.o
+snd-aaci-objs := aaci.o devdma.o
+
+obj-$(CONFIG_SND_PXA2XX_PCM) += snd-pxa2xx-pcm.o
+snd-pxa2xx-pcm-objs := pxa2xx-pcm.o
+
+obj-$(CONFIG_SND_PXA2XX_AC97) += snd-pxa2xx-ac97.o
+snd-pxa2xx-ac97-objs := pxa2xx-ac97.o
diff --git a/sound/arm/aaci.c b/sound/arm/aaci.c
index 08cc3ddca96f..98877030d579 100644
--- a/sound/arm/aaci.c
+++ b/sound/arm/aaci.c
@@ -821,7 +821,7 @@ static int __devinit aaci_init_pcm(struct aaci *aaci)
 
 static unsigned int __devinit aaci_size_fifo(struct aaci *aaci)
 {
-	void *base = aaci->base + AACI_CSCH1;
+	void __iomem *base = aaci->base + AACI_CSCH1;
 	int i;
 
 	writel(TXCR_FEN | TXCR_TSZ16 | TXCR_TXEN, base + AACI_TXCR);
@@ -877,7 +877,7 @@ static int __devinit aaci_probe(struct amba_device *dev, void *id)
 	aaci->playback.fifo = aaci->base + AACI_DR1;
 
 	for (i = 0; i < 4; i++) {
-		void *base = aaci->base + i * 0x14;
+		void __iomem *base = aaci->base + i * 0x14;
 
 		writel(0, base + AACI_IE);
 		writel(0, base + AACI_TXCR);
diff --git a/sound/arm/aaci.h b/sound/arm/aaci.h
index d752e6426894..b2f969bc7845 100644
--- a/sound/arm/aaci.h
+++ b/sound/arm/aaci.h
@@ -200,8 +200,8 @@
 
 
 struct aaci_runtime {
-	void *base;
-	void *fifo;
+	void __iomem *base;
+	void __iomem *fifo;
 
 	struct ac97_pcm *pcm;
 	int pcm_open;
@@ -223,7 +223,7 @@ struct aaci_runtime {
 struct aaci {
 	struct amba_device *dev;
 	snd_card_t *card;
-	void *base;
+	void __iomem *base;
 	unsigned int fifosize;
 
 	/* AC'97 */
diff --git a/sound/core/memory.c b/sound/core/memory.c
index 1622893d00a2..291b4769bde3 100644
--- a/sound/core/memory.c
+++ b/sound/core/memory.c
@@ -116,15 +116,21 @@ void *snd_hidden_kmalloc(size_t size, unsigned int __nocast flags)
 	return _snd_kmalloc(size, flags);
 }
 
+void *snd_hidden_kzalloc(size_t size, unsigned int __nocast flags)
+{
+	void *ret = _snd_kmalloc(size, flags);
+	if (ret)
+		memset(ret, 0, size);
+	return ret;
+}
+EXPORT_SYMBOL(snd_hidden_kzalloc);
+
 void *snd_hidden_kcalloc(size_t n, size_t size, unsigned int __nocast flags)
 {
 	void *ret = NULL;
 	if (n != 0 && size > INT_MAX / n)
 		return ret;
-	ret = _snd_kmalloc(n * size, flags);
-	if (ret)
-		memset(ret, 0, n * size);
-	return ret;
+	return snd_hidden_kzalloc(n * size, flags);
 }
 
 void snd_hidden_kfree(const void *obj)
diff --git a/sound/isa/Kconfig b/sound/isa/Kconfig
index be4ea60a3679..5c3948311528 100644
--- a/sound/isa/Kconfig
+++ b/sound/isa/Kconfig
@@ -15,7 +15,8 @@ config SND_CS4231_LIB
 
 config SND_AD1816A
 	tristate "Analog Devices SoundPort AD1816A"
-	depends on SND && ISAPNP
+	depends on SND && PNP && ISA
+	select ISAPNP
 	select SND_OPL3_LIB
 	select SND_MPU401_UART
 	select SND_PCM
@@ -80,7 +81,8 @@ config SND_CS4236
 
 config SND_ES968
 	tristate "Generic ESS ES968 driver"
-	depends on SND && ISAPNP
+	depends on SND && PNP && ISA
+	select ISAPNP
 	select SND_MPU401_UART
 	select SND_PCM
 	help
@@ -160,7 +162,7 @@ config SND_GUSMAX
 
 config SND_INTERWAVE
 	tristate "AMD InterWave, Gravis UltraSound PnP"
-	depends on SND
+	depends on SND && PNP && ISA
 	select SND_RAWMIDI
 	select SND_CS4231_LIB
 	select SND_GUS_SYNTH
@@ -175,7 +177,7 @@ config SND_INTERWAVE
 
 config SND_INTERWAVE_STB
 	tristate "AMD InterWave + TEA6330T (UltraSound 32-Pro)"
-	depends on SND
+	depends on SND && PNP && ISA
 	select SND_RAWMIDI
 	select SND_CS4231_LIB
 	select SND_GUS_SYNTH
@@ -291,7 +293,8 @@ config SND_WAVEFRONT
 
 config SND_ALS100
 	tristate "Avance Logic ALS100/ALS120"
-	depends on SND && ISAPNP
+	depends on SND && PNP && ISA
+	select ISAPNP
 	select SND_OPL3_LIB
 	select SND_MPU401_UART
 	select SND_PCM
@@ -304,7 +307,8 @@ config SND_ALS100
 
 config SND_AZT2320
 	tristate "Aztech Systems AZT2320"
-	depends on SND && ISAPNP
+	depends on SND && PNP && ISA
+	select ISAPNP
 	select SND_OPL3_LIB
 	select SND_MPU401_UART
 	select SND_CS4231_LIB
@@ -328,7 +332,8 @@ config SND_CMI8330
 
 config SND_DT019X
 	tristate "Diamond Technologies DT-019X, Avance Logic ALS-007"
-	depends on SND && ISAPNP
+	depends on SND && PNP && ISA
+	select ISAPNP
 	select SND_OPL3_LIB
 	select SND_MPU401_UART
 	select SND_PCM
diff --git a/sound/oss/os.h b/sound/oss/os.h
index d6b96297835c..80dce329cc3a 100644
--- a/sound/oss/os.h
+++ b/sound/oss/os.h
@@ -19,9 +19,6 @@
 #include <linux/ioport.h>
 #include <asm/page.h>
 #include <asm/system.h>
-#ifdef __alpha__
-#include <asm/segment.h>
-#endif
 #include <linux/vmalloc.h>
 #include <asm/uaccess.h>
 #include <linux/poll.h>
diff --git a/sound/pci/ali5451/ali5451.c b/sound/pci/ali5451/ali5451.c
index ce6c9fadb594..4943299cf137 100644
--- a/sound/pci/ali5451/ali5451.c
+++ b/sound/pci/ali5451/ali5451.c
@@ -2249,7 +2249,7 @@ static int __devinit snd_ali_create(snd_card_t * card,
 		return -ENXIO;
 	}
 
-	if ((codec = kcalloc(1, sizeof(*codec), GFP_KERNEL)) == NULL) {
+	if ((codec = kzalloc(sizeof(*codec), GFP_KERNEL)) == NULL) {
 		pci_disable_device(pci);
 		return -ENOMEM;
 	}